// Vulkan Memory Allocator — vk_mem_alloc.h
// (Source listing extracted from the Doxygen documentation page of this file.)
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1479 /*
1480 Define this macro to 0/1 to disable/enable support for recording functionality,
1481 available through VmaAllocatorCreateInfo::pRecordSettings.
1482 */
1483 #ifndef VMA_RECORDING_ENABLED
1484  #ifdef _WIN32
1485  #define VMA_RECORDING_ENABLED 1
1486  #else
1487  #define VMA_RECORDING_ENABLED 0
1488  #endif
1489 #endif
1490 
1491 #ifndef NOMINMAX
1492  #define NOMINMAX // For windows.h
1493 #endif
1494 
1495 #include <vulkan/vulkan.h>
1496 
1497 #if VMA_RECORDING_ENABLED
1498  #include <windows.h>
1499 #endif
1500 
1501 #if !defined(VMA_DEDICATED_ALLOCATION)
1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1503  #define VMA_DEDICATED_ALLOCATION 1
1504  #else
1505  #define VMA_DEDICATED_ALLOCATION 0
1506  #endif
1507 #endif
1508 
1518 VK_DEFINE_HANDLE(VmaAllocator)
1519 
1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1522  VmaAllocator allocator,
1523  uint32_t memoryType,
1524  VkDeviceMemory memory,
1525  VkDeviceSize size);
1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1528  VmaAllocator allocator,
1529  uint32_t memoryType,
1530  VkDeviceMemory memory,
1531  VkDeviceSize size);
1532 
1546 
1576 
1579 typedef VkFlags VmaAllocatorCreateFlags;
1580 
1585 typedef struct VmaVulkanFunctions {
1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1588  PFN_vkAllocateMemory vkAllocateMemory;
1589  PFN_vkFreeMemory vkFreeMemory;
1590  PFN_vkMapMemory vkMapMemory;
1591  PFN_vkUnmapMemory vkUnmapMemory;
1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1594  PFN_vkBindBufferMemory vkBindBufferMemory;
1595  PFN_vkBindImageMemory vkBindImageMemory;
1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1598  PFN_vkCreateBuffer vkCreateBuffer;
1599  PFN_vkDestroyBuffer vkDestroyBuffer;
1600  PFN_vkCreateImage vkCreateImage;
1601  PFN_vkDestroyImage vkDestroyImage;
1602 #if VMA_DEDICATED_ALLOCATION
1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1605 #endif
1607 
1609 typedef enum VmaRecordFlagBits {
1616 
1619 typedef VkFlags VmaRecordFlags;
1620 
1622 typedef struct VmaRecordSettings
1623 {
1633  const char* pFilePath;
1635 
1638 {
1642 
1643  VkPhysicalDevice physicalDevice;
1645 
1646  VkDevice device;
1648 
1651 
1652  const VkAllocationCallbacks* pAllocationCallbacks;
1654 
1693  const VkDeviceSize* pHeapSizeLimit;
1714 
1716 VkResult vmaCreateAllocator(
1717  const VmaAllocatorCreateInfo* pCreateInfo,
1718  VmaAllocator* pAllocator);
1719 
1721 void vmaDestroyAllocator(
1722  VmaAllocator allocator);
1723 
1729  VmaAllocator allocator,
1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1731 
1737  VmaAllocator allocator,
1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1739 
1747  VmaAllocator allocator,
1748  uint32_t memoryTypeIndex,
1749  VkMemoryPropertyFlags* pFlags);
1750 
1760  VmaAllocator allocator,
1761  uint32_t frameIndex);
1762 
1765 typedef struct VmaStatInfo
1766 {
1768  uint32_t blockCount;
1774  VkDeviceSize usedBytes;
1776  VkDeviceSize unusedBytes;
1779 } VmaStatInfo;
1780 
1782 typedef struct VmaStats
1783 {
1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1787 } VmaStats;
1788 
1790 void vmaCalculateStats(
1791  VmaAllocator allocator,
1792  VmaStats* pStats);
1793 
1794 #define VMA_STATS_STRING_ENABLED 1
1795 
1796 #if VMA_STATS_STRING_ENABLED
1797 
1799 
1801 void vmaBuildStatsString(
1802  VmaAllocator allocator,
1803  char** ppStatsString,
1804  VkBool32 detailedMap);
1805 
1806 void vmaFreeStatsString(
1807  VmaAllocator allocator,
1808  char* pStatsString);
1809 
1810 #endif // #if VMA_STATS_STRING_ENABLED
1811 
1820 VK_DEFINE_HANDLE(VmaPool)
1821 
1822 typedef enum VmaMemoryUsage
1823 {
1872 } VmaMemoryUsage;
1873 
1888 
1943 
1956 
1966 
1973 
1977 
1979 {
1992  VkMemoryPropertyFlags requiredFlags;
1997  VkMemoryPropertyFlags preferredFlags;
2005  uint32_t memoryTypeBits;
2018  void* pUserData;
2020 
2037 VkResult vmaFindMemoryTypeIndex(
2038  VmaAllocator allocator,
2039  uint32_t memoryTypeBits,
2040  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2041  uint32_t* pMemoryTypeIndex);
2042 
2056  VmaAllocator allocator,
2057  const VkBufferCreateInfo* pBufferCreateInfo,
2058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2059  uint32_t* pMemoryTypeIndex);
2060 
2074  VmaAllocator allocator,
2075  const VkImageCreateInfo* pImageCreateInfo,
2076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2077  uint32_t* pMemoryTypeIndex);
2078 
2099 
2116 
2127 
2133 
2136 typedef VkFlags VmaPoolCreateFlags;
2137 
2140 typedef struct VmaPoolCreateInfo {
2155  VkDeviceSize blockSize;
2184 
2187 typedef struct VmaPoolStats {
2190  VkDeviceSize size;
2193  VkDeviceSize unusedSize;
2206  VkDeviceSize unusedRangeSizeMax;
2209  size_t blockCount;
2210 } VmaPoolStats;
2211 
2218 VkResult vmaCreatePool(
2219  VmaAllocator allocator,
2220  const VmaPoolCreateInfo* pCreateInfo,
2221  VmaPool* pPool);
2222 
2225 void vmaDestroyPool(
2226  VmaAllocator allocator,
2227  VmaPool pool);
2228 
2235 void vmaGetPoolStats(
2236  VmaAllocator allocator,
2237  VmaPool pool,
2238  VmaPoolStats* pPoolStats);
2239 
2247  VmaAllocator allocator,
2248  VmaPool pool,
2249  size_t* pLostAllocationCount);
2250 
2265 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2266 
2291 VK_DEFINE_HANDLE(VmaAllocation)
2292 
2293 
2295 typedef struct VmaAllocationInfo {
2300  uint32_t memoryType;
2309  VkDeviceMemory deviceMemory;
2314  VkDeviceSize offset;
2319  VkDeviceSize size;
2333  void* pUserData;
2335 
2346 VkResult vmaAllocateMemory(
2347  VmaAllocator allocator,
2348  const VkMemoryRequirements* pVkMemoryRequirements,
2349  const VmaAllocationCreateInfo* pCreateInfo,
2350  VmaAllocation* pAllocation,
2351  VmaAllocationInfo* pAllocationInfo);
2352 
2360  VmaAllocator allocator,
2361  VkBuffer buffer,
2362  const VmaAllocationCreateInfo* pCreateInfo,
2363  VmaAllocation* pAllocation,
2364  VmaAllocationInfo* pAllocationInfo);
2365 
2367 VkResult vmaAllocateMemoryForImage(
2368  VmaAllocator allocator,
2369  VkImage image,
2370  const VmaAllocationCreateInfo* pCreateInfo,
2371  VmaAllocation* pAllocation,
2372  VmaAllocationInfo* pAllocationInfo);
2373 
2375 void vmaFreeMemory(
2376  VmaAllocator allocator,
2377  VmaAllocation allocation);
2378 
2399 VkResult vmaResizeAllocation(
2400  VmaAllocator allocator,
2401  VmaAllocation allocation,
2402  VkDeviceSize newSize);
2403 
2421  VmaAllocator allocator,
2422  VmaAllocation allocation,
2423  VmaAllocationInfo* pAllocationInfo);
2424 
2439 VkBool32 vmaTouchAllocation(
2440  VmaAllocator allocator,
2441  VmaAllocation allocation);
2442 
2457  VmaAllocator allocator,
2458  VmaAllocation allocation,
2459  void* pUserData);
2460 
2472  VmaAllocator allocator,
2473  VmaAllocation* pAllocation);
2474 
2509 VkResult vmaMapMemory(
2510  VmaAllocator allocator,
2511  VmaAllocation allocation,
2512  void** ppData);
2513 
2518 void vmaUnmapMemory(
2519  VmaAllocator allocator,
2520  VmaAllocation allocation);
2521 
2534 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2535 
2548 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2549 
2566 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2567 
2569 typedef struct VmaDefragmentationInfo {
2574  VkDeviceSize maxBytesToMove;
2581 
2583 typedef struct VmaDefragmentationStats {
2585  VkDeviceSize bytesMoved;
2587  VkDeviceSize bytesFreed;
2593 
2632 VkResult vmaDefragment(
2633  VmaAllocator allocator,
2634  VmaAllocation* pAllocations,
2635  size_t allocationCount,
2636  VkBool32* pAllocationsChanged,
2637  const VmaDefragmentationInfo *pDefragmentationInfo,
2638  VmaDefragmentationStats* pDefragmentationStats);
2639 
2652 VkResult vmaBindBufferMemory(
2653  VmaAllocator allocator,
2654  VmaAllocation allocation,
2655  VkBuffer buffer);
2656 
2669 VkResult vmaBindImageMemory(
2670  VmaAllocator allocator,
2671  VmaAllocation allocation,
2672  VkImage image);
2673 
2700 VkResult vmaCreateBuffer(
2701  VmaAllocator allocator,
2702  const VkBufferCreateInfo* pBufferCreateInfo,
2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2704  VkBuffer* pBuffer,
2705  VmaAllocation* pAllocation,
2706  VmaAllocationInfo* pAllocationInfo);
2707 
2719 void vmaDestroyBuffer(
2720  VmaAllocator allocator,
2721  VkBuffer buffer,
2722  VmaAllocation allocation);
2723 
2725 VkResult vmaCreateImage(
2726  VmaAllocator allocator,
2727  const VkImageCreateInfo* pImageCreateInfo,
2728  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2729  VkImage* pImage,
2730  VmaAllocation* pAllocation,
2731  VmaAllocationInfo* pAllocationInfo);
2732 
2744 void vmaDestroyImage(
2745  VmaAllocator allocator,
2746  VkImage image,
2747  VmaAllocation allocation);
2748 
2749 #ifdef __cplusplus
2750 }
2751 #endif
2752 
2753 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2754 
2755 // For Visual Studio IntelliSense.
2756 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2757 #define VMA_IMPLEMENTATION
2758 #endif
2759 
2760 #ifdef VMA_IMPLEMENTATION
2761 #undef VMA_IMPLEMENTATION
2762 
2763 #include <cstdint>
2764 #include <cstdlib>
2765 #include <cstring>
2766 
2767 /*******************************************************************************
2768 CONFIGURATION SECTION
2769 
2770 Define some of these macros before each #include of this header or change them
2771 here if you need other then default behavior depending on your environment.
2772 */
2773 
2774 /*
2775 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2776 internally, like:
2777 
2778  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2779 
2780 Define to 0 if you are going to provide you own pointers to Vulkan functions via
2781 VmaAllocatorCreateInfo::pVulkanFunctions.
2782 */
2783 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2784 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2785 #endif
2786 
2787 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2788 //#define VMA_USE_STL_CONTAINERS 1
2789 
2790 /* Set this macro to 1 to make the library including and using STL containers:
2791 std::pair, std::vector, std::list, std::unordered_map.
2792 
2793 Set it to 0 or undefined to make the library using its own implementation of
2794 the containers.
2795 */
2796 #if VMA_USE_STL_CONTAINERS
2797  #define VMA_USE_STL_VECTOR 1
2798  #define VMA_USE_STL_UNORDERED_MAP 1
2799  #define VMA_USE_STL_LIST 1
2800 #endif
2801 
2802 #if VMA_USE_STL_VECTOR
2803  #include <vector>
2804 #endif
2805 
2806 #if VMA_USE_STL_UNORDERED_MAP
2807  #include <unordered_map>
2808 #endif
2809 
2810 #if VMA_USE_STL_LIST
2811  #include <list>
2812 #endif
2813 
2814 /*
2815 Following headers are used in this CONFIGURATION section only, so feel free to
2816 remove them if not needed.
2817 */
2818 #include <cassert> // for assert
2819 #include <algorithm> // for min, max
2820 #include <mutex> // for std::mutex
2821 #include <atomic> // for std::atomic
2822 
2823 #ifndef VMA_NULL
2824  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2825  #define VMA_NULL nullptr
2826 #endif
2827 
2828 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2829 #include <cstdlib>
2830 void *aligned_alloc(size_t alignment, size_t size)
2831 {
2832  // alignment must be >= sizeof(void*)
2833  if(alignment < sizeof(void*))
2834  {
2835  alignment = sizeof(void*);
2836  }
2837 
2838  return memalign(alignment, size);
2839 }
2840 #elif defined(__APPLE__) || defined(__ANDROID__)
2841 #include <cstdlib>
2842 void *aligned_alloc(size_t alignment, size_t size)
2843 {
2844  // alignment must be >= sizeof(void*)
2845  if(alignment < sizeof(void*))
2846  {
2847  alignment = sizeof(void*);
2848  }
2849 
2850  void *pointer;
2851  if(posix_memalign(&pointer, alignment, size) == 0)
2852  return pointer;
2853  return VMA_NULL;
2854 }
2855 #endif
2856 
2857 // If your compiler is not compatible with C++11 and definition of
2858 // aligned_alloc() function is missing, uncommeting following line may help:
2859 
2860 //#include <malloc.h>
2861 
2862 // Normal assert to check for programmer's errors, especially in Debug configuration.
2863 #ifndef VMA_ASSERT
2864  #ifdef _DEBUG
2865  #define VMA_ASSERT(expr) assert(expr)
2866  #else
2867  #define VMA_ASSERT(expr)
2868  #endif
2869 #endif
2870 
2871 // Assert that will be called very often, like inside data structures e.g. operator[].
2872 // Making it non-empty can make program slow.
2873 #ifndef VMA_HEAVY_ASSERT
2874  #ifdef _DEBUG
2875  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2876  #else
2877  #define VMA_HEAVY_ASSERT(expr)
2878  #endif
2879 #endif
2880 
2881 #ifndef VMA_ALIGN_OF
2882  #define VMA_ALIGN_OF(type) (__alignof(type))
2883 #endif
2884 
2885 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2886  #if defined(_WIN32)
2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2888  #else
2889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2890  #endif
2891 #endif
2892 
2893 #ifndef VMA_SYSTEM_FREE
2894  #if defined(_WIN32)
2895  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2896  #else
2897  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2898  #endif
2899 #endif
2900 
2901 #ifndef VMA_MIN
2902  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2903 #endif
2904 
2905 #ifndef VMA_MAX
2906  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2907 #endif
2908 
2909 #ifndef VMA_SWAP
2910  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2911 #endif
2912 
2913 #ifndef VMA_SORT
2914  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2915 #endif
2916 
2917 #ifndef VMA_DEBUG_LOG
2918  #define VMA_DEBUG_LOG(format, ...)
2919  /*
2920  #define VMA_DEBUG_LOG(format, ...) do { \
2921  printf(format, __VA_ARGS__); \
2922  printf("\n"); \
2923  } while(false)
2924  */
2925 #endif
2926 
2927 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2928 #if VMA_STATS_STRING_ENABLED
2929  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2930  {
2931  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2932  }
2933  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2934  {
2935  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2936  }
2937  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2938  {
2939  snprintf(outStr, strLen, "%p", ptr);
2940  }
2941 #endif
2942 
2943 #ifndef VMA_MUTEX
2944  class VmaMutex
2945  {
2946  public:
2947  VmaMutex() { }
2948  ~VmaMutex() { }
2949  void Lock() { m_Mutex.lock(); }
2950  void Unlock() { m_Mutex.unlock(); }
2951  private:
2952  std::mutex m_Mutex;
2953  };
2954  #define VMA_MUTEX VmaMutex
2955 #endif
2956 
2957 /*
2958 If providing your own implementation, you need to implement a subset of std::atomic:
2959 
2960 - Constructor(uint32_t desired)
2961 - uint32_t load() const
2962 - void store(uint32_t desired)
2963 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2964 */
2965 #ifndef VMA_ATOMIC_UINT32
2966  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2967 #endif
2968 
2969 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2970 
2974  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2975 #endif
2976 
2977 #ifndef VMA_DEBUG_ALIGNMENT
2978 
2982  #define VMA_DEBUG_ALIGNMENT (1)
2983 #endif
2984 
2985 #ifndef VMA_DEBUG_MARGIN
2986 
2990  #define VMA_DEBUG_MARGIN (0)
2991 #endif
2992 
2993 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2994 
2998  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2999 #endif
3000 
3001 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3002 
3007  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3008 #endif
3009 
3010 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3011 
3015  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3016 #endif
3017 
3018 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3019 
3023  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3024 #endif
3025 
3026 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3027  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3029 #endif
3030 
3031 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3032  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3034 #endif
3035 
3036 #ifndef VMA_CLASS_NO_COPY
3037  #define VMA_CLASS_NO_COPY(className) \
3038  private: \
3039  className(const className&) = delete; \
3040  className& operator=(const className&) = delete;
3041 #endif
3042 
3043 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3044 
3045 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3046 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3047 
3048 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3049 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3050 
3051 /*******************************************************************************
3052 END OF CONFIGURATION
3053 */
3054 
3055 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3056  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3057 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Simple shift-and-add popcount; identical results to the bit-twiddling form.
    uint32_t count = 0;
    while(v != 0)
    {
        count += v & 1u;
        v >>= 1;
    }
    return count;
}
3068 
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Round up to the next multiple of align (align need not be a power of 2).
    return ((val + align - 1) / align) * align;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    // Integer division truncates, discarding the remainder.
    return (val / align) * align;
}
3083 
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    // Adding half the divisor before dividing rounds to nearest (for nonnegative x, y).
    const T half = y / (T)2;
    return (x + half) / y;
}
3090 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so x & (x - 1) clears it to zero.
    return (x & (x - 1)) == 0;
}
3101 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // 0 and 1 map to themselves (matches the bit-smearing implementation).
    if(v <= 1)
    {
        return v;
    }
    uint32_t result = 1;
    --v;
    while(v != 0)
    {
        v >>= 1;
        result <<= 1; // Unsigned wrap for v > 2^31, same as the original.
    }
    return result;
}
// 64-bit overload: returns smallest power of 2 greater or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    if(v <= 1)
    {
        return v; // 0 and 1 map to themselves.
    }
    uint64_t result = 1;
    --v;
    while(v != 0)
    {
        v >>= 1;
        result <<= 1;
    }
    return result;
}
3126 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Repeatedly clear the lowest set bit; the last bit standing is the highest.
    uint32_t result = v;
    while(v != 0)
    {
        result = v;
        v &= v - 1;
    }
    return result;
}
// 64-bit overload: returns largest power of 2 less or equal to v.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    uint64_t result = v;
    while(v != 0)
    {
        result = v;
        v &= v - 1; // Clear the lowest set bit.
    }
    return result;
}
3149 
3150 static inline bool VmaStrIsEmpty(const char* pStr)
3151 {
3152  return pStr == VMA_NULL || *pStr == '\0';
3153 }
3154 
// Maps a pool algorithm flag value to a human-readable name (used for stats dumps).
// NOTE(review): this extract appears to be missing the two `case` labels
// (for the linear and buddy algorithm flag bits) that should precede the
// "Linear"/"Buddy" returns — verify against the complete file before editing.
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
        return "Linear";
        return "Buddy";
    case 0:
        return "Default";
    default:
        // Unrecognized flag combination.
        VMA_ASSERT(0);
        return "";
    }
}
3170 
3171 #ifndef VMA_SORT
3172 
/*
Lomuto-style partition used by the VMA_SORT fallback: moves elements that
compare less than the pivot (the last element) to the front and returns the
pivot's final position.
*/
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator pivot = end;
    --pivot;
    Iterator storeIt = beg;
    for(Iterator curIt = beg; curIt < pivot; ++curIt)
    {
        if(cmp(*curIt, *pivot))
        {
            if(storeIt != curIt)
            {
                VMA_SWAP(*curIt, *storeIt);
            }
            ++storeIt;
        }
    }
    // Place the pivot between the two partitions.
    if(storeIt != pivot)
    {
        VMA_SWAP(*storeIt, *pivot);
    }
    return storeIt;
}
3195 
3196 template<typename Iterator, typename Compare>
3197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3198 {
3199  if(beg < end)
3200  {
3201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3204  }
3205 }
3206 
3207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3208 
3209 #endif // #ifndef VMA_SORT
3210 
3211 /*
3212 Returns true if two memory blocks occupy overlapping pages.
3213 ResourceA must be in less memory offset than ResourceB.
3214 
3215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3217 */
3218 static inline bool VmaBlocksOnSamePage(
3219  VkDeviceSize resourceAOffset,
3220  VkDeviceSize resourceASize,
3221  VkDeviceSize resourceBOffset,
3222  VkDeviceSize pageSize)
3223 {
3224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3227  VkDeviceSize resourceBStart = resourceBOffset;
3228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3229  return resourceAEndPage == resourceBStartPage;
3230 }
3231 
// Kind of resource occupying a suballocation within a memory block. Used to
// decide whether neighboring suballocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    // Range is not in use.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // In use, but whether it backs a buffer or an image is not known — treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    // Backs a buffer.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Backs an image of unknown tiling — treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    // Backs an image with linear tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    // Backs an image with optimal tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces the enum's underlying type to 32 bits.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3242 
3243 /*
3244 Returns true if given suballocation types could conflict and must respect
3245 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3246 or linear image and another one is optimal image. If type is unknown, behave
3247 conservatively.
3248 */
3249 static inline bool VmaIsBufferImageGranularityConflict(
3250  VmaSuballocationType suballocType1,
3251  VmaSuballocationType suballocType2)
3252 {
3253  if(suballocType1 > suballocType2)
3254  {
3255  VMA_SWAP(suballocType1, suballocType2);
3256  }
3257 
3258  switch(suballocType1)
3259  {
3260  case VMA_SUBALLOCATION_TYPE_FREE:
3261  return false;
3262  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3263  return true;
3264  case VMA_SUBALLOCATION_TYPE_BUFFER:
3265  return
3266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3268  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3269  return
3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3272  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3273  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3274  return
3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3276  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3277  return false;
3278  default:
3279  VMA_ASSERT(0);
3280  return true;
3281  }
3282 }
3283 
3284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3285 {
3286  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3287  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3288  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3289  {
3290  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3291  }
3292 }
3293 
3294 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3295 {
3296  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3297  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3298  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3299  {
3300  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3301  {
3302  return false;
3303  }
3304  }
3305  return true;
3306 }
3307 
3308 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3309 struct VmaMutexLock
3310 {
3311  VMA_CLASS_NO_COPY(VmaMutexLock)
3312 public:
3313  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3314  m_pMutex(useMutex ? &mutex : VMA_NULL)
3315  {
3316  if(m_pMutex)
3317  {
3318  m_pMutex->Lock();
3319  }
3320  }
3321 
3322  ~VmaMutexLock()
3323  {
3324  if(m_pMutex)
3325  {
3326  m_pMutex->Unlock();
3327  }
3328  }
3329 
3330 private:
3331  VMA_MUTEX* m_pMutex;
3332 };
3333 
3334 #if VMA_DEBUG_GLOBAL_MUTEX
3335  static VMA_MUTEX gDebugGlobalMutex;
3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3337 #else
3338  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3339 #endif
3340 
3341 // Minimum size of a free suballocation to register it in the free suballocation collection.
3342 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3343 
3344 /*
3345 Performs binary search and returns iterator to first element that is greater or
3346 equal to (key), according to comparison (cmp).
3347 
3348 Cmp should return true if first argument is less than second argument.
3349 
3350 Returned value is the found element, if present in the collection or place where
3351 new element with value (key) should be inserted.
3352 */
3353 template <typename CmpLess, typename IterT, typename KeyT>
3354 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3355 {
3356  size_t down = 0, up = (end - beg);
3357  while(down < up)
3358  {
3359  const size_t mid = (down + up) / 2;
3360  if(cmp(*(beg+mid), key))
3361  {
3362  down = mid + 1;
3363  }
3364  else
3365  {
3366  up = mid;
3367  }
3368  }
3369  return beg + down;
3370 }
3371 
3373 // Memory allocation
3374 
3375 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3376 {
3377  if((pAllocationCallbacks != VMA_NULL) &&
3378  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3379  {
3380  return (*pAllocationCallbacks->pfnAllocation)(
3381  pAllocationCallbacks->pUserData,
3382  size,
3383  alignment,
3384  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3385  }
3386  else
3387  {
3388  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3389  }
3390 }
3391 
3392 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3393 {
3394  if((pAllocationCallbacks != VMA_NULL) &&
3395  (pAllocationCallbacks->pfnFree != VMA_NULL))
3396  {
3397  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3398  }
3399  else
3400  {
3401  VMA_SYSTEM_FREE(ptr);
3402  }
3403 }
3404 
// Allocates raw, correctly aligned storage for one object of type T.
// NOTE: no constructor is run — callers use placement-new (see vma_new below).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3410 
// Allocates raw, correctly aligned storage for (count) objects of type T.
// NOTE: no constructors are run — see vma_new_array below.
// NOTE(review): sizeof(T) * count is not checked for overflow; callers are
// trusted to pass sane counts.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3416 
3417 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3418 
3419 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3420 
3421 template<typename T>
3422 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3423 {
3424  ptr->~T();
3425  VmaFree(pAllocationCallbacks, ptr);
3426 }
3427 
3428 template<typename T>
3429 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3430 {
3431  if(ptr != VMA_NULL)
3432  {
3433  for(size_t i = count; i--; )
3434  {
3435  ptr[i].~T();
3436  }
3437  VmaFree(pAllocationCallbacks, ptr);
3438  }
3439 }
3440 
3441 // STL-compatible allocator.
3442 template<typename T>
3443 class VmaStlAllocator
3444 {
3445 public:
3446  const VkAllocationCallbacks* const m_pCallbacks;
3447  typedef T value_type;
3448 
3449  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3450  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3451 
3452  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3453  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3454 
3455  template<typename U>
3456  bool operator==(const VmaStlAllocator<U>& rhs) const
3457  {
3458  return m_pCallbacks == rhs.m_pCallbacks;
3459  }
3460  template<typename U>
3461  bool operator!=(const VmaStlAllocator<U>& rhs) const
3462  {
3463  return m_pCallbacks != rhs.m_pCallbacks;
3464  }
3465 
3466  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3467 };
3468 
3469 #if VMA_USE_STL_VECTOR
3470 
3471 #define VmaVector std::vector
3472 
// Inserts (item) into (vec) at position (index), shifting later elements back.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
3478 
// Removes the element at (index) from (vec), shifting later elements forward.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
3484 
3485 #else // #if VMA_USE_STL_VECTOR
3486 
3487 /* Class with interface compatible with subset of std::vector.
3488 T must be POD because constructors and destructors are not called and memcpy is
3489 used for these objects. */
3490 template<typename T, typename AllocatorT>
3491 class VmaVector
3492 {
3493 public:
3494  typedef T value_type;
3495 
3496  VmaVector(const AllocatorT& allocator) :
3497  m_Allocator(allocator),
3498  m_pArray(VMA_NULL),
3499  m_Count(0),
3500  m_Capacity(0)
3501  {
3502  }
3503 
3504  VmaVector(size_t count, const AllocatorT& allocator) :
3505  m_Allocator(allocator),
3506  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3507  m_Count(count),
3508  m_Capacity(count)
3509  {
3510  }
3511 
3512  VmaVector(const VmaVector<T, AllocatorT>& src) :
3513  m_Allocator(src.m_Allocator),
3514  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3515  m_Count(src.m_Count),
3516  m_Capacity(src.m_Count)
3517  {
3518  if(m_Count != 0)
3519  {
3520  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3521  }
3522  }
3523 
3524  ~VmaVector()
3525  {
3526  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3527  }
3528 
3529  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3530  {
3531  if(&rhs != this)
3532  {
3533  resize(rhs.m_Count);
3534  if(m_Count != 0)
3535  {
3536  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3537  }
3538  }
3539  return *this;
3540  }
3541 
3542  bool empty() const { return m_Count == 0; }
3543  size_t size() const { return m_Count; }
3544  T* data() { return m_pArray; }
3545  const T* data() const { return m_pArray; }
3546 
3547  T& operator[](size_t index)
3548  {
3549  VMA_HEAVY_ASSERT(index < m_Count);
3550  return m_pArray[index];
3551  }
3552  const T& operator[](size_t index) const
3553  {
3554  VMA_HEAVY_ASSERT(index < m_Count);
3555  return m_pArray[index];
3556  }
3557 
3558  T& front()
3559  {
3560  VMA_HEAVY_ASSERT(m_Count > 0);
3561  return m_pArray[0];
3562  }
3563  const T& front() const
3564  {
3565  VMA_HEAVY_ASSERT(m_Count > 0);
3566  return m_pArray[0];
3567  }
3568  T& back()
3569  {
3570  VMA_HEAVY_ASSERT(m_Count > 0);
3571  return m_pArray[m_Count - 1];
3572  }
3573  const T& back() const
3574  {
3575  VMA_HEAVY_ASSERT(m_Count > 0);
3576  return m_pArray[m_Count - 1];
3577  }
3578 
3579  void reserve(size_t newCapacity, bool freeMemory = false)
3580  {
3581  newCapacity = VMA_MAX(newCapacity, m_Count);
3582 
3583  if((newCapacity < m_Capacity) && !freeMemory)
3584  {
3585  newCapacity = m_Capacity;
3586  }
3587 
3588  if(newCapacity != m_Capacity)
3589  {
3590  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3591  if(m_Count != 0)
3592  {
3593  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3594  }
3595  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3596  m_Capacity = newCapacity;
3597  m_pArray = newArray;
3598  }
3599  }
3600 
3601  void resize(size_t newCount, bool freeMemory = false)
3602  {
3603  size_t newCapacity = m_Capacity;
3604  if(newCount > m_Capacity)
3605  {
3606  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3607  }
3608  else if(freeMemory)
3609  {
3610  newCapacity = newCount;
3611  }
3612 
3613  if(newCapacity != m_Capacity)
3614  {
3615  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3616  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3617  if(elementsToCopy != 0)
3618  {
3619  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3620  }
3621  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3622  m_Capacity = newCapacity;
3623  m_pArray = newArray;
3624  }
3625 
3626  m_Count = newCount;
3627  }
3628 
3629  void clear(bool freeMemory = false)
3630  {
3631  resize(0, freeMemory);
3632  }
3633 
3634  void insert(size_t index, const T& src)
3635  {
3636  VMA_HEAVY_ASSERT(index <= m_Count);
3637  const size_t oldCount = size();
3638  resize(oldCount + 1);
3639  if(index < oldCount)
3640  {
3641  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3642  }
3643  m_pArray[index] = src;
3644  }
3645 
3646  void remove(size_t index)
3647  {
3648  VMA_HEAVY_ASSERT(index < m_Count);
3649  const size_t oldCount = size();
3650  if(index < oldCount - 1)
3651  {
3652  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3653  }
3654  resize(oldCount - 1);
3655  }
3656 
3657  void push_back(const T& src)
3658  {
3659  const size_t newIndex = size();
3660  resize(newIndex + 1);
3661  m_pArray[newIndex] = src;
3662  }
3663 
3664  void pop_back()
3665  {
3666  VMA_HEAVY_ASSERT(m_Count > 0);
3667  resize(size() - 1);
3668  }
3669 
3670  void push_front(const T& src)
3671  {
3672  insert(0, src);
3673  }
3674 
3675  void pop_front()
3676  {
3677  VMA_HEAVY_ASSERT(m_Count > 0);
3678  remove(0);
3679  }
3680 
3681  typedef T* iterator;
3682 
3683  iterator begin() { return m_pArray; }
3684  iterator end() { return m_pArray + m_Count; }
3685 
3686 private:
3687  AllocatorT m_Allocator;
3688  T* m_pArray;
3689  size_t m_Count;
3690  size_t m_Capacity;
3691 };
3692 
// Inserts item at given index of a VmaVector (same interface as the
// std::vector overload above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
3698 
// Removes element at given index from a VmaVector (same interface as the
// std::vector overload above).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
3704 
3705 #endif // #if VMA_USE_STL_VECTOR
3706 
// Inserts value into a vector sorted by CmpLess, keeping it sorted.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    // Lower bound: first element not less than value.
    typename VectorT::value_type* const pBeg = vector.data();
    typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const size_t indexToInsert = (size_t)(VmaBinaryFindFirstNotLess(pBeg, pEnd, value, CmpLess()) - pBeg);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
3718 
// Removes the first element equivalent to value from a sorted vector.
// Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Element exists iff the lower bound is valid and equivalent to value
    // (neither compares less than the other).
    const bool found =
        (it != vector.end()) &&
        !comparator(*it, value) &&
        !comparator(value, *it);
    if(found)
    {
        VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    }
    return found;
}
3736 
3737 template<typename CmpLess, typename IterT, typename KeyT>
3738 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3739 {
3740  CmpLess comparator;
3741  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3742  beg, end, value, comparator);
3743  if(it == end ||
3744  (!comparator(*it, value) && !comparator(value, *it)))
3745  {
3746  return it;
3747  }
3748  return end;
3749 }
3750 
3752 // class VmaPoolAllocator
3753 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers become dangling.
    void Clear();
    // Returns an uninitialized slot; O(1) unless a new block must be made.
    T* Alloc();
    // Returns a slot previously obtained from Alloc() back to its block.
    void Free(T* ptr);

private:
    // Each slot is either a live T or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        // Index of first free item in this block, or UINT32_MAX if full.
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a new block and appends it to m_ItemBlocks.
    ItemBlock& CreateNewBlock();
};
3789 
// Constructs the pool; no blocks are allocated until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3798 
// Destroys the pool, releasing all item blocks.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
3804 
// Frees every block's item array, then the block list itself.
// Note: T destructors run via vma_delete_array for all slots.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
3812 
// Returns a slot from the first block (scanning newest-to-oldest) that has
// a free item; creates a new block when all are full.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the item from the block's singly-linked free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
3834 
// Returns ptr's slot to the free list of the block that owns it.
// Asserts if ptr was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union via memcpy to avoid strict-aliasing violations.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the head of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
3858 
3859 template<typename T>
3860 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3861 {
3862  ItemBlock newBlock = {
3863  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3864 
3865  m_ItemBlocks.push_back(newBlock);
3866 
3867  // Setup singly-linked list of all free items in this block.
3868  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3869  newBlock.pItems[i].NextFreeIndex = i + 1;
3870  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3871  return m_ItemBlocks.back();
3872 }
3873 
3875 // class VmaRawList, VmaList
3876 
3877 #if VMA_USE_STL_LIST
3878 
3879 #define VmaList std::list
3880 
3881 #else // #if VMA_USE_STL_LIST
3882 
// Node of VmaRawList: doubly-linked, stores the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
3890 
// Doubly linked list operating on raw VmaListItem nodes.
// Nodes come from an internal VmaPoolAllocator; see VmaList for the
// STL-style wrapper with iterators.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
3935 
// Constructs an empty list; node pool uses blocks of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3945 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor releases the blocks wholesale.
}
3952 
3953 template<typename T>
3954 void VmaRawList<T>::Clear()
3955 {
3956  if(IsEmpty() == false)
3957  {
3958  ItemType* pItem = m_pBack;
3959  while(pItem != VMA_NULL)
3960  {
3961  ItemType* const pPrevItem = pItem->pPrev;
3962  m_ItemAllocator.Free(pItem);
3963  pItem = pPrevItem;
3964  }
3965  m_pFront = VMA_NULL;
3966  m_pBack = VMA_NULL;
3967  m_Count = 0;
3968  }
3969 }
3970 
3971 template<typename T>
3972 VmaListItem<T>* VmaRawList<T>::PushBack()
3973 {
3974  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3975  pNewItem->pNext = VMA_NULL;
3976  if(IsEmpty())
3977  {
3978  pNewItem->pPrev = VMA_NULL;
3979  m_pFront = pNewItem;
3980  m_pBack = pNewItem;
3981  m_Count = 1;
3982  }
3983  else
3984  {
3985  pNewItem->pPrev = m_pBack;
3986  m_pBack->pNext = pNewItem;
3987  m_pBack = pNewItem;
3988  ++m_Count;
3989  }
3990  return pNewItem;
3991 }
3992 
3993 template<typename T>
3994 VmaListItem<T>* VmaRawList<T>::PushFront()
3995 {
3996  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3997  pNewItem->pPrev = VMA_NULL;
3998  if(IsEmpty())
3999  {
4000  pNewItem->pNext = VMA_NULL;
4001  m_pFront = pNewItem;
4002  m_pBack = pNewItem;
4003  m_Count = 1;
4004  }
4005  else
4006  {
4007  pNewItem->pNext = m_pFront;
4008  m_pFront->pPrev = pNewItem;
4009  m_pFront = pNewItem;
4010  ++m_Count;
4011  }
4012  return pNewItem;
4013 }
4014 
// Appends a new node initialized with a copy of value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
4022 
// Prepends a new node initialized with a copy of value.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
4030 
// Removes and frees the tail node. List must not be empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    // If pPrevItem is null the list becomes empty (m_pBack = null).
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
4045 
// Removes and frees the head node. List must not be empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    // If pNextItem is null the list becomes empty (m_pFront = null).
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
4060 
// Unlinks pItem from the list and returns its node to the pool.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix up the previous neighbor, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix up the next neighbor, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4090 
// Inserts a new node (Value uninitialized) before pItem and returns it.
// Passing null pItem appends at the back (equivalent to PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front: the new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4116 
// Inserts a new node (Value uninitialized) after pItem and returns it.
// Passing null pItem prepends at the front (equivalent to PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back: the new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4142 
// Inserts a copy of value before pItem (null pItem = append at back).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
4150 
// Inserts a copy of value after pItem (null pItem = prepend at front).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
4158 
// STL-style wrapper over VmaRawList: bidirectional iterators, begin/end,
// push_back/insert/erase. end() is represented by a null m_pItem.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        // Null means the past-the-end position.
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it; insert(end(), v) appends at the back.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4343 
4344 #endif // #if VMA_USE_STL_LIST
4345 
4347 // class VmaMap
4348 
4349 // Unused in this version.
4350 #if 0
4351 
4352 #if VMA_USE_STL_UNORDERED_MAP
4353 
4354 #define VmaPair std::pair
4355 
4356 #define VMA_MAP_TYPE(KeyT, ValueT) \
4357  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4358 
4359 #else // #if VMA_USE_STL_UNORDERED_MAP
4360 
// Minimal std::pair replacement (POD-friendly) for VmaMap entries.
// NOTE: inside '#if 0' - this code is currently compiled out.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4370 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally a sorted VmaVector of pairs, searched by binary search.
NOTE: inside '#if 0' - this code is currently compiled out.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    // Returns iterator to pair with given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4393 
4394 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4395 
// Orders VmaPairs by their first member; the second overload allows
// heterogeneous lookup by key alone.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4408 
// Inserts pair at its sorted position (binary search + vector insert).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4419 
// Binary-searches for key; returns iterator to the pair or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4437 
// Removes the entry it points to; it must be a valid iterator (not end()).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4443 
4444 #endif // #if VMA_USE_STL_UNORDERED_MAP
4445 
4446 #endif // #if 0
4447 
4449 
4450 class VmaDeviceMemoryBlock;
4451 
4452 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4453 
/*
Internal representation behind the public VmaAllocation handle.
An allocation is either a sub-range of a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the union below stores the variant data.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit in m_MapCount: allocation was created persistently mapped
    // (VMA_ALLOCATION_CREATE_MAPPED_BIT).
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a string owned by this allocation.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Starts in ALLOCATION_TYPE_NONE; one of the Init* methods below must
    // be called exactly once to make the object usable.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes as a sub-allocation out of given block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an already-lost block allocation (no backing block).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were its own block.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the UINT64_MAX sentinel.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Variant data discriminated by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4672 
4673 /*
4674 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4675 allocated memory block or free.
4676 */
struct VmaSuballocation
{
    // Offset of this region from the beginning of the VkDeviceMemory block, in bytes.
    VkDeviceSize offset;
    // Size of this region in bytes.
    VkDeviceSize size;
    // Allocation occupying this region. Presumably null/unused when the region is
    // free (type == free) — confirm against the code that populates this struct.
    VmaAllocation hAllocation;
    // Kind of content in this region (free range, buffer, image, ...).
    VmaSuballocationType type;
};
4684 
4685 // Comparator for offsets.
4686 struct VmaSuballocationOffsetLess
4687 {
4688  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4689  {
4690  return lhs.offset < rhs.offset;
4691  }
4692 };
4693 struct VmaSuballocationOffsetGreater
4694 {
4695  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4696  {
4697  return lhs.offset > rhs.offset;
4698  }
4699 };
4700 
// Ordered sequence of suballocations describing the full contents of one
// VkDeviceMemory block (allocated regions and free gaps).
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes (1 MiB).
// Used by VmaAllocationRequest::CalcCost to weigh "make lost" candidates.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4705 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    // Proposed byte offset of the new allocation inside the block.
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    // See struct-level comment: FREE item, or first of a sequence when canMakeOtherLost.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // Opaque data owned by the metadata algorithm that produced this request —
    // interpretation is algorithm-specific; not inspected here.
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // that would be sacrificed, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST) for each allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4733 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class: concrete strategies are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear, and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called once after construction; records the managed block size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks the allocations referenced by pAllocationRequest as lost.
    // Returns false when that is no longer possible (e.g. state changed since the request).
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: resizing unsupported by this metadata algorithm.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes to emit a common JSON layout.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    // Total size of the managed VkDeviceMemory block, set by Init().
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
4822 
// Validation helper used inside the Validate() implementations below:
// asserts on failure and returns false from the *enclosing* function.
// Multi-statement macro wrapped in do { } while(false) so it behaves as one statement.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
4827 
// General-purpose metadata algorithm: keeps a list of suballocations
// (allocated and free) plus a by-size index of large free ranges.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total list entries minus free entries = live allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // This algorithm supports in-place resize (overrides the default "false" implementation).
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Number of FREE entries currently in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all FREE entries; kept in sync incrementally.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4920 
4921 /*
4922 Allocations and their references in internal data structure look like this:
4923 
4924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4925 
4926  0 +-------+
4927  | |
4928  | |
4929  | |
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount]
4932  +-------+
4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4934  +-------+
4935  | ... |
4936  +-------+
4937  | Alloc | 1st[1st.size() - 1]
4938  +-------+
4939  | |
4940  | |
4941  | |
4942 GetSize() +-------+
4943 
4944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4945 
4946  0 +-------+
4947  | Alloc | 2nd[0]
4948  +-------+
4949  | Alloc | 2nd[1]
4950  +-------+
4951  | ... |
4952  +-------+
4953  | Alloc | 2nd[2nd.size() - 1]
4954  +-------+
4955  | |
4956  | |
4957  | |
4958  +-------+
4959  | Alloc | 1st[m_1stNullItemsBeginCount]
4960  +-------+
4961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4962  +-------+
4963  | ... |
4964  +-------+
4965  | Alloc | 1st[1st.size() - 1]
4966  +-------+
4967  | |
4968 GetSize() +-------+
4969 
4970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4971 
4972  0 +-------+
4973  | |
4974  | |
4975  | |
4976  +-------+
4977  | Alloc | 1st[m_1stNullItemsBeginCount]
4978  +-------+
4979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4980  +-------+
4981  | ... |
4982  +-------+
4983  | Alloc | 1st[1st.size() - 1]
4984  +-------+
4985  | |
4986  | |
4987  | |
4988  +-------+
4989  | Alloc | 2nd[2nd.size() - 1]
4990  +-------+
4991  | ... |
4992  +-------+
4993  | Alloc | 2nd[1]
4994  +-------+
4995  | Alloc | 2nd[0]
4996 GetSize() +-------+
4997 
4998 */
// Linear metadata algorithm: allocations are appended, enabling ring-buffer
// and double-stack usage patterns. See the diagram in the comment above.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Physical storage for the two logical vectors; which is "1st" flips over time.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolve the current ping-pong role of the two storage vectors.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
5097 
/*
- GetSize() is the original size of allocated memory block.
- m_UsableSize is this size aligned down to a power of two.
  All allocations and calculations happen relative to m_UsableSize.
- GetUnusableSize() is the difference between them.
  It is reported as a separate, unused range, not available for allocations.

Node at level 0 has size = m_UsableSize.
Each next level contains nodes with size 2 times smaller than current level.
m_LevelCount is the maximum number of levels to use in the current object.
*/
// Buddy-allocator metadata algorithm: a binary tree of power-of-two nodes
// with per-level free lists. See the comment above for sizing rules.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free size includes the unusable tail beyond m_UsableSize.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is a single free node (nothing ever split/allocated).
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not implemented for the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public free entry points funnel into the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Smallest node size the tree will split down to, in bytes.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth (level 0 = whole usable block).
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while walking the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // The sibling node covering the other half of this node's parent.
        Node* buddy;

        // Per-type payload; which member is active is determined by `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;     // TYPE_FREE: links in the per-level free list.
            struct
            {
                VmaAllocation alloc;
            } allocation; // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                Node* leftChild;
            } split;    // TYPE_SPLIT: children are leftChild and leftChild->buddy.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Intrusive doubly-linked free list per level, threaded through Node::free.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from the usable block size.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5245 
5246 /*
5247 Represents a single block of device memory (`VkDeviceMemory`) with all the
5248 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5249 
5250 Thread-safety: This class must be externally synchronized.
5251 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block; concrete algorithm chosen at Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    // Destructor only checks invariants; actual teardown happens in Destroy().
    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. `count` increments the internal map reference count.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify guard values around an allocation, for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
5314 
// Strict weak ordering for raw pointers, used to keep pointer containers sorted.
// NOTE(review): relational comparison of pointers into different objects is
// unspecified in standard C++; this relies on the flat address model of the
// supported platforms.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        const bool ordered = lhs < rhs;
        return ordered;
    }
};
5322 
5323 class VmaDefragmentator;
5324 
5325 /*
5326 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5327 Vulkan memory type.
5328 
5329 Synchronized internally with a mutex.
5330 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks up front.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, per createInfo.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates the defragmentator on first use; subsequent calls reuse it.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // maxBytesToMove / maxAllocationsToMove are in-out budgets, decremented by work done.
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state; this struct is synchronized internally.
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5443 
// Implementation of the VmaPool handle: a custom pool is essentially one
// VmaBlockVector plus an identifier.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned exactly once (asserts it was 0 before).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
5466 
// Moves allocations between blocks of one VmaBlockVector to reduce fragmentation.
// Typical flow: AddAllocation() for each candidate, then Defragment() with budgets.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional
    // caller-owned flag set when the allocation actually gets moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders candidate allocations largest-first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during Defragment.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true), // pessimistic default until recalculated
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if it contains more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: identifier misspelling ("Descecnding") kept — renaming would break callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: supports searching a BlockInfo vector by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, respecting the remaining budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as movable; pChanged (may be null) is set when it moves.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5596 
5597 #if VMA_RECORDING_ENABLED
5598 
// Records allocator API calls to a file for later offline replay/analysis.
// Used when VmaAllocatorCreateInfo::pRecordSettings is specified.
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file described by settings.
    // useMutex enables serialization of writes from multiple threads.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header with device/memory properties and enabled extensions.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per traced public allocator entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Identification of one recorded call: calling thread + timestamp.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Helper that renders pUserData either as the user's string (when the
    // allocation uses the string-user-data flag) or as a pointer value.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // Buffer for a hex pointer representation.
        const char* m_Str; // Points into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex; // Guards writes to m_File when m_UseMutex.
    int64_t m_Freq;        // Timer frequency used to compute CallParams::time.
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5698 
5699 #endif // #if VMA_RECORDING_ENABLED
5700 
5701 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null to mean default CPU allocation.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity: the device's bufferImageGranularity, optionally
    // raised by the VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY debug macro.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory (plus bookkeeping).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
5902 
5904 // Memory allocation #2 after VmaAllocator_T definition
5905 
5906 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5907 {
5908  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5909 }
5910 
5911 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5912 {
5913  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5914 }
5915 
5916 template<typename T>
5917 static T* VmaAllocate(VmaAllocator hAllocator)
5918 {
5919  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5920 }
5921 
5922 template<typename T>
5923 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5924 {
5925  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5926 }
5927 
5928 template<typename T>
5929 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5930 {
5931  if(ptr != VMA_NULL)
5932  {
5933  ptr->~T();
5934  VmaFree(hAllocator, ptr);
5935  }
5936 }
5937 
5938 template<typename T>
5939 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5940 {
5941  if(ptr != VMA_NULL)
5942  {
5943  for(size_t i = count; i--; )
5944  ptr[i].~T();
5945  VmaFree(hAllocator, ptr);
5946  }
5947 }
5948 
5950 // VmaStringBuilder
5951 
5952 #if VMA_STATS_STRING_ENABLED
5953 
// Simple growable character buffer used to build the JSON statistics string.
// Not NUL-terminated: use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5971 
5972 void VmaStringBuilder::Add(const char* pStr)
5973 {
5974  const size_t strLen = strlen(pStr);
5975  if(strLen > 0)
5976  {
5977  const size_t oldCount = m_Data.size();
5978  m_Data.resize(oldCount + strLen);
5979  memcpy(m_Data.data() + oldCount, pStr, strLen);
5980  }
5981 }
5982 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11]; // 10 digits of UINT32_MAX + terminating NUL.
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21]; // 20 digits of UINT64_MAX + terminating NUL.
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends a textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
6003 
6004 #endif // #if VMA_STATS_STRING_ENABLED
6005 
6007 // VmaJsonWriter
6008 
6009 #if VMA_STATS_STRING_ENABLED
6010 
// Streaming JSON writer that appends to a VmaStringBuilder.
// Maintains a stack of open objects/arrays; Begin*/End* calls must nest
// correctly (checked with VMA_ASSERT in debug builds).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine = true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // WriteString writes a complete string value; Begin/Continue/EndString
    // allow composing one string value from several pieces.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // In objects, keys and values both count.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString and EndString.

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
6059 
// Indentation unit appended once per nesting level of pretty-printed output.
const char* const VmaJsonWriter::INDENT = " ";

VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must be closed before destruction.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
6074 
6075 void VmaJsonWriter::BeginObject(bool singleLine)
6076 {
6077  VMA_ASSERT(!m_InsideString);
6078 
6079  BeginValue(false);
6080  m_SB.Add('{');
6081 
6082  StackItem item;
6083  item.type = COLLECTION_TYPE_OBJECT;
6084  item.valueCount = 0;
6085  item.singleLineMode = singleLine;
6086  m_Stack.push_back(item);
6087 }
6088 
6089 void VmaJsonWriter::EndObject()
6090 {
6091  VMA_ASSERT(!m_InsideString);
6092 
6093  WriteIndent(true);
6094  m_SB.Add('}');
6095 
6096  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6097  m_Stack.pop_back();
6098 }
6099 
6100 void VmaJsonWriter::BeginArray(bool singleLine)
6101 {
6102  VMA_ASSERT(!m_InsideString);
6103 
6104  BeginValue(false);
6105  m_SB.Add('[');
6106 
6107  StackItem item;
6108  item.type = COLLECTION_TYPE_ARRAY;
6109  item.valueCount = 0;
6110  item.singleLineMode = singleLine;
6111  m_Stack.push_back(item);
6112 }
6113 
6114 void VmaJsonWriter::EndArray()
6115 {
6116  VMA_ASSERT(!m_InsideString);
6117 
6118  WriteIndent(true);
6119  m_SB.Add(']');
6120 
6121  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6122  m_Stack.pop_back();
6123 }
6124 
// Writes a complete, quoted and escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
6130 
6131 void VmaJsonWriter::BeginString(const char* pStr)
6132 {
6133  VMA_ASSERT(!m_InsideString);
6134 
6135  BeginValue(true);
6136  m_SB.Add('"');
6137  m_InsideString = true;
6138  if(pStr != VMA_NULL && pStr[0] != '\0')
6139  {
6140  ContinueString(pStr);
6141  }
6142 }
6143 
6144 void VmaJsonWriter::ContinueString(const char* pStr)
6145 {
6146  VMA_ASSERT(m_InsideString);
6147 
6148  const size_t strLen = strlen(pStr);
6149  for(size_t i = 0; i < strLen; ++i)
6150  {
6151  char ch = pStr[i];
6152  if(ch == '\\')
6153  {
6154  m_SB.Add("\\\\");
6155  }
6156  else if(ch == '"')
6157  {
6158  m_SB.Add("\\\"");
6159  }
6160  else if(ch >= 32)
6161  {
6162  m_SB.Add(ch);
6163  }
6164  else switch(ch)
6165  {
6166  case '\b':
6167  m_SB.Add("\\b");
6168  break;
6169  case '\f':
6170  m_SB.Add("\\f");
6171  break;
6172  case '\n':
6173  m_SB.Add("\\n");
6174  break;
6175  case '\r':
6176  m_SB.Add("\\r");
6177  break;
6178  case '\t':
6179  m_SB.Add("\\t");
6180  break;
6181  default:
6182  VMA_ASSERT(0 && "Character not currently supported.");
6183  break;
6184  }
6185  }
6186 }
6187 
6188 void VmaJsonWriter::ContinueString(uint32_t n)
6189 {
6190  VMA_ASSERT(m_InsideString);
6191  m_SB.AddNumber(n);
6192 }
6193 
6194 void VmaJsonWriter::ContinueString(uint64_t n)
6195 {
6196  VMA_ASSERT(m_InsideString);
6197  m_SB.AddNumber(n);
6198 }
6199 
6200 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6201 {
6202  VMA_ASSERT(m_InsideString);
6203  m_SB.AddPointer(ptr);
6204 }
6205 
6206 void VmaJsonWriter::EndString(const char* pStr)
6207 {
6208  VMA_ASSERT(m_InsideString);
6209  if(pStr != VMA_NULL && pStr[0] != '\0')
6210  {
6211  ContinueString(pStr);
6212  }
6213  m_SB.Add('"');
6214  m_InsideString = false;
6215 }
6216 
// Writes a standalone numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a standalone numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a standalone boolean value.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes a standalone null value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
6244 
6245 void VmaJsonWriter::BeginValue(bool isString)
6246 {
6247  if(!m_Stack.empty())
6248  {
6249  StackItem& currItem = m_Stack.back();
6250  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6251  currItem.valueCount % 2 == 0)
6252  {
6253  VMA_ASSERT(isString);
6254  }
6255 
6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6257  currItem.valueCount % 2 != 0)
6258  {
6259  m_SB.Add(": ");
6260  }
6261  else if(currItem.valueCount > 0)
6262  {
6263  m_SB.Add(", ");
6264  WriteIndent();
6265  }
6266  else
6267  {
6268  WriteIndent();
6269  }
6270  ++currItem.valueCount;
6271  }
6272 }
6273 
6274 void VmaJsonWriter::WriteIndent(bool oneLess)
6275 {
6276  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6277  {
6278  m_SB.AddNewLine();
6279 
6280  size_t count = m_Stack.size();
6281  if(count > 0 && oneLess)
6282  {
6283  --count;
6284  }
6285  for(size_t i = 0; i < count; ++i)
6286  {
6287  m_SB.Add(INDENT);
6288  }
6289  }
6290 }
6291 
6292 #endif // #if VMA_STATS_STRING_ENABLED
6293 
6295 
6296 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6297 {
6298  if(IsUserDataString())
6299  {
6300  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6301 
6302  FreeUserDataString(hAllocator);
6303 
6304  if(pUserData != VMA_NULL)
6305  {
6306  const char* const newStrSrc = (char*)pUserData;
6307  const size_t newStrLen = strlen(newStrSrc);
6308  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6309  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6310  m_pUserData = newStrDst;
6311  }
6312  }
6313  else
6314  {
6315  m_pUserData = pUserData;
6316  }
6317 }
6318 
// Rebinds this allocation to a different block/offset (used by
// defragmentation). Transfers the allocation's map reference count from the
// old block to the new one so mapping state stays consistent.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}

// Updates the recorded size of this allocation (used by vmaResizeAllocation).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
6346 
6347 VkDeviceSize VmaAllocation_T::GetOffset() const
6348 {
6349  switch(m_Type)
6350  {
6351  case ALLOCATION_TYPE_BLOCK:
6352  return m_BlockAllocation.m_Offset;
6353  case ALLOCATION_TYPE_DEDICATED:
6354  return 0;
6355  default:
6356  VMA_ASSERT(0);
6357  return 0;
6358  }
6359 }
6360 
6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
6362 {
6363  switch(m_Type)
6364  {
6365  case ALLOCATION_TYPE_BLOCK:
6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
6367  case ALLOCATION_TYPE_DEDICATED:
6368  return m_DedicatedAllocation.m_hMemory;
6369  default:
6370  VMA_ASSERT(0);
6371  return VK_NULL_HANDLE;
6372  }
6373 }
6374 
6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6376 {
6377  switch(m_Type)
6378  {
6379  case ALLOCATION_TYPE_BLOCK:
6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6381  case ALLOCATION_TYPE_DEDICATED:
6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
6383  default:
6384  VMA_ASSERT(0);
6385  return UINT32_MAX;
6386  }
6387 }
6388 
6389 void* VmaAllocation_T::GetMappedData() const
6390 {
6391  switch(m_Type)
6392  {
6393  case ALLOCATION_TYPE_BLOCK:
6394  if(m_MapCount != 0)
6395  {
6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6397  VMA_ASSERT(pBlockData != VMA_NULL);
6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6399  }
6400  else
6401  {
6402  return VMA_NULL;
6403  }
6404  break;
6405  case ALLOCATION_TYPE_DEDICATED:
6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6407  return m_DedicatedAllocation.m_pMappedData;
6408  default:
6409  VMA_ASSERT(0);
6410  return VMA_NULL;
6411  }
6412 }
6413 
6414 bool VmaAllocation_T::CanBecomeLost() const
6415 {
6416  switch(m_Type)
6417  {
6418  case ALLOCATION_TYPE_BLOCK:
6419  return m_BlockAllocation.m_CanBecomeLost;
6420  case ALLOCATION_TYPE_DEDICATED:
6421  return false;
6422  default:
6423  VMA_ASSERT(0);
6424  return false;
6425  }
6426 }
6427 
6428 VmaPool VmaAllocation_T::GetPool() const
6429 {
6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6431  return m_BlockAllocation.m_hPool;
6432 }
6433 
// Attempts to mark this allocation as lost. Returns true on success, false
// when the allocation was used too recently (within frameInUseCount frames)
// or is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free retry loop around a compare-exchange on the last-use frame
    // index. NOTE(review): assumes CompareExchangeLastUseFrameIndex refreshes
    // localLastUseFrameIndex on failure (standard CAS convention) - otherwise
    // this loop could not make progress; confirm against its definition.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
6465 
6466 #if VMA_STATS_STRING_ENABLED
6467 
6468 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Order and count must stay in sync with that enum - indexed directly.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6477 
// Writes this allocation's parameters as key/value pairs into an already
// open JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy - emit verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - emit its value as a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6513 
6514 #endif
6515 
6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6517 {
6518  VMA_ASSERT(IsUserDataString());
6519  if(m_pUserData != VMA_NULL)
6520  {
6521  char* const oldStr = (char*)m_pUserData;
6522  const size_t oldStrLen = strlen(oldStr);
6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6524  m_pUserData = VMA_NULL;
6525  }
6526 }
6527 
6528 void VmaAllocation_T::BlockAllocMap()
6529 {
6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6531 
6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6533  {
6534  ++m_MapCount;
6535  }
6536  else
6537  {
6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6539  }
6540 }
6541 
6542 void VmaAllocation_T::BlockAllocUnmap()
6543 {
6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6545 
6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6547  {
6548  --m_MapCount;
6549  }
6550  else
6551  {
6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6553  }
6554 }
6555 
// Maps a dedicated allocation. The first call performs vkMapMemory over the
// whole VkDeviceMemory; subsequent calls only bump the reference count and
// return the cached pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer, up to the count limit.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

// Unmaps a dedicated allocation. vkUnmapMemory is called only when the map
// reference count drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
6613 
6614 #if VMA_STATS_STRING_ENABLED
6615 
6616 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6617 {
6618  json.BeginObject();
6619 
6620  json.WriteString("Blocks");
6621  json.WriteNumber(stat.blockCount);
6622 
6623  json.WriteString("Allocations");
6624  json.WriteNumber(stat.allocationCount);
6625 
6626  json.WriteString("UnusedRanges");
6627  json.WriteNumber(stat.unusedRangeCount);
6628 
6629  json.WriteString("UsedBytes");
6630  json.WriteNumber(stat.usedBytes);
6631 
6632  json.WriteString("UnusedBytes");
6633  json.WriteNumber(stat.unusedBytes);
6634 
6635  if(stat.allocationCount > 1)
6636  {
6637  json.WriteString("AllocationSize");
6638  json.BeginObject(true);
6639  json.WriteString("Min");
6640  json.WriteNumber(stat.allocationSizeMin);
6641  json.WriteString("Avg");
6642  json.WriteNumber(stat.allocationSizeAvg);
6643  json.WriteString("Max");
6644  json.WriteNumber(stat.allocationSizeMax);
6645  json.EndObject();
6646  }
6647 
6648  if(stat.unusedRangeCount > 1)
6649  {
6650  json.WriteString("UnusedRangeSize");
6651  json.BeginObject(true);
6652  json.WriteString("Min");
6653  json.WriteNumber(stat.unusedRangeSizeMin);
6654  json.WriteString("Avg");
6655  json.WriteNumber(stat.unusedRangeSizeAvg);
6656  json.WriteString("Max");
6657  json.WriteNumber(stat.unusedRangeSizeMax);
6658  json.EndObject();
6659  }
6660 
6661  json.EndObject();
6662 }
6663 
6664 #endif // #if VMA_STATS_STRING_ENABLED
6665 
6666 struct VmaSuballocationItemSizeLess
6667 {
6668  bool operator()(
6669  const VmaSuballocationList::iterator lhs,
6670  const VmaSuballocationList::iterator rhs) const
6671  {
6672  return lhs->size < rhs->size;
6673  }
6674  bool operator()(
6675  const VmaSuballocationList::iterator lhs,
6676  VkDeviceSize rhsSize) const
6677  {
6678  return lhs->size < rhsSize;
6679  }
6680 };
6681 
6682 
6684 // class VmaBlockMetadata
6685 
// Base-class constructor: size is set later via Init(); only the allocation
// callbacks are captured here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
6691 
6692 #if VMA_STATS_STRING_ENABLED
6693 
6694 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6695  VkDeviceSize unusedBytes,
6696  size_t allocationCount,
6697  size_t unusedRangeCount) const
6698 {
6699  json.BeginObject();
6700 
6701  json.WriteString("TotalBytes");
6702  json.WriteNumber(GetSize());
6703 
6704  json.WriteString("UnusedBytes");
6705  json.WriteNumber(unusedBytes);
6706 
6707  json.WriteString("Allocations");
6708  json.WriteNumber((uint64_t)allocationCount);
6709 
6710  json.WriteString("UnusedRanges");
6711  json.WriteNumber((uint64_t)unusedRangeCount);
6712 
6713  json.WriteString("Suballocations");
6714  json.BeginArray();
6715 }
6716 
// Writes one used-suballocation element into the open "Suballocations" array:
// its offset plus whatever parameters the allocation object prints about itself.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type, size, user data etc. are delegated to the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
6730 
// Writes one free-range element into the open "Suballocations" array,
// marked with the FREE suballocation type name.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
6748 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6754 
6755 #endif // #if VMA_STATS_STRING_ENABLED
6756 
6758 // class VmaBlockMetadata_Generic
6759 
// Constructor: starts with empty metadata. Both containers use the allocator's
// CPU allocation callbacks via VmaStlAllocator. Real state is set up in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
6768 
// Trivial destructor: members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6772 
// Initializes metadata for a freshly created block of the given size:
// the whole block becomes one single free suballocation, registered in the
// by-size vector.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be tracked in m_FreeSuballocationsBySize.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    // Take an iterator to the element just pushed (end() - 1) and register it.
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
6792 
// Full consistency check of this block's metadata. VMA_VALIDATE returns false
// from this function on the first violated condition; reaching the end means
// all invariants hold.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    // Pass 1: walk suballocations in address order and check per-item invariants.
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free items must have a null allocation handle; used items a non-null one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the suballocation record.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // Pass 2: check the by-size vector itself.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
6874 
6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6876 {
6877  if(!m_FreeSuballocationsBySize.empty())
6878  {
6879  return m_FreeSuballocationsBySize.back()->size;
6880  }
6881  else
6882  {
6883  return 0;
6884  }
6885 }
6886 
6887 bool VmaBlockMetadata_Generic::IsEmpty() const
6888 {
6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6890 }
6891 
6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6893 {
6894  outInfo.blockCount = 1;
6895 
6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6897  outInfo.allocationCount = rangeCount - m_FreeCount;
6898  outInfo.unusedRangeCount = m_FreeCount;
6899 
6900  outInfo.unusedBytes = m_SumFreeSize;
6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6902 
6903  outInfo.allocationSizeMin = UINT64_MAX;
6904  outInfo.allocationSizeMax = 0;
6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
6906  outInfo.unusedRangeSizeMax = 0;
6907 
6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6909  suballocItem != m_Suballocations.cend();
6910  ++suballocItem)
6911  {
6912  const VmaSuballocation& suballoc = *suballocItem;
6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6914  {
6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6917  }
6918  else
6919  {
6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6922  }
6923  }
6924 }
6925 
6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6927 {
6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6929 
6930  inoutStats.size += GetSize();
6931  inoutStats.unusedSize += m_SumFreeSize;
6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
6933  inoutStats.unusedRangeCount += m_FreeCount;
6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6935 }
6936 
6937 #if VMA_STATS_STRING_ENABLED
6938 
6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6940 {
6941  PrintDetailedMap_Begin(json,
6942  m_SumFreeSize, // unusedBytes
6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6944  m_FreeCount); // unusedRangeCount
6945 
6946  size_t i = 0;
6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6948  suballocItem != m_Suballocations.cend();
6949  ++suballocItem, ++i)
6950  {
6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6952  {
6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6954  }
6955  else
6956  {
6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6958  }
6959  }
6960 
6961  PrintDetailedMap_End(json);
6962 }
6963 
6964 #endif // #if VMA_STATS_STRING_ENABLED
6965 
6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6967  uint32_t currentFrameIndex,
6968  uint32_t frameInUseCount,
6969  VkDeviceSize bufferImageGranularity,
6970  VkDeviceSize allocSize,
6971  VkDeviceSize allocAlignment,
6972  bool upperAddress,
6973  VmaSuballocationType allocType,
6974  bool canMakeOtherLost,
6975  uint32_t strategy,
6976  VmaAllocationRequest* pAllocationRequest)
6977 {
6978  VMA_ASSERT(allocSize > 0);
6979  VMA_ASSERT(!upperAddress);
6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6982  VMA_HEAVY_ASSERT(Validate());
6983 
6984  // There is not enough total free space in this block to fullfill the request: Early return.
6985  if(canMakeOtherLost == false &&
6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6987  {
6988  return false;
6989  }
6990 
6991  // New algorithm, efficiently searching freeSuballocationsBySize.
6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6993  if(freeSuballocCount > 0)
6994  {
6996  {
6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6999  m_FreeSuballocationsBySize.data(),
7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7001  allocSize + 2 * VMA_DEBUG_MARGIN,
7002  VmaSuballocationItemSizeLess());
7003  size_t index = it - m_FreeSuballocationsBySize.data();
7004  for(; index < freeSuballocCount; ++index)
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  m_FreeSuballocationsBySize[index],
7014  false, // canMakeOtherLost
7015  &pAllocationRequest->offset,
7016  &pAllocationRequest->itemsToMakeLostCount,
7017  &pAllocationRequest->sumFreeSize,
7018  &pAllocationRequest->sumItemSize))
7019  {
7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7021  return true;
7022  }
7023  }
7024  }
7025  else // WORST_FIT, FIRST_FIT
7026  {
7027  // Search staring from biggest suballocations.
7028  for(size_t index = freeSuballocCount; index--; )
7029  {
7030  if(CheckAllocation(
7031  currentFrameIndex,
7032  frameInUseCount,
7033  bufferImageGranularity,
7034  allocSize,
7035  allocAlignment,
7036  allocType,
7037  m_FreeSuballocationsBySize[index],
7038  false, // canMakeOtherLost
7039  &pAllocationRequest->offset,
7040  &pAllocationRequest->itemsToMakeLostCount,
7041  &pAllocationRequest->sumFreeSize,
7042  &pAllocationRequest->sumItemSize))
7043  {
7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7045  return true;
7046  }
7047  }
7048  }
7049  }
7050 
7051  if(canMakeOtherLost)
7052  {
7053  // Brute-force algorithm. TODO: Come up with something better.
7054 
7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7057 
7058  VmaAllocationRequest tmpAllocRequest = {};
7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7060  suballocIt != m_Suballocations.end();
7061  ++suballocIt)
7062  {
7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7064  suballocIt->hAllocation->CanBecomeLost())
7065  {
7066  if(CheckAllocation(
7067  currentFrameIndex,
7068  frameInUseCount,
7069  bufferImageGranularity,
7070  allocSize,
7071  allocAlignment,
7072  allocType,
7073  suballocIt,
7074  canMakeOtherLost,
7075  &tmpAllocRequest.offset,
7076  &tmpAllocRequest.itemsToMakeLostCount,
7077  &tmpAllocRequest.sumFreeSize,
7078  &tmpAllocRequest.sumItemSize))
7079  {
7080  tmpAllocRequest.item = suballocIt;
7081 
7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7084  {
7085  *pAllocationRequest = tmpAllocRequest;
7086  }
7087  }
7088  }
7089  }
7090 
7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7092  {
7093  return true;
7094  }
7095  }
7096 
7097  return false;
7098 }
7099 
// Carries out the "make other allocations lost" part of a previously created
// allocation request: walks forward from pAllocationRequest->item, making
// each non-free, lost-capable allocation lost until the requested count is
// reached. Returns false if any of them refuses to become lost.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over items that are already free.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; it returns the surviving
            // free item, which becomes the new cursor.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
7131 
// Makes lost every allocation in this block that is allowed to become lost
// given the current frame index. Returns how many allocations were lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; continue from the
            // returned (surviving) iterator.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
7149 
7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7151 {
7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7153  it != m_Suballocations.end();
7154  ++it)
7155  {
7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7157  {
7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7159  {
7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7161  return VK_ERROR_VALIDATION_FAILED_EXT;
7162  }
7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7164  {
7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7166  return VK_ERROR_VALIDATION_FAILED_EXT;
7167  }
7168  }
7169  }
7170 
7171  return VK_SUCCESS;
7172 }
7173 
// Commits a previously validated allocation request: converts the free
// suballocation pointed to by request.item into a used one of exactly
// allocSize bytes, carving off leading/trailing remainders as new free
// suballocations, and updates m_FreeCount / m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free item is gone, but each inserted
    // padding adds one free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
7239 
7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7241 {
7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7243  suballocItem != m_Suballocations.end();
7244  ++suballocItem)
7245  {
7246  VmaSuballocation& suballoc = *suballocItem;
7247  if(suballoc.hAllocation == allocation)
7248  {
7249  FreeSuballocation(suballocItem);
7250  VMA_HEAVY_ASSERT(Validate());
7251  return;
7252  }
7253  }
7254  VMA_ASSERT(0 && "Not found!");
7255 }
7256 
7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7258 {
7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7260  suballocItem != m_Suballocations.end();
7261  ++suballocItem)
7262  {
7263  VmaSuballocation& suballoc = *suballocItem;
7264  if(suballoc.offset == offset)
7265  {
7266  FreeSuballocation(suballocItem);
7267  return;
7268  }
7269  }
7270  VMA_ASSERT(0 && "Not found!");
7271 }
7272 
// Tries to resize an existing allocation in place to newSize. Shrinking
// always succeeds (the reclaimed bytes become/extend a free range after the
// allocation). Growing succeeds only if the immediately following
// suballocation is a free range large enough (including the debug margin).
// Returns true on success, false if growth is impossible; asserts if the
// allocation is not found in this block.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because its size changes and
                        // m_FreeSuballocationsBySize must stay sorted.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
7399 
7400 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7401 {
7402  VkDeviceSize lastSize = 0;
7403  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7404  {
7405  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7406 
7407  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7408  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7409  VMA_VALIDATE(it->size >= lastSize);
7410  lastSize = it->size;
7411  }
7412  return true;
7413 }
7414 
// Tests whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at the suballocation pointed to by suballocItem.
// On success returns true and fills *pOffset with the final aligned offset.
// With canMakeOtherLost == true the candidate region may span several
// suballocations; *itemsToMakeLostCount / *pSumItemSize report how many
// lost-capable allocations would have to be sacrificed and their total size,
// and *pSumFreeSize the free bytes in the region. Otherwise suballocItem must
// itself be a free range big enough for the request.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting item may be free or a lost-capable allocation whose
        // last-use frame is old enough to be sacrificed.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Align up to the granularity page boundary to avoid the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                // Clamp to 0 instead of underflowing the unsigned subtraction.
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the candidate must be a single free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7688 
7689 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7690 {
7691  VMA_ASSERT(item != m_Suballocations.end());
7692  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7693 
7694  VmaSuballocationList::iterator nextItem = item;
7695  ++nextItem;
7696  VMA_ASSERT(nextItem != m_Suballocations.end());
7697  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7698 
7699  item->size += nextItem->size;
7700  --m_FreeCount;
7701  m_Suballocations.erase(nextItem);
7702 }
7703 
// Marks the given suballocation as free, updates the totals, and coalesces it
// with adjacent free neighbors. Returns an iterator to the resulting
// (possibly merged) free suballocation, freshly registered in
// m_FreeSuballocationsBySize.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors being merged must be unregistered first, because merging
    // changes sizes and erases elements that m_FreeSuballocationsBySize
    // would otherwise still reference.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem (and transitively nextItem); it is the
        // surviving element and gets re-registered with its new size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7755 
7756 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7757 {
7758  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7759  VMA_ASSERT(item->size > 0);
7760 
7761  // You may want to enable this validation at the beginning or at the end of
7762  // this function, depending on what do you want to check.
7763  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7764 
7765  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7766  {
7767  if(m_FreeSuballocationsBySize.empty())
7768  {
7769  m_FreeSuballocationsBySize.push_back(item);
7770  }
7771  else
7772  {
7773  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7774  }
7775  }
7776 
7777  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7778 }
7779 
7780 
7781 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7782 {
7783  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7784  VMA_ASSERT(item->size > 0);
7785 
7786  // You may want to enable this validation at the beginning or at the end of
7787  // this function, depending on what do you want to check.
7788  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7789 
7790  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7791  {
7792  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7793  m_FreeSuballocationsBySize.data(),
7794  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7795  item,
7796  VmaSuballocationItemSizeLess());
7797  for(size_t index = it - m_FreeSuballocationsBySize.data();
7798  index < m_FreeSuballocationsBySize.size();
7799  ++index)
7800  {
7801  if(m_FreeSuballocationsBySize[index] == item)
7802  {
7803  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7804  return;
7805  }
7806  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7807  }
7808  VMA_ASSERT(0 && "Not found.");
7809  }
7810 
7811  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7812 }
7813 
7815 // class VmaBlockMetadata_Linear
7816 
// Constructs empty metadata for a linear block: both suballocation vectors
// start empty (allocated via the allocator's callbacks), the 2nd vector is
// unused (SECOND_VECTOR_EMPTY), and all null-item counters are zero.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7829 
// Nothing to release explicitly: the suballocation vectors clean themselves up.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7833 
// Initializes metadata for a block of the given size. The whole block starts
// free, so the free-size total equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7839 
// Consistency check of the linear metadata. Each VMA_VALIDATE returns false
// from this function on failure. Walks the 2nd vector (ring-buffer mode),
// then the 1st vector, then the 2nd vector again top-down (double-stack
// mode), verifying ordering, margins, null-item counters, and that the sum
// of used sizes matches m_SumFreeSize.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when it is in use.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // Ring-buffer mode with a non-empty 2nd vector requires a non-empty 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the number of stored items.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum admissible offset of the next suballocation,
    // including the debug margin between neighbors.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the space before the
        // 1st vector, so it is walked first, in increasing-offset order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // A free item must have a null handle and vice versa.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The allocation object must agree with the metadata record.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading run of the 1st vector must consist entirely of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    // Walk the remainder of the 1st vector in increasing-offset order.
    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i >= m_1stNullItemsBeginCount always holds here given
        // the loop's start index, so this check is trivially true.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows downward from the end of
        // the block, so iterate it backwards to get increasing offsets.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Final totals must be internally consistent.
    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
7966 
7967 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7968 {
7969  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7970  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7971 }
7972 
7973 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7974 {
7975  const VkDeviceSize size = GetSize();
7976 
7977  /*
7978  We don't consider gaps inside allocation vectors with freed allocations because
7979  they are not suitable for reuse in linear allocator. We consider only space that
7980  is available for new allocations.
7981  */
7982  if(IsEmpty())
7983  {
7984  return size;
7985  }
7986 
7987  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7988 
7989  switch(m_2ndVectorMode)
7990  {
7991  case SECOND_VECTOR_EMPTY:
7992  /*
7993  Available space is after end of 1st, as well as before beginning of 1st (which
7994  whould make it a ring buffer).
7995  */
7996  {
7997  const size_t suballocations1stCount = suballocations1st.size();
7998  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7999  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8000  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8001  return VMA_MAX(
8002  firstSuballoc.offset,
8003  size - (lastSuballoc.offset + lastSuballoc.size));
8004  }
8005  break;
8006 
8007  case SECOND_VECTOR_RING_BUFFER:
8008  /*
8009  Available space is only between end of 2nd and beginning of 1st.
8010  */
8011  {
8012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8013  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8014  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8015  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8016  }
8017  break;
8018 
8019  case SECOND_VECTOR_DOUBLE_STACK:
8020  /*
8021  Available space is only between end of 1st and top of 2nd.
8022  */
8023  {
8024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8025  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8026  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8027  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8028  }
8029  break;
8030 
8031  default:
8032  VMA_ASSERT(0);
8033  return 0;
8034  }
8035 }
8036 
8037 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8038 {
8039  const VkDeviceSize size = GetSize();
8040  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8041  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8042  const size_t suballoc1stCount = suballocations1st.size();
8043  const size_t suballoc2ndCount = suballocations2nd.size();
8044 
8045  outInfo.blockCount = 1;
8046  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8047  outInfo.unusedRangeCount = 0;
8048  outInfo.usedBytes = 0;
8049  outInfo.allocationSizeMin = UINT64_MAX;
8050  outInfo.allocationSizeMax = 0;
8051  outInfo.unusedRangeSizeMin = UINT64_MAX;
8052  outInfo.unusedRangeSizeMax = 0;
8053 
8054  VkDeviceSize lastOffset = 0;
8055 
8056  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8057  {
8058  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8059  size_t nextAlloc2ndIndex = 0;
8060  while(lastOffset < freeSpace2ndTo1stEnd)
8061  {
8062  // Find next non-null allocation or move nextAllocIndex to the end.
8063  while(nextAlloc2ndIndex < suballoc2ndCount &&
8064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8065  {
8066  ++nextAlloc2ndIndex;
8067  }
8068 
8069  // Found non-null allocation.
8070  if(nextAlloc2ndIndex < suballoc2ndCount)
8071  {
8072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8073 
8074  // 1. Process free space before this allocation.
8075  if(lastOffset < suballoc.offset)
8076  {
8077  // There is free space from lastOffset to suballoc.offset.
8078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8079  ++outInfo.unusedRangeCount;
8080  outInfo.unusedBytes += unusedRangeSize;
8081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8082  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8083  }
8084 
8085  // 2. Process this allocation.
8086  // There is allocation with suballoc.offset, suballoc.size.
8087  outInfo.usedBytes += suballoc.size;
8088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8089  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8090 
8091  // 3. Prepare for next iteration.
8092  lastOffset = suballoc.offset + suballoc.size;
8093  ++nextAlloc2ndIndex;
8094  }
8095  // We are at the end.
8096  else
8097  {
8098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8099  if(lastOffset < freeSpace2ndTo1stEnd)
8100  {
8101  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8102  ++outInfo.unusedRangeCount;
8103  outInfo.unusedBytes += unusedRangeSize;
8104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8105  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8106  }
8107 
8108  // End of loop.
8109  lastOffset = freeSpace2ndTo1stEnd;
8110  }
8111  }
8112  }
8113 
8114  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8115  const VkDeviceSize freeSpace1stTo2ndEnd =
8116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8117  while(lastOffset < freeSpace1stTo2ndEnd)
8118  {
8119  // Find next non-null allocation or move nextAllocIndex to the end.
8120  while(nextAlloc1stIndex < suballoc1stCount &&
8121  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8122  {
8123  ++nextAlloc1stIndex;
8124  }
8125 
8126  // Found non-null allocation.
8127  if(nextAlloc1stIndex < suballoc1stCount)
8128  {
8129  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8130 
8131  // 1. Process free space before this allocation.
8132  if(lastOffset < suballoc.offset)
8133  {
8134  // There is free space from lastOffset to suballoc.offset.
8135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8136  ++outInfo.unusedRangeCount;
8137  outInfo.unusedBytes += unusedRangeSize;
8138  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8139  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8140  }
8141 
8142  // 2. Process this allocation.
8143  // There is allocation with suballoc.offset, suballoc.size.
8144  outInfo.usedBytes += suballoc.size;
8145  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8146  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8147 
8148  // 3. Prepare for next iteration.
8149  lastOffset = suballoc.offset + suballoc.size;
8150  ++nextAlloc1stIndex;
8151  }
8152  // We are at the end.
8153  else
8154  {
8155  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8156  if(lastOffset < freeSpace1stTo2ndEnd)
8157  {
8158  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8159  ++outInfo.unusedRangeCount;
8160  outInfo.unusedBytes += unusedRangeSize;
8161  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8162  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8163  }
8164 
8165  // End of loop.
8166  lastOffset = freeSpace1stTo2ndEnd;
8167  }
8168  }
8169 
8170  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8171  {
8172  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8173  while(lastOffset < size)
8174  {
8175  // Find next non-null allocation or move nextAllocIndex to the end.
8176  while(nextAlloc2ndIndex != SIZE_MAX &&
8177  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8178  {
8179  --nextAlloc2ndIndex;
8180  }
8181 
8182  // Found non-null allocation.
8183  if(nextAlloc2ndIndex != SIZE_MAX)
8184  {
8185  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8186 
8187  // 1. Process free space before this allocation.
8188  if(lastOffset < suballoc.offset)
8189  {
8190  // There is free space from lastOffset to suballoc.offset.
8191  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8192  ++outInfo.unusedRangeCount;
8193  outInfo.unusedBytes += unusedRangeSize;
8194  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8195  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8196  }
8197 
8198  // 2. Process this allocation.
8199  // There is allocation with suballoc.offset, suballoc.size.
8200  outInfo.usedBytes += suballoc.size;
8201  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8202  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8203 
8204  // 3. Prepare for next iteration.
8205  lastOffset = suballoc.offset + suballoc.size;
8206  --nextAlloc2ndIndex;
8207  }
8208  // We are at the end.
8209  else
8210  {
8211  // There is free space from lastOffset to size.
8212  if(lastOffset < size)
8213  {
8214  const VkDeviceSize unusedRangeSize = size - lastOffset;
8215  ++outInfo.unusedRangeCount;
8216  outInfo.unusedBytes += unusedRangeSize;
8217  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8218  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8219  }
8220 
8221  // End of loop.
8222  lastOffset = size;
8223  }
8224  }
8225  }
8226 
8227  outInfo.unusedBytes = size - outInfo.usedBytes;
8228 }
8229 
8230 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8231 {
8232  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8233  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8234  const VkDeviceSize size = GetSize();
8235  const size_t suballoc1stCount = suballocations1st.size();
8236  const size_t suballoc2ndCount = suballocations2nd.size();
8237 
8238  inoutStats.size += size;
8239 
8240  VkDeviceSize lastOffset = 0;
8241 
8242  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8243  {
8244  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8245  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
8246  while(lastOffset < freeSpace2ndTo1stEnd)
8247  {
8248  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8249  while(nextAlloc2ndIndex < suballoc2ndCount &&
8250  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8251  {
8252  ++nextAlloc2ndIndex;
8253  }
8254 
8255  // Found non-null allocation.
8256  if(nextAlloc2ndIndex < suballoc2ndCount)
8257  {
8258  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8259 
8260  // 1. Process free space before this allocation.
8261  if(lastOffset < suballoc.offset)
8262  {
8263  // There is free space from lastOffset to suballoc.offset.
8264  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8265  inoutStats.unusedSize += unusedRangeSize;
8266  ++inoutStats.unusedRangeCount;
8267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8268  }
8269 
8270  // 2. Process this allocation.
8271  // There is allocation with suballoc.offset, suballoc.size.
8272  ++inoutStats.allocationCount;
8273 
8274  // 3. Prepare for next iteration.
8275  lastOffset = suballoc.offset + suballoc.size;
8276  ++nextAlloc2ndIndex;
8277  }
8278  // We are at the end.
8279  else
8280  {
8281  if(lastOffset < freeSpace2ndTo1stEnd)
8282  {
8283  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8284  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8285  inoutStats.unusedSize += unusedRangeSize;
8286  ++inoutStats.unusedRangeCount;
8287  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8288  }
8289 
8290  // End of loop.
8291  lastOffset = freeSpace2ndTo1stEnd;
8292  }
8293  }
8294  }
8295 
8296  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8297  const VkDeviceSize freeSpace1stTo2ndEnd =
8298  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8299  while(lastOffset < freeSpace1stTo2ndEnd)
8300  {
8301  // Find next non-null allocation or move nextAllocIndex to the end.
8302  while(nextAlloc1stIndex < suballoc1stCount &&
8303  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8304  {
8305  ++nextAlloc1stIndex;
8306  }
8307 
8308  // Found non-null allocation.
8309  if(nextAlloc1stIndex < suballoc1stCount)
8310  {
8311  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8312 
8313  // 1. Process free space before this allocation.
8314  if(lastOffset < suballoc.offset)
8315  {
8316  // There is free space from lastOffset to suballoc.offset.
8317  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8318  inoutStats.unusedSize += unusedRangeSize;
8319  ++inoutStats.unusedRangeCount;
8320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8321  }
8322 
8323  // 2. Process this allocation.
8324  // There is allocation with suballoc.offset, suballoc.size.
8325  ++inoutStats.allocationCount;
8326 
8327  // 3. Prepare for next iteration.
8328  lastOffset = suballoc.offset + suballoc.size;
8329  ++nextAlloc1stIndex;
8330  }
8331  // We are at the end.
8332  else
8333  {
8334  if(lastOffset < freeSpace1stTo2ndEnd)
8335  {
8336  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8337  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8338  inoutStats.unusedSize += unusedRangeSize;
8339  ++inoutStats.unusedRangeCount;
8340  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8341  }
8342 
8343  // End of loop.
8344  lastOffset = freeSpace1stTo2ndEnd;
8345  }
8346  }
8347 
8348  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8349  {
8350  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8351  while(lastOffset < size)
8352  {
8353  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8354  while(nextAlloc2ndIndex != SIZE_MAX &&
8355  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8356  {
8357  --nextAlloc2ndIndex;
8358  }
8359 
8360  // Found non-null allocation.
8361  if(nextAlloc2ndIndex != SIZE_MAX)
8362  {
8363  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8364 
8365  // 1. Process free space before this allocation.
8366  if(lastOffset < suballoc.offset)
8367  {
8368  // There is free space from lastOffset to suballoc.offset.
8369  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8370  inoutStats.unusedSize += unusedRangeSize;
8371  ++inoutStats.unusedRangeCount;
8372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8373  }
8374 
8375  // 2. Process this allocation.
8376  // There is allocation with suballoc.offset, suballoc.size.
8377  ++inoutStats.allocationCount;
8378 
8379  // 3. Prepare for next iteration.
8380  lastOffset = suballoc.offset + suballoc.size;
8381  --nextAlloc2ndIndex;
8382  }
8383  // We are at the end.
8384  else
8385  {
8386  if(lastOffset < size)
8387  {
8388  // There is free space from lastOffset to size.
8389  const VkDeviceSize unusedRangeSize = size - lastOffset;
8390  inoutStats.unusedSize += unusedRangeSize;
8391  ++inoutStats.unusedRangeCount;
8392  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8393  }
8394 
8395  // End of loop.
8396  lastOffset = size;
8397  }
8398  }
8399  }
8400 }
8401 
8402 #if VMA_STATS_STRING_ENABLED
8403 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8404 {
8405  const VkDeviceSize size = GetSize();
8406  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8407  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8408  const size_t suballoc1stCount = suballocations1st.size();
8409  const size_t suballoc2ndCount = suballocations2nd.size();
8410 
8411  // FIRST PASS
8412 
8413  size_t unusedRangeCount = 0;
8414  VkDeviceSize usedBytes = 0;
8415 
8416  VkDeviceSize lastOffset = 0;
8417 
8418  size_t alloc2ndCount = 0;
8419  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8420  {
8421  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8422  size_t nextAlloc2ndIndex = 0;
8423  while(lastOffset < freeSpace2ndTo1stEnd)
8424  {
8425  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8426  while(nextAlloc2ndIndex < suballoc2ndCount &&
8427  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8428  {
8429  ++nextAlloc2ndIndex;
8430  }
8431 
8432  // Found non-null allocation.
8433  if(nextAlloc2ndIndex < suballoc2ndCount)
8434  {
8435  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8436 
8437  // 1. Process free space before this allocation.
8438  if(lastOffset < suballoc.offset)
8439  {
8440  // There is free space from lastOffset to suballoc.offset.
8441  ++unusedRangeCount;
8442  }
8443 
8444  // 2. Process this allocation.
8445  // There is allocation with suballoc.offset, suballoc.size.
8446  ++alloc2ndCount;
8447  usedBytes += suballoc.size;
8448 
8449  // 3. Prepare for next iteration.
8450  lastOffset = suballoc.offset + suballoc.size;
8451  ++nextAlloc2ndIndex;
8452  }
8453  // We are at the end.
8454  else
8455  {
8456  if(lastOffset < freeSpace2ndTo1stEnd)
8457  {
8458  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8459  ++unusedRangeCount;
8460  }
8461 
8462  // End of loop.
8463  lastOffset = freeSpace2ndTo1stEnd;
8464  }
8465  }
8466  }
8467 
8468  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8469  size_t alloc1stCount = 0;
8470  const VkDeviceSize freeSpace1stTo2ndEnd =
8471  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8472  while(lastOffset < freeSpace1stTo2ndEnd)
8473  {
8474  // Find next non-null allocation or move nextAllocIndex to the end.
8475  while(nextAlloc1stIndex < suballoc1stCount &&
8476  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8477  {
8478  ++nextAlloc1stIndex;
8479  }
8480 
8481  // Found non-null allocation.
8482  if(nextAlloc1stIndex < suballoc1stCount)
8483  {
8484  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8485 
8486  // 1. Process free space before this allocation.
8487  if(lastOffset < suballoc.offset)
8488  {
8489  // There is free space from lastOffset to suballoc.offset.
8490  ++unusedRangeCount;
8491  }
8492 
8493  // 2. Process this allocation.
8494  // There is allocation with suballoc.offset, suballoc.size.
8495  ++alloc1stCount;
8496  usedBytes += suballoc.size;
8497 
8498  // 3. Prepare for next iteration.
8499  lastOffset = suballoc.offset + suballoc.size;
8500  ++nextAlloc1stIndex;
8501  }
8502  // We are at the end.
8503  else
8504  {
8505  if(lastOffset < size)
8506  {
8507  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8508  ++unusedRangeCount;
8509  }
8510 
8511  // End of loop.
8512  lastOffset = freeSpace1stTo2ndEnd;
8513  }
8514  }
8515 
8516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8517  {
8518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8519  while(lastOffset < size)
8520  {
8521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8522  while(nextAlloc2ndIndex != SIZE_MAX &&
8523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8524  {
8525  --nextAlloc2ndIndex;
8526  }
8527 
8528  // Found non-null allocation.
8529  if(nextAlloc2ndIndex != SIZE_MAX)
8530  {
8531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8532 
8533  // 1. Process free space before this allocation.
8534  if(lastOffset < suballoc.offset)
8535  {
8536  // There is free space from lastOffset to suballoc.offset.
8537  ++unusedRangeCount;
8538  }
8539 
8540  // 2. Process this allocation.
8541  // There is allocation with suballoc.offset, suballoc.size.
8542  ++alloc2ndCount;
8543  usedBytes += suballoc.size;
8544 
8545  // 3. Prepare for next iteration.
8546  lastOffset = suballoc.offset + suballoc.size;
8547  --nextAlloc2ndIndex;
8548  }
8549  // We are at the end.
8550  else
8551  {
8552  if(lastOffset < size)
8553  {
8554  // There is free space from lastOffset to size.
8555  ++unusedRangeCount;
8556  }
8557 
8558  // End of loop.
8559  lastOffset = size;
8560  }
8561  }
8562  }
8563 
8564  const VkDeviceSize unusedBytes = size - usedBytes;
8565  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8566 
8567  // SECOND PASS
8568  lastOffset = 0;
8569 
8570  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8571  {
8572  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8573  size_t nextAlloc2ndIndex = 0;
8574  while(lastOffset < freeSpace2ndTo1stEnd)
8575  {
8576  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8577  while(nextAlloc2ndIndex < suballoc2ndCount &&
8578  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8579  {
8580  ++nextAlloc2ndIndex;
8581  }
8582 
8583  // Found non-null allocation.
8584  if(nextAlloc2ndIndex < suballoc2ndCount)
8585  {
8586  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8587 
8588  // 1. Process free space before this allocation.
8589  if(lastOffset < suballoc.offset)
8590  {
8591  // There is free space from lastOffset to suballoc.offset.
8592  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8594  }
8595 
8596  // 2. Process this allocation.
8597  // There is allocation with suballoc.offset, suballoc.size.
8598  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8599 
8600  // 3. Prepare for next iteration.
8601  lastOffset = suballoc.offset + suballoc.size;
8602  ++nextAlloc2ndIndex;
8603  }
8604  // We are at the end.
8605  else
8606  {
8607  if(lastOffset < freeSpace2ndTo1stEnd)
8608  {
8609  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8610  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8612  }
8613 
8614  // End of loop.
8615  lastOffset = freeSpace2ndTo1stEnd;
8616  }
8617  }
8618  }
8619 
8620  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8621  while(lastOffset < freeSpace1stTo2ndEnd)
8622  {
8623  // Find next non-null allocation or move nextAllocIndex to the end.
8624  while(nextAlloc1stIndex < suballoc1stCount &&
8625  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8626  {
8627  ++nextAlloc1stIndex;
8628  }
8629 
8630  // Found non-null allocation.
8631  if(nextAlloc1stIndex < suballoc1stCount)
8632  {
8633  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8634 
8635  // 1. Process free space before this allocation.
8636  if(lastOffset < suballoc.offset)
8637  {
8638  // There is free space from lastOffset to suballoc.offset.
8639  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8640  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8641  }
8642 
8643  // 2. Process this allocation.
8644  // There is allocation with suballoc.offset, suballoc.size.
8645  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8646 
8647  // 3. Prepare for next iteration.
8648  lastOffset = suballoc.offset + suballoc.size;
8649  ++nextAlloc1stIndex;
8650  }
8651  // We are at the end.
8652  else
8653  {
8654  if(lastOffset < freeSpace1stTo2ndEnd)
8655  {
8656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8658  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8659  }
8660 
8661  // End of loop.
8662  lastOffset = freeSpace1stTo2ndEnd;
8663  }
8664  }
8665 
8666  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8667  {
8668  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8669  while(lastOffset < size)
8670  {
8671  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8672  while(nextAlloc2ndIndex != SIZE_MAX &&
8673  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8674  {
8675  --nextAlloc2ndIndex;
8676  }
8677 
8678  // Found non-null allocation.
8679  if(nextAlloc2ndIndex != SIZE_MAX)
8680  {
8681  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8682 
8683  // 1. Process free space before this allocation.
8684  if(lastOffset < suballoc.offset)
8685  {
8686  // There is free space from lastOffset to suballoc.offset.
8687  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8688  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8689  }
8690 
8691  // 2. Process this allocation.
8692  // There is allocation with suballoc.offset, suballoc.size.
8693  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8694 
8695  // 3. Prepare for next iteration.
8696  lastOffset = suballoc.offset + suballoc.size;
8697  --nextAlloc2ndIndex;
8698  }
8699  // We are at the end.
8700  else
8701  {
8702  if(lastOffset < size)
8703  {
8704  // There is free space from lastOffset to size.
8705  const VkDeviceSize unusedRangeSize = size - lastOffset;
8706  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8707  }
8708 
8709  // End of loop.
8710  lastOffset = size;
8711  }
8712  }
8713  }
8714 
8715  PrintDetailedMap_End(json);
8716 }
8717 #endif // #if VMA_STATS_STRING_ENABLED
8718 
/*
Searches for a place for a new allocation in this linear (ring-buffer /
double-stack) block, without modifying any metadata. On success fills
*pAllocationRequest and returns true; otherwise returns false.

- upperAddress == true: allocate from the top of the block downwards, treating
  the 2nd suballocation vector as the upper side of a double stack.
- upperAddress == false: allocate at the end of the 1st vector, or wrap around
  to the end of the 2nd vector (ring-buffer mode), possibly counting existing
  allocations as "to be made lost" when canMakeOtherLost is true.

`strategy` is part of the common interface but is not used by the linear
algorithm. `bufferImageGranularity` conflicts are resolved per the Vulkan
requirement that buffers and images on the same "page" must not alias.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack and ring-buffer usage of the 2nd vector are mutually
        // exclusive for the lifetime of the block.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Would underflow below offset 0 — does not fit.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downwards, so align DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate from the top of the upper stack downwards (back of 2nd
            // vector holds the lowest-offset upper allocation).
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends either at the bottom of the upper stack or at
            // the end of the whole block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Count how many allocations at the beginning of the 1st
                // vector collide with the proposed range and could be lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
9091 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount
(previously determined by CreateAllocationRequest with canMakeOtherLost=true).
Walks the 1st vector from the first non-null item and converts allocations to
free items until the requested count is reached. Returns false if any of them
refuses to become lost (e.g. was used too recently); the metadata may then be
partially modified but stays valid.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Losing items is only meaningful in ring-buffer (or not-yet-decided) mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null (free) item and account for it.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
9136 
9137 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9138 {
9139  uint32_t lostAllocationCount = 0;
9140 
9141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9142  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9143  {
9144  VmaSuballocation& suballoc = suballocations1st[i];
9145  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9146  suballoc.hAllocation->CanBecomeLost() &&
9147  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9148  {
9149  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9150  suballoc.hAllocation = VK_NULL_HANDLE;
9151  ++m_1stNullItemsMiddleCount;
9152  m_SumFreeSize += suballoc.size;
9153  ++lostAllocationCount;
9154  }
9155  }
9156 
9157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9159  {
9160  VmaSuballocation& suballoc = suballocations2nd[i];
9161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9162  suballoc.hAllocation->CanBecomeLost() &&
9163  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9164  {
9165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9166  suballoc.hAllocation = VK_NULL_HANDLE;
9167  ++m_2ndNullItemsCount;
9168  ++lostAllocationCount;
9169  }
9170  }
9171 
9172  if(lostAllocationCount)
9173  {
9174  CleanupAfterFree();
9175  }
9176 
9177  return lostAllocationCount;
9178 }
9179 
9180 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9181 {
9182  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9183  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9184  {
9185  const VmaSuballocation& suballoc = suballocations1st[i];
9186  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9187  {
9188  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9189  {
9190  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9191  return VK_ERROR_VALIDATION_FAILED_EXT;
9192  }
9193  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9194  {
9195  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9196  return VK_ERROR_VALIDATION_FAILED_EXT;
9197  }
9198  }
9199  }
9200 
9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9202  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9203  {
9204  const VmaSuballocation& suballoc = suballocations2nd[i];
9205  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9206  {
9207  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9208  {
9209  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9210  return VK_ERROR_VALIDATION_FAILED_EXT;
9211  }
9212  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9213  {
9214  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9215  return VK_ERROR_VALIDATION_FAILED_EXT;
9216  }
9217  }
9218  }
9219 
9220  return VK_SUCCESS;
9221 }
9222 
/*
Commits an allocation previously found by CreateAllocationRequest: inserts a
new suballocation into the appropriate vector and, where needed, switches
m_2ndVectorMode. The placement logic mirrors the request logic:
- upperAddress: push onto the 2nd vector, which becomes the upper side of a
  double stack.
- otherwise: append to the 1st vector, or wrap around into the 2nd vector
  (ring-buffer mode) when the new offset lies before the 1st vector's head.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Wrapping around either starts ring-buffer mode or continues
                // it; double-stack mode must never reach this path.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither valid placement — the request did not
                // come from CreateAllocationRequest on this metadata state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
9292 
9293 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9294 {
9295  FreeAtOffset(allocation->GetOffset());
9296 }
9297 
/*
Frees the suballocation that starts at `offset`. Tries the cheap paths first
(first item of 1st vector, last item of 2nd or 1st vector), then falls back to
a sorted binary search in the middle of either vector. Each successful path
updates m_SumFreeSize and null-item counters and runs CleanupAfterFree() to
compact bookkeeping. Asserts if the offset does not match any allocation.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by offset ascending (skipping leading nulls).
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer 2nd vector is sorted ascending by offset; double-stack
        // 2nd vector grows downward, hence sorted descending.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
9386 
9387 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9388 {
9389  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9390  const size_t suballocCount = AccessSuballocations1st().size();
9391  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9392 }
9393 
/*
Housekeeping run after every free / make-lost operation. Trims null items from
the edges of both vectors, optionally compacts the 1st vector (dropping null
items from its middle), resets m_2ndVectorMode when the 2nd vector empties,
and — in ring-buffer mode — promotes the 2nd vector to become the new 1st
vector when the old 1st vector becomes empty.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Block has no live allocations: reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items of the 1st vector to the front, dropping
            // every null item, then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The ring buffer's remaining items (old 2nd vector) become the
                // new 1st vector; leading nulls are re-counted for it.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9490 
9491 
9493 // class VmaBlockMetadata_Buddy
9494 
// Constructs empty buddy-allocator metadata. m_FreeCount starts at 1 because
// the root node created later in Init() is the single free node; sizes are
// filled in by Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
9504 
// Recursively deletes the whole buddy tree starting from the root.
// DeleteNode is presumably a no-op for VMA_NULL — TODO confirm, since Init()
// may not have been called.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
9509 
/*
Initializes metadata for a block of the given size. The usable size is the
block size rounded DOWN to a power of 2 (the buddy algorithm requires it); any
remaining tail is unusable. Determines the number of levels and creates a
single free root node spanning the whole usable size.
*/
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: add levels while the next level's node size is
    // still at least MIN_NODE_SIZE, capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Root node: one free node covering [0, m_UsableSize) at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
9534 
/*
Debug validation of the buddy metadata: checks the node tree recursively,
cross-checks allocation count and free-size totals against values recomputed
during traversal, and verifies that every per-level free list is a consistent
doubly-linked list of TYPE_FREE nodes. Returns true when everything is
consistent (VMA_VALIDATE returns false from this function otherwise).
*/
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of each list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be the recorded tail.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
9577 
9578 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9579 {
9580  for(uint32_t level = 0; level < m_LevelCount; ++level)
9581  {
9582  if(m_FreeList[level].front != VMA_NULL)
9583  {
9584  return LevelToNodeSize(level);
9585  }
9586  }
9587  return 0;
9588 }
9589 
/*
Fills outInfo with statistics for this single block by traversing the buddy
tree, then accounts for the unusable tail (block size minus the power-of-2
usable size) as one extra unused range. Avg fields are left at 0 here —
presumably computed by the caller from totals; TODO confirm.
*/
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Min/max fields start at neutral values so the recursive traversal can
    // fold node values into them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        // Count the unusable tail as one more unused range.
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
9613 
9614 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9615 {
9616  const VkDeviceSize unusableSize = GetUnusableSize();
9617 
9618  inoutStats.size += GetSize();
9619  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9620  inoutStats.allocationCount += m_AllocationCount;
9621  inoutStats.unusedRangeCount += m_FreeCount;
9622  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9623 
9624  if(unusableSize > 0)
9625  {
9626  ++inoutStats.unusedRangeCount;
9627  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9628  }
9629 }
9630 
9631 #if VMA_STATS_STRING_ENABLED
9632 
9633 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9634 {
9635  // TODO optimize
9636  VmaStatInfo stat;
9637  CalcAllocationStatInfo(stat);
9638 
9639  PrintDetailedMap_Begin(
9640  json,
9641  stat.unusedBytes,
9642  stat.allocationCount,
9643  stat.unusedRangeCount);
9644 
9645  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9646 
9647  const VkDeviceSize unusableSize = GetUnusableSize();
9648  if(unusableSize > 0)
9649  {
9650  PrintDetailedMap_UnusedRange(json,
9651  m_UsableSize, // offset
9652  unusableSize); // size
9653  }
9654 
9655  PrintDetailedMap_End(json);
9656 }
9657 
9658 #endif // #if VMA_STATS_STRING_ENABLED
9659 
9660 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9661  uint32_t currentFrameIndex,
9662  uint32_t frameInUseCount,
9663  VkDeviceSize bufferImageGranularity,
9664  VkDeviceSize allocSize,
9665  VkDeviceSize allocAlignment,
9666  bool upperAddress,
9667  VmaSuballocationType allocType,
9668  bool canMakeOtherLost,
9669  uint32_t strategy,
9670  VmaAllocationRequest* pAllocationRequest)
9671 {
9672  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9673 
9674  // Simple way to respect bufferImageGranularity. May be optimized some day.
9675  // Whenever it might be an OPTIMAL image...
9676  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9677  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9678  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9679  {
9680  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9681  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9682  }
9683 
9684  if(allocSize > m_UsableSize)
9685  {
9686  return false;
9687  }
9688 
9689  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9690  for(uint32_t level = targetLevel + 1; level--; )
9691  {
9692  for(Node* freeNode = m_FreeList[level].front;
9693  freeNode != VMA_NULL;
9694  freeNode = freeNode->free.next)
9695  {
9696  if(freeNode->offset % allocAlignment == 0)
9697  {
9698  pAllocationRequest->offset = freeNode->offset;
9699  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9700  pAllocationRequest->sumItemSize = 0;
9701  pAllocationRequest->itemsToMakeLostCount = 0;
9702  pAllocationRequest->customData = (void*)(uintptr_t)level;
9703  return true;
9704  }
9705  }
9706  }
9707 
9708  return false;
9709 }
9710 
9711 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9712  uint32_t currentFrameIndex,
9713  uint32_t frameInUseCount,
9714  VmaAllocationRequest* pAllocationRequest)
9715 {
9716  /*
9717  Lost allocations are not supported in buddy allocator at the moment.
9718  Support might be added in the future.
9719  */
9720  return pAllocationRequest->itemsToMakeLostCount == 0;
9721 }
9722 
9723 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9724 {
9725  /*
9726  Lost allocations are not supported in buddy allocator at the moment.
9727  Support might be added in the future.
9728  */
9729  return 0;
9730 }
9731 
// Commits an allocation previously found by CreateAllocationRequest():
// walks down from the free node recorded in request.customData, splitting
// free nodes in half until a node of the target level is reached, then marks
// that node as allocated and updates the counters.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // CreateAllocationRequest() stored the level of the chosen node here.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // rightChild is pushed first so that leftChild ends up at the front
        // of the list — the next loop iteration relies on picking it up via
        // m_FreeList[currLevel].front.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
9806 
9807 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9808 {
9809  if(node->type == Node::TYPE_SPLIT)
9810  {
9811  DeleteNode(node->split.leftChild->buddy);
9812  DeleteNode(node->split.leftChild);
9813  }
9814 
9815  vma_delete(GetAllocationCallbacks(), node);
9816 }
9817 
9818 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9819 {
9820  VMA_VALIDATE(level < m_LevelCount);
9821  VMA_VALIDATE(curr->parent == parent);
9822  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9823  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9824  switch(curr->type)
9825  {
9826  case Node::TYPE_FREE:
9827  // curr->free.prev, next are validated separately.
9828  ctx.calculatedSumFreeSize += levelNodeSize;
9829  ++ctx.calculatedFreeCount;
9830  break;
9831  case Node::TYPE_ALLOCATION:
9832  ++ctx.calculatedAllocationCount;
9833  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9834  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9835  break;
9836  case Node::TYPE_SPLIT:
9837  {
9838  const uint32_t childrenLevel = level + 1;
9839  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9840  const Node* const leftChild = curr->split.leftChild;
9841  VMA_VALIDATE(leftChild != VMA_NULL);
9842  VMA_VALIDATE(leftChild->offset == curr->offset);
9843  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9844  {
9845  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9846  }
9847  const Node* const rightChild = leftChild->buddy;
9848  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9849  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9850  {
9851  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9852  }
9853  }
9854  break;
9855  default:
9856  return false;
9857  }
9858 
9859  return true;
9860 }
9861 
9862 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9863 {
9864  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9865  uint32_t level = 0;
9866  VkDeviceSize currLevelNodeSize = m_UsableSize;
9867  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9868  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9869  {
9870  ++level;
9871  currLevelNodeSize = nextLevelNodeSize;
9872  nextLevelNodeSize = currLevelNodeSize >> 1;
9873  }
9874  return level;
9875 }
9876 
9877 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9878 {
9879  // Find node and level.
9880  Node* node = m_Root;
9881  VkDeviceSize nodeOffset = 0;
9882  uint32_t level = 0;
9883  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9884  while(node->type == Node::TYPE_SPLIT)
9885  {
9886  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9887  if(offset < nodeOffset + nextLevelSize)
9888  {
9889  node = node->split.leftChild;
9890  }
9891  else
9892  {
9893  node = node->split.leftChild->buddy;
9894  nodeOffset += nextLevelSize;
9895  }
9896  ++level;
9897  levelNodeSize = nextLevelSize;
9898  }
9899 
9900  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9901  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9902 
9903  ++m_FreeCount;
9904  --m_AllocationCount;
9905  m_SumFreeSize += alloc->GetSize();
9906 
9907  node->type = Node::TYPE_FREE;
9908 
9909  // Join free nodes if possible.
9910  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9911  {
9912  RemoveFromFreeList(level, node->buddy);
9913  Node* const parent = node->parent;
9914 
9915  vma_delete(GetAllocationCallbacks(), node->buddy);
9916  vma_delete(GetAllocationCallbacks(), node);
9917  parent->type = Node::TYPE_FREE;
9918 
9919  node = parent;
9920  --level;
9921  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9922  --m_FreeCount;
9923  }
9924 
9925  AddToFreeListFront(level, node);
9926 }
9927 
9928 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9929 {
9930  switch(node->type)
9931  {
9932  case Node::TYPE_FREE:
9933  ++outInfo.unusedRangeCount;
9934  outInfo.unusedBytes += levelNodeSize;
9935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9936  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
9937  break;
9938  case Node::TYPE_ALLOCATION:
9939  {
9940  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9941  ++outInfo.allocationCount;
9942  outInfo.usedBytes += allocSize;
9943  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9944  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
9945 
9946  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9947  if(unusedRangeSize > 0)
9948  {
9949  ++outInfo.unusedRangeCount;
9950  outInfo.unusedBytes += unusedRangeSize;
9951  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9952  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
9953  }
9954  }
9955  break;
9956  case Node::TYPE_SPLIT:
9957  {
9958  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9959  const Node* const leftChild = node->split.leftChild;
9960  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9961  const Node* const rightChild = leftChild->buddy;
9962  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9963  }
9964  break;
9965  default:
9966  VMA_ASSERT(0);
9967  }
9968 }
9969 
9970 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9971 {
9972  VMA_ASSERT(node->type == Node::TYPE_FREE);
9973 
9974  // List is empty.
9975  Node* const frontNode = m_FreeList[level].front;
9976  if(frontNode == VMA_NULL)
9977  {
9978  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9979  node->free.prev = node->free.next = VMA_NULL;
9980  m_FreeList[level].front = m_FreeList[level].back = node;
9981  }
9982  else
9983  {
9984  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9985  node->free.prev = VMA_NULL;
9986  node->free.next = frontNode;
9987  frontNode->free.prev = node;
9988  m_FreeList[level].front = node;
9989  }
9990 }
9991 
9992 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9993 {
9994  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9995 
9996  // It is at the front.
9997  if(node->free.prev == VMA_NULL)
9998  {
9999  VMA_ASSERT(m_FreeList[level].front == node);
10000  m_FreeList[level].front = node->free.next;
10001  }
10002  else
10003  {
10004  Node* const prevFreeNode = node->free.prev;
10005  VMA_ASSERT(prevFreeNode->free.next == node);
10006  prevFreeNode->free.next = node->free.next;
10007  }
10008 
10009  // It is at the back.
10010  if(node->free.next == VMA_NULL)
10011  {
10012  VMA_ASSERT(m_FreeList[level].back == node);
10013  m_FreeList[level].back = node->free.prev;
10014  }
10015  else
10016  {
10017  Node* const nextFreeNode = node->free.next;
10018  VMA_ASSERT(nextFreeNode->free.prev == node);
10019  nextFreeNode->free.prev = node->free.prev;
10020  }
10021 }
10022 
10023 #if VMA_STATS_STRING_ENABLED
10024 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10025 {
10026  switch(node->type)
10027  {
10028  case Node::TYPE_FREE:
10029  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10030  break;
10031  case Node::TYPE_ALLOCATION:
10032  {
10033  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10034  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10035  if(allocSize < levelNodeSize)
10036  {
10037  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10038  }
10039  }
10040  break;
10041  case Node::TYPE_SPLIT:
10042  {
10043  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10044  const Node* const leftChild = node->split.leftChild;
10045  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10046  const Node* const rightChild = leftChild->buddy;
10047  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10048  }
10049  break;
10050  default:
10051  VMA_ASSERT(0);
10052  }
10053 }
10054 #endif // #if VMA_STATS_STRING_ENABLED
10055 
10056 
10058 // class VmaDeviceMemoryBlock
10059 
// Constructs an empty, uninitialized block: no metadata, no VkDeviceMemory,
// not mapped. Real resources are attached later via Init().
// hAllocator is accepted but not stored here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
10069 
10070 void VmaDeviceMemoryBlock::Init(
10071  VmaAllocator hAllocator,
10072  uint32_t newMemoryTypeIndex,
10073  VkDeviceMemory newMemory,
10074  VkDeviceSize newSize,
10075  uint32_t id,
10076  uint32_t algorithm)
10077 {
10078  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10079 
10080  m_MemoryTypeIndex = newMemoryTypeIndex;
10081  m_Id = id;
10082  m_hMemory = newMemory;
10083 
10084  switch(algorithm)
10085  {
10087  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10088  break;
10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10091  break;
10092  default:
10093  VMA_ASSERT(0);
10094  // Fall-through.
10095  case 0:
10096  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10097  }
10098  m_pMetadata->Init(newSize);
10099 }
10100 
10101 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10102 {
10103  // This is the most important assert in the entire library.
10104  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10105  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10106 
10107  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10108  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10109  m_hMemory = VK_NULL_HANDLE;
10110 
10111  vma_delete(allocator, m_pMetadata);
10112  m_pMetadata = VMA_NULL;
10113 }
10114 
10115 bool VmaDeviceMemoryBlock::Validate() const
10116 {
10117  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10118  (m_pMetadata->GetSize() != 0));
10119 
10120  return m_pMetadata->Validate();
10121 }
10122 
10123 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10124 {
10125  void* pData = nullptr;
10126  VkResult res = Map(hAllocator, 1, &pData);
10127  if(res != VK_SUCCESS)
10128  {
10129  return res;
10130  }
10131 
10132  res = m_pMetadata->CheckCorruption(pData);
10133 
10134  Unmap(hAllocator, 1);
10135 
10136  return res;
10137 }
10138 
10139 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10140 {
10141  if(count == 0)
10142  {
10143  return VK_SUCCESS;
10144  }
10145 
10146  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10147  if(m_MapCount != 0)
10148  {
10149  m_MapCount += count;
10150  VMA_ASSERT(m_pMappedData != VMA_NULL);
10151  if(ppData != VMA_NULL)
10152  {
10153  *ppData = m_pMappedData;
10154  }
10155  return VK_SUCCESS;
10156  }
10157  else
10158  {
10159  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10160  hAllocator->m_hDevice,
10161  m_hMemory,
10162  0, // offset
10163  VK_WHOLE_SIZE,
10164  0, // flags
10165  &m_pMappedData);
10166  if(result == VK_SUCCESS)
10167  {
10168  if(ppData != VMA_NULL)
10169  {
10170  *ppData = m_pMappedData;
10171  }
10172  m_MapCount = count;
10173  }
10174  return result;
10175  }
10176 }
10177 
10178 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10179 {
10180  if(count == 0)
10181  {
10182  return;
10183  }
10184 
10185  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10186  if(m_MapCount >= count)
10187  {
10188  m_MapCount -= count;
10189  if(m_MapCount == 0)
10190  {
10191  m_pMappedData = VMA_NULL;
10192  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10193  }
10194  }
10195  else
10196  {
10197  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10198  }
10199 }
10200 
10201 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10202 {
10203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10205 
10206  void* pData;
10207  VkResult res = Map(hAllocator, 1, &pData);
10208  if(res != VK_SUCCESS)
10209  {
10210  return res;
10211  }
10212 
10213  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10214  VmaWriteMagicValue(pData, allocOffset + allocSize);
10215 
10216  Unmap(hAllocator, 1);
10217 
10218  return VK_SUCCESS;
10219 }
10220 
10221 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10222 {
10223  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10224  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10225 
10226  void* pData;
10227  VkResult res = Map(hAllocator, 1, &pData);
10228  if(res != VK_SUCCESS)
10229  {
10230  return res;
10231  }
10232 
10233  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10234  {
10235  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10236  }
10237  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10238  {
10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10240  }
10241 
10242  Unmap(hAllocator, 1);
10243 
10244  return VK_SUCCESS;
10245 }
10246 
10247 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10248  const VmaAllocator hAllocator,
10249  const VmaAllocation hAllocation,
10250  VkBuffer hBuffer)
10251 {
10252  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10253  hAllocation->GetBlock() == this);
10254  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10255  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10256  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10257  hAllocator->m_hDevice,
10258  hBuffer,
10259  m_hMemory,
10260  hAllocation->GetOffset());
10261 }
10262 
10263 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10264  const VmaAllocator hAllocator,
10265  const VmaAllocation hAllocation,
10266  VkImage hImage)
10267 {
10268  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10269  hAllocation->GetBlock() == this);
10270  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10272  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10273  hAllocator->m_hDevice,
10274  hImage,
10275  m_hMemory,
10276  hAllocation->GetOffset());
10277 }
10278 
10279 static void InitStatInfo(VmaStatInfo& outInfo)
10280 {
10281  memset(&outInfo, 0, sizeof(outInfo));
10282  outInfo.allocationSizeMin = UINT64_MAX;
10283  outInfo.unusedRangeSizeMin = UINT64_MAX;
10284 }
10285 
10286 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10287 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10288 {
10289  inoutInfo.blockCount += srcInfo.blockCount;
10290  inoutInfo.allocationCount += srcInfo.allocationCount;
10291  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10292  inoutInfo.usedBytes += srcInfo.usedBytes;
10293  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10294  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10295  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10296  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10297  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10298 }
10299 
10300 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10301 {
10302  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10304  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10305  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10306 }
10307 
// Constructs a custom pool by forwarding the pool creation parameters to the
// embedded block vector. A zero createInfo.blockSize means "use the
// allocator-preferred size" and marks the block size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
10326 
// Trivial destructor: the m_BlockVector member cleans itself up.
VmaPool_T::~VmaPool_T()
{
}
10330 
10331 #if VMA_STATS_STRING_ENABLED
10332 
10333 #endif // #if VMA_STATS_STRING_ENABLED
10334 
// Constructs an empty block vector: stores the configuration and starts with
// no blocks, no defragmentator, and block-id counter at 0.
// (Member initializers follow declaration order in the class.)
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
10362 
10363 VmaBlockVector::~VmaBlockVector()
10364 {
10365  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10366 
10367  for(size_t i = m_Blocks.size(); i--; )
10368  {
10369  m_Blocks[i]->Destroy(m_hAllocator);
10370  vma_delete(m_hAllocator, m_Blocks[i]);
10371  }
10372 }
10373 
10374 VkResult VmaBlockVector::CreateMinBlocks()
10375 {
10376  for(size_t i = 0; i < m_MinBlockCount; ++i)
10377  {
10378  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10379  if(res != VK_SUCCESS)
10380  {
10381  return res;
10382  }
10383  }
10384  return VK_SUCCESS;
10385 }
10386 
10387 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10388 {
10389  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10390 
10391  const size_t blockCount = m_Blocks.size();
10392 
10393  pStats->size = 0;
10394  pStats->unusedSize = 0;
10395  pStats->allocationCount = 0;
10396  pStats->unusedRangeCount = 0;
10397  pStats->unusedRangeSizeMax = 0;
10398  pStats->blockCount = blockCount;
10399 
10400  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10401  {
10402  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10403  VMA_ASSERT(pBlock);
10404  VMA_HEAVY_ASSERT(pBlock->Validate());
10405  pBlock->m_pMetadata->AddPoolStats(*pStats);
10406  }
10407 }
10408 
10409 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10410 {
10411  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10412  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10413  (VMA_DEBUG_MARGIN > 0) &&
10414  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10415 }
10416 
// Upper bound on allocation retry attempts; usage is outside this chunk.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10418 
10419 VkResult VmaBlockVector::Allocate(
10420  VmaPool hCurrentPool,
10421  uint32_t currentFrameIndex,
10422  VkDeviceSize size,
10423  VkDeviceSize alignment,
10424  const VmaAllocationCreateInfo& createInfo,
10425  VmaSuballocationType suballocType,
10426  VmaAllocation* pAllocation)
10427 {
10428  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10429  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10430  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10431  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10432  const bool canCreateNewBlock =
10433  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10434  (m_Blocks.size() < m_MaxBlockCount);
10435  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10436 
10437  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
10438  // Which in turn is available only when maxBlockCount = 1.
10439  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10440  {
10441  canMakeOtherLost = false;
10442  }
10443 
10444  // Upper address can only be used with linear allocator and within single memory block.
10445  if(isUpperAddress &&
10446  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10447  {
10448  return VK_ERROR_FEATURE_NOT_PRESENT;
10449  }
10450 
10451  // Validate strategy.
10452  switch(strategy)
10453  {
10454  case 0:
10456  break;
10460  break;
10461  default:
10462  return VK_ERROR_FEATURE_NOT_PRESENT;
10463  }
10464 
10465  // Early reject: requested allocation size is larger that maximum block size for this block vector.
10466  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10467  {
10468  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10469  }
10470 
10471  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10472 
10473  /*
10474  Under certain condition, this whole section can be skipped for optimization, so
10475  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10476  e.g. for custom pools with linear algorithm.
10477  */
10478  if(!canMakeOtherLost || canCreateNewBlock)
10479  {
10480  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10481  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10483 
10484  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10485  {
10486  // Use only last block.
10487  if(!m_Blocks.empty())
10488  {
10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10490  VMA_ASSERT(pCurrBlock);
10491  VkResult res = AllocateFromBlock(
10492  pCurrBlock,
10493  hCurrentPool,
10494  currentFrameIndex,
10495  size,
10496  alignment,
10497  allocFlagsCopy,
10498  createInfo.pUserData,
10499  suballocType,
10500  strategy,
10501  pAllocation);
10502  if(res == VK_SUCCESS)
10503  {
10504  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10505  return VK_SUCCESS;
10506  }
10507  }
10508  }
10509  else
10510  {
10512  {
10513  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10514  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10515  {
10516  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10517  VMA_ASSERT(pCurrBlock);
10518  VkResult res = AllocateFromBlock(
10519  pCurrBlock,
10520  hCurrentPool,
10521  currentFrameIndex,
10522  size,
10523  alignment,
10524  allocFlagsCopy,
10525  createInfo.pUserData,
10526  suballocType,
10527  strategy,
10528  pAllocation);
10529  if(res == VK_SUCCESS)
10530  {
10531  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10532  return VK_SUCCESS;
10533  }
10534  }
10535  }
10536  else // WORST_FIT, FIRST_FIT
10537  {
10538  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10539  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10540  {
10541  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10542  VMA_ASSERT(pCurrBlock);
10543  VkResult res = AllocateFromBlock(
10544  pCurrBlock,
10545  hCurrentPool,
10546  currentFrameIndex,
10547  size,
10548  alignment,
10549  allocFlagsCopy,
10550  createInfo.pUserData,
10551  suballocType,
10552  strategy,
10553  pAllocation);
10554  if(res == VK_SUCCESS)
10555  {
10556  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10557  return VK_SUCCESS;
10558  }
10559  }
10560  }
10561  }
10562 
10563  // 2. Try to create new block.
10564  if(canCreateNewBlock)
10565  {
10566  // Calculate optimal size for new block.
10567  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10568  uint32_t newBlockSizeShift = 0;
10569  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10570 
10571  if(!m_ExplicitBlockSize)
10572  {
10573  // Allocate 1/8, 1/4, 1/2 as first blocks.
10574  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10575  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10576  {
10577  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10578  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10579  {
10580  newBlockSize = smallerNewBlockSize;
10581  ++newBlockSizeShift;
10582  }
10583  else
10584  {
10585  break;
10586  }
10587  }
10588  }
10589 
10590  size_t newBlockIndex = 0;
10591  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10592  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10593  if(!m_ExplicitBlockSize)
10594  {
10595  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10596  {
10597  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10598  if(smallerNewBlockSize >= size)
10599  {
10600  newBlockSize = smallerNewBlockSize;
10601  ++newBlockSizeShift;
10602  res = CreateBlock(newBlockSize, &newBlockIndex);
10603  }
10604  else
10605  {
10606  break;
10607  }
10608  }
10609  }
10610 
10611  if(res == VK_SUCCESS)
10612  {
10613  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10614  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10615 
10616  res = AllocateFromBlock(
10617  pBlock,
10618  hCurrentPool,
10619  currentFrameIndex,
10620  size,
10621  alignment,
10622  allocFlagsCopy,
10623  createInfo.pUserData,
10624  suballocType,
10625  strategy,
10626  pAllocation);
10627  if(res == VK_SUCCESS)
10628  {
10629  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10630  return VK_SUCCESS;
10631  }
10632  else
10633  {
10634  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10636  }
10637  }
10638  }
10639  }
10640 
10641  // 3. Try to allocate from existing blocks with making other allocations lost.
10642  if(canMakeOtherLost)
10643  {
10644  uint32_t tryIndex = 0;
10645  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10646  {
10647  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10648  VmaAllocationRequest bestRequest = {};
10649  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10650 
10651  // 1. Search existing allocations.
10653  {
10654  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10655  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10656  {
10657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10658  VMA_ASSERT(pCurrBlock);
10659  VmaAllocationRequest currRequest = {};
10660  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10661  currentFrameIndex,
10662  m_FrameInUseCount,
10663  m_BufferImageGranularity,
10664  size,
10665  alignment,
10666  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10667  suballocType,
10668  canMakeOtherLost,
10669  strategy,
10670  &currRequest))
10671  {
10672  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10673  if(pBestRequestBlock == VMA_NULL ||
10674  currRequestCost < bestRequestCost)
10675  {
10676  pBestRequestBlock = pCurrBlock;
10677  bestRequest = currRequest;
10678  bestRequestCost = currRequestCost;
10679 
10680  if(bestRequestCost == 0)
10681  {
10682  break;
10683  }
10684  }
10685  }
10686  }
10687  }
10688  else // WORST_FIT, FIRST_FIT
10689  {
10690  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10691  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10692  {
10693  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10694  VMA_ASSERT(pCurrBlock);
10695  VmaAllocationRequest currRequest = {};
10696  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10697  currentFrameIndex,
10698  m_FrameInUseCount,
10699  m_BufferImageGranularity,
10700  size,
10701  alignment,
10702  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10703  suballocType,
10704  canMakeOtherLost,
10705  strategy,
10706  &currRequest))
10707  {
10708  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10709  if(pBestRequestBlock == VMA_NULL ||
10710  currRequestCost < bestRequestCost ||
10712  {
10713  pBestRequestBlock = pCurrBlock;
10714  bestRequest = currRequest;
10715  bestRequestCost = currRequestCost;
10716 
10717  if(bestRequestCost == 0 ||
10719  {
10720  break;
10721  }
10722  }
10723  }
10724  }
10725  }
10726 
10727  if(pBestRequestBlock != VMA_NULL)
10728  {
10729  if(mapped)
10730  {
10731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10732  if(res != VK_SUCCESS)
10733  {
10734  return res;
10735  }
10736  }
10737 
10738  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10739  currentFrameIndex,
10740  m_FrameInUseCount,
10741  &bestRequest))
10742  {
10743  // We no longer have an empty Allocation.
10744  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10745  {
10746  m_HasEmptyBlock = false;
10747  }
10748  // Allocate from this pBlock.
10749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10750  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10751  (*pAllocation)->InitBlockAllocation(
10752  hCurrentPool,
10753  pBestRequestBlock,
10754  bestRequest.offset,
10755  alignment,
10756  size,
10757  suballocType,
10758  mapped,
10759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10761  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
10762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10763  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10764  {
10765  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10766  }
10767  if(IsCorruptionDetectionEnabled())
10768  {
10769  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10770  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10771  }
10772  return VK_SUCCESS;
10773  }
10774  // else: Some allocations must have been touched while we are here. Next try.
10775  }
10776  else
10777  {
10778  // Could not find place in any of the blocks - break outer loop.
10779  break;
10780  }
10781  }
10782  /* Maximum number of tries exceeded - a very unlike event when many other
10783  threads are simultaneously touching allocations making it impossible to make
10784  lost at the same time as we try to allocate. */
10785  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10786  {
10787  return VK_ERROR_TOO_MANY_OBJECTS;
10788  }
10789  }
10790 
10791  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10792 }
10793 
10794 void VmaBlockVector::Free(
10795  VmaAllocation hAllocation)
10796 {
10797  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10798 
10799  // Scope for lock.
10800  {
10801  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10802 
10803  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10804 
10805  if(IsCorruptionDetectionEnabled())
10806  {
10807  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10808  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10809  }
10810 
10811  if(hAllocation->IsPersistentMap())
10812  {
10813  pBlock->Unmap(m_hAllocator, 1);
10814  }
10815 
10816  pBlock->m_pMetadata->Free(hAllocation);
10817  VMA_HEAVY_ASSERT(pBlock->Validate());
10818 
10819  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
10820 
10821  // pBlock became empty after this deallocation.
10822  if(pBlock->m_pMetadata->IsEmpty())
10823  {
10824  // Already has empty Allocation. We don't want to have two, so delete this one.
10825  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10826  {
10827  pBlockToDelete = pBlock;
10828  Remove(pBlock);
10829  }
10830  // We now have first empty block.
10831  else
10832  {
10833  m_HasEmptyBlock = true;
10834  }
10835  }
10836  // pBlock didn't become empty, but we have another empty block - find and free that one.
10837  // (This is optional, heuristics.)
10838  else if(m_HasEmptyBlock)
10839  {
10840  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10841  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10842  {
10843  pBlockToDelete = pLastBlock;
10844  m_Blocks.pop_back();
10845  m_HasEmptyBlock = false;
10846  }
10847  }
10848 
10849  IncrementallySortBlocks();
10850  }
10851 
10852  // Destruction of a free Allocation. Deferred until this point, outside of mutex
10853  // lock, for performance reason.
10854  if(pBlockToDelete != VMA_NULL)
10855  {
10856  VMA_DEBUG_LOG(" Deleted empty allocation");
10857  pBlockToDelete->Destroy(m_hAllocator);
10858  vma_delete(m_hAllocator, pBlockToDelete);
10859  }
10860 }
10861 
10862 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10863 {
10864  VkDeviceSize result = 0;
10865  for(size_t i = m_Blocks.size(); i--; )
10866  {
10867  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10868  if(result >= m_PreferredBlockSize)
10869  {
10870  break;
10871  }
10872  }
10873  return result;
10874 }
10875 
10876 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10877 {
10878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10879  {
10880  if(m_Blocks[blockIndex] == pBlock)
10881  {
10882  VmaVectorRemove(m_Blocks, blockIndex);
10883  return;
10884  }
10885  }
10886  VMA_ASSERT(0);
10887 }
10888 
10889 void VmaBlockVector::IncrementallySortBlocks()
10890 {
10891  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10892  {
10893  // Bubble sort only until first swap.
10894  for(size_t i = 1; i < m_Blocks.size(); ++i)
10895  {
10896  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10897  {
10898  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10899  return;
10900  }
10901  }
10902  }
10903 }
10904 
// Attempts to suballocate `size` bytes with `alignment` from one existing
// block, without making any other allocations lost.
// On success writes the new allocation to *pAllocation and returns VK_SUCCESS;
// if the block cannot fit the request, returns VK_ERROR_OUT_OF_DEVICE_MEMORY.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Making other allocations lost is handled by a different code path.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Persistently-mapped allocation: bump the block's map reference count
        // before committing the suballocation, so failure to map aborts cleanly.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optional debug aid: fill the new memory with a recognizable pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Optional corruption detection: write magic values into the margins
        // around the allocation; they are validated again on Free().
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
10979 
10980 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10981 {
10982  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10983  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10984  allocInfo.allocationSize = blockSize;
10985  VkDeviceMemory mem = VK_NULL_HANDLE;
10986  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10987  if(res < 0)
10988  {
10989  return res;
10990  }
10991 
10992  // New VkDeviceMemory successfully created.
10993 
10994  // Create new Allocation for it.
10995  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10996  pBlock->Init(
10997  m_hAllocator,
10998  m_MemoryTypeIndex,
10999  mem,
11000  allocInfo.allocationSize,
11001  m_NextBlockId++,
11002  m_Algorithm);
11003 
11004  m_Blocks.push_back(pBlock);
11005  if(pNewBlockIndex != VMA_NULL)
11006  {
11007  *pNewBlockIndex = m_Blocks.size() - 1;
11008  }
11009 
11010  return VK_SUCCESS;
11011 }
11012 
11013 #if VMA_STATS_STRING_ENABLED
11014 
// Serializes this block vector as a JSON object: pool configuration
// (for custom pools) or preferred block size (for default pools), followed
// by a detailed map of every block keyed by block id.
// Compiled only when VMA_STATS_STRING_ENABLED is set.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools expose their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default (non-custom) pools only report the preferred block size.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
11077 
11078 #endif // #if VMA_STATS_STRING_ENABLED
11079 
11080 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11081  VmaAllocator hAllocator,
11082  uint32_t currentFrameIndex)
11083 {
11084  if(m_pDefragmentator == VMA_NULL)
11085  {
11086  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11087  hAllocator,
11088  this,
11089  currentFrameIndex);
11090  }
11091 
11092  return m_pDefragmentator;
11093 }
11094 
// Runs defragmentation on this block vector using the previously created
// defragmentator, then frees blocks that became empty.
// `maxBytesToMove` and `maxAllocationsToMove` are in/out budgets: the amounts
// actually moved are subtracted, leaving the remainder for other vectors.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Nothing to do if no defragmentator was requested for this vector.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the remaining budget for subsequent callers.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backward so VmaVectorRemove does not shift
    // indices that are still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep at least m_MinBlockCount blocks, so this empty
                // block survives; remember that fact.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
11151 
11152 void VmaBlockVector::DestroyDefragmentator()
11153 {
11154  if(m_pDefragmentator != VMA_NULL)
11155  {
11156  vma_delete(m_hAllocator, m_pDefragmentator);
11157  m_pDefragmentator = VMA_NULL;
11158  }
11159 }
11160 
11161 void VmaBlockVector::MakePoolAllocationsLost(
11162  uint32_t currentFrameIndex,
11163  size_t* pLostAllocationCount)
11164 {
11165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11166  size_t lostAllocationCount = 0;
11167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11168  {
11169  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11170  VMA_ASSERT(pBlock);
11171  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11172  }
11173  if(pLostAllocationCount != VMA_NULL)
11174  {
11175  *pLostAllocationCount = lostAllocationCount;
11176  }
11177 }
11178 
11179 VkResult VmaBlockVector::CheckCorruption()
11180 {
11181  if(!IsCorruptionDetectionEnabled())
11182  {
11183  return VK_ERROR_FEATURE_NOT_PRESENT;
11184  }
11185 
11186  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11187  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11188  {
11189  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11190  VMA_ASSERT(pBlock);
11191  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11192  if(res != VK_SUCCESS)
11193  {
11194  return res;
11195  }
11196  }
11197  return VK_SUCCESS;
11198 }
11199 
11200 void VmaBlockVector::AddStats(VmaStats* pStats)
11201 {
11202  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11203  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11204 
11205  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11206 
11207  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11208  {
11209  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11210  VMA_ASSERT(pBlock);
11211  VMA_HEAVY_ASSERT(pBlock->Validate());
11212  VmaStatInfo allocationStatInfo;
11213  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11214  VmaAddStatInfo(pStats->total, allocationStatInfo);
11215  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11216  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11217  }
11218 }
11219 
11221 // VmaDefragmentator members definition
11222 
// Constructs a defragmentator bound to a single block vector.
// The assert documents that defragmentation is only supported for the
// default algorithm (GetAlgorithm() == 0), not e.g. the linear one.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    // Both containers use the allocator's custom allocation callbacks.
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
11237 
11238 VmaDefragmentator::~VmaDefragmentator()
11239 {
11240  for(size_t i = m_Blocks.size(); i--; )
11241  {
11242  vma_delete(m_hAllocator, m_Blocks[i]);
11243  }
11244 }
11245 
11246 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11247 {
11248  AllocationInfo allocInfo;
11249  allocInfo.m_hAllocation = hAlloc;
11250  allocInfo.m_pChanged = pChanged;
11251  m_Allocations.push_back(allocInfo);
11252 }
11253 
11254 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11255 {
11256  // It has already been mapped for defragmentation.
11257  if(m_pMappedDataForDefragmentation)
11258  {
11259  *ppMappedData = m_pMappedDataForDefragmentation;
11260  return VK_SUCCESS;
11261  }
11262 
11263  // It is originally mapped.
11264  if(m_pBlock->GetMappedData())
11265  {
11266  *ppMappedData = m_pBlock->GetMappedData();
11267  return VK_SUCCESS;
11268  }
11269 
11270  // Map on first usage.
11271  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11272  *ppMappedData = m_pMappedDataForDefragmentation;
11273  return res;
11274 }
11275 
11276 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11277 {
11278  if(m_pMappedDataForDefragmentation != VMA_NULL)
11279  {
11280  m_pBlock->Unmap(hAllocator, 1);
11281  }
11282 }
11283 
// One round of the defragmentation algorithm: walks allocations from the most
// "source" block/largest allocation towards the most "destination" ones,
// moving each allocation to the earliest place it fits, until the byte or
// allocation budget is exhausted (VK_INCOMPLETE) or everything is processed
// (VK_SUCCESS).
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        // (srcAllocIndex starts/resets at SIZE_MAX, so this loop also
        // initializes it to the block's last allocation.)
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                // NOTE(review): a strategy argument appears to be missing from
                // this call in this copy of the file - verify against upstream.
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the corruption-detection margins at the new place.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register at the destination, release at the source,
                // and retarget the allocation handle to its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
11427 
// Entry point of the defragmentation algorithm for one block vector.
// Builds per-block bookkeeping, distributes the registered allocations to
// their owning blocks, runs up to two DefragmentRound() passes, then unmaps
// any memory that was mapped only for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // No allocations registered via AddAllocation() - nothing to move.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value (enables the binary search below).
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
11495 
11496 bool VmaDefragmentator::MoveMakesSense(
11497  size_t dstBlockIndex, VkDeviceSize dstOffset,
11498  size_t srcBlockIndex, VkDeviceSize srcOffset)
11499 {
11500  if(dstBlockIndex < srcBlockIndex)
11501  {
11502  return true;
11503  }
11504  if(dstBlockIndex > srcBlockIndex)
11505  {
11506  return false;
11507  }
11508  if(dstOffset < srcOffset)
11509  {
11510  return true;
11511  }
11512  return false;
11513 }
11514 
11516 // VmaRecorder
11517 
11518 #if VMA_RECORDING_ENABLED
11519 
// Constructs an inactive recorder; Init() must be called before any
// Record*() function. m_Freq and m_StartCounter hold sentinel values until
// Init() queries the performance counter.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
11528 
// Opens the recording file named in `settings` and writes the file header.
// Windows-only in this build (QueryPerformance* / fopen_s); compiled under
// VMA_RECORDING_ENABLED. Returns VK_ERROR_INITIALIZATION_FAILED if the file
// cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture the timer frequency and start time, so each record can log a
    // timestamp relative to initialization.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file signature line followed by the format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
11550 
11551 VmaRecorder::~VmaRecorder()
11552 {
11553  if(m_File != VMA_NULL)
11554  {
11555  fclose(m_File);
11556  }
11557 }
11558 
11559 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11560 {
11561  CallParams callParams;
11562  GetBasicParams(callParams);
11563 
11564  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11565  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11566  Flush();
11567 }
11568 
11569 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11570 {
11571  CallParams callParams;
11572  GetBasicParams(callParams);
11573 
11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11575  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11576  Flush();
11577 }
11578 
// Writes one CSV line recording a vmaCreatePool call with its parameters and
// the resulting pool handle. Column order is part of the recording format -
// do not reorder.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
11595 
11596 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11597 {
11598  CallParams callParams;
11599  GetBasicParams(callParams);
11600 
11601  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11602  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11603  pool);
11604  Flush();
11605 }
11606 
11607 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11608  const VkMemoryRequirements& vkMemReq,
11609  const VmaAllocationCreateInfo& createInfo,
11610  VmaAllocation allocation)
11611 {
11612  CallParams callParams;
11613  GetBasicParams(callParams);
11614 
11615  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11616  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11617  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11618  vkMemReq.size,
11619  vkMemReq.alignment,
11620  vkMemReq.memoryTypeBits,
11621  createInfo.flags,
11622  createInfo.usage,
11623  createInfo.requiredFlags,
11624  createInfo.preferredFlags,
11625  createInfo.memoryTypeBits,
11626  createInfo.pool,
11627  allocation,
11628  userDataStr.GetString());
11629  Flush();
11630 }
11631 
11632 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11633  const VkMemoryRequirements& vkMemReq,
11634  bool requiresDedicatedAllocation,
11635  bool prefersDedicatedAllocation,
11636  const VmaAllocationCreateInfo& createInfo,
11637  VmaAllocation allocation)
11638 {
11639  CallParams callParams;
11640  GetBasicParams(callParams);
11641 
11642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11643  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11644  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11645  vkMemReq.size,
11646  vkMemReq.alignment,
11647  vkMemReq.memoryTypeBits,
11648  requiresDedicatedAllocation ? 1 : 0,
11649  prefersDedicatedAllocation ? 1 : 0,
11650  createInfo.flags,
11651  createInfo.usage,
11652  createInfo.requiredFlags,
11653  createInfo.preferredFlags,
11654  createInfo.memoryTypeBits,
11655  createInfo.pool,
11656  allocation,
11657  userDataStr.GetString());
11658  Flush();
11659 }
11660 
11661 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11662  const VkMemoryRequirements& vkMemReq,
11663  bool requiresDedicatedAllocation,
11664  bool prefersDedicatedAllocation,
11665  const VmaAllocationCreateInfo& createInfo,
11666  VmaAllocation allocation)
11667 {
11668  CallParams callParams;
11669  GetBasicParams(callParams);
11670 
11671  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11672  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11673  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11674  vkMemReq.size,
11675  vkMemReq.alignment,
11676  vkMemReq.memoryTypeBits,
11677  requiresDedicatedAllocation ? 1 : 0,
11678  prefersDedicatedAllocation ? 1 : 0,
11679  createInfo.flags,
11680  createInfo.usage,
11681  createInfo.requiredFlags,
11682  createInfo.preferredFlags,
11683  createInfo.memoryTypeBits,
11684  createInfo.pool,
11685  allocation,
11686  userDataStr.GetString());
11687  Flush();
11688 }
11689 
11690 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11691  VmaAllocation allocation)
11692 {
11693  CallParams callParams;
11694  GetBasicParams(callParams);
11695 
11696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11697  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11698  allocation);
11699  Flush();
11700 }
11701 
11702 void VmaRecorder::RecordResizeAllocation(
11703  uint32_t frameIndex,
11704  VmaAllocation allocation,
11705  VkDeviceSize newSize)
11706 {
11707  CallParams callParams;
11708  GetBasicParams(callParams);
11709 
11710  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11711  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11712  allocation, newSize);
11713  Flush();
11714 }
11715 
11716 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11717  VmaAllocation allocation,
11718  const void* pUserData)
11719 {
11720  CallParams callParams;
11721  GetBasicParams(callParams);
11722 
11723  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11724  UserDataString userDataStr(
11725  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11726  pUserData);
11727  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11728  allocation,
11729  userDataStr.GetString());
11730  Flush();
11731 }
11732 
11733 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11734  VmaAllocation allocation)
11735 {
11736  CallParams callParams;
11737  GetBasicParams(callParams);
11738 
11739  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11740  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11741  allocation);
11742  Flush();
11743 }
11744 
11745 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11746  VmaAllocation allocation)
11747 {
11748  CallParams callParams;
11749  GetBasicParams(callParams);
11750 
11751  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11752  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11753  allocation);
11754  Flush();
11755 }
11756 
11757 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11758  VmaAllocation allocation)
11759 {
11760  CallParams callParams;
11761  GetBasicParams(callParams);
11762 
11763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11764  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11765  allocation);
11766  Flush();
11767 }
11768 
11769 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11770  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11771 {
11772  CallParams callParams;
11773  GetBasicParams(callParams);
11774 
11775  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11776  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11777  allocation,
11778  offset,
11779  size);
11780  Flush();
11781 }
11782 
11783 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11784  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11785 {
11786  CallParams callParams;
11787  GetBasicParams(callParams);
11788 
11789  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11790  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11791  allocation,
11792  offset,
11793  size);
11794  Flush();
11795 }
11796 
11797 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11798  const VkBufferCreateInfo& bufCreateInfo,
11799  const VmaAllocationCreateInfo& allocCreateInfo,
11800  VmaAllocation allocation)
11801 {
11802  CallParams callParams;
11803  GetBasicParams(callParams);
11804 
11805  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11806  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11807  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11808  bufCreateInfo.flags,
11809  bufCreateInfo.size,
11810  bufCreateInfo.usage,
11811  bufCreateInfo.sharingMode,
11812  allocCreateInfo.flags,
11813  allocCreateInfo.usage,
11814  allocCreateInfo.requiredFlags,
11815  allocCreateInfo.preferredFlags,
11816  allocCreateInfo.memoryTypeBits,
11817  allocCreateInfo.pool,
11818  allocation,
11819  userDataStr.GetString());
11820  Flush();
11821 }
11822 
11823 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11824  const VkImageCreateInfo& imageCreateInfo,
11825  const VmaAllocationCreateInfo& allocCreateInfo,
11826  VmaAllocation allocation)
11827 {
11828  CallParams callParams;
11829  GetBasicParams(callParams);
11830 
11831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11832  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11833  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11834  imageCreateInfo.flags,
11835  imageCreateInfo.imageType,
11836  imageCreateInfo.format,
11837  imageCreateInfo.extent.width,
11838  imageCreateInfo.extent.height,
11839  imageCreateInfo.extent.depth,
11840  imageCreateInfo.mipLevels,
11841  imageCreateInfo.arrayLayers,
11842  imageCreateInfo.samples,
11843  imageCreateInfo.tiling,
11844  imageCreateInfo.usage,
11845  imageCreateInfo.sharingMode,
11846  imageCreateInfo.initialLayout,
11847  allocCreateInfo.flags,
11848  allocCreateInfo.usage,
11849  allocCreateInfo.requiredFlags,
11850  allocCreateInfo.preferredFlags,
11851  allocCreateInfo.memoryTypeBits,
11852  allocCreateInfo.pool,
11853  allocation,
11854  userDataStr.GetString());
11855  Flush();
11856 }
11857 
11858 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11859  VmaAllocation allocation)
11860 {
11861  CallParams callParams;
11862  GetBasicParams(callParams);
11863 
11864  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11865  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11866  allocation);
11867  Flush();
11868 }
11869 
11870 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11871  VmaAllocation allocation)
11872 {
11873  CallParams callParams;
11874  GetBasicParams(callParams);
11875 
11876  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11877  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11878  allocation);
11879  Flush();
11880 }
11881 
11882 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11883  VmaAllocation allocation)
11884 {
11885  CallParams callParams;
11886  GetBasicParams(callParams);
11887 
11888  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11889  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11890  allocation);
11891  Flush();
11892 }
11893 
11894 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11895  VmaAllocation allocation)
11896 {
11897  CallParams callParams;
11898  GetBasicParams(callParams);
11899 
11900  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11901  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11902  allocation);
11903  Flush();
11904 }
11905 
11906 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11907  VmaPool pool)
11908 {
11909  CallParams callParams;
11910  GetBasicParams(callParams);
11911 
11912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11913  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11914  pool);
11915  Flush();
11916 }
11917 
11918 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11919 {
11920  if(pUserData != VMA_NULL)
11921  {
11922  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11923  {
11924  m_Str = (const char*)pUserData;
11925  }
11926  else
11927  {
11928  sprintf_s(m_PtrStr, "%p", pUserData);
11929  m_Str = m_PtrStr;
11930  }
11931  }
11932  else
11933  {
11934  m_Str = "";
11935  }
11936 }
11937 
// Writes the "Config" section at the top of the recording file: physical
// device identity/limits, memory heap/type layout, enabled extensions, and
// the compile-time VMA debug macros. The replay tool uses this to reproduce
// the environment. Called once from VmaAllocator_T::Init(), before any call
// records, so no file lock is taken here.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types, in index order.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
11983 
11984 void VmaRecorder::GetBasicParams(CallParams& outParams)
11985 {
11986  outParams.threadId = GetCurrentThreadId();
11987 
11988  LARGE_INTEGER counter;
11989  QueryPerformanceCounter(&counter);
11990  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11991 }
11992 
11993 void VmaRecorder::Flush()
11994 {
11995  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11996  {
11997  fflush(m_File);
11998  }
11999 }
12000 
12001 #endif // #if VMA_RECORDING_ENABLED
12002 
12004 // VmaAllocator_T
12005 
// Constructor: copies user configuration, imports Vulkan function pointers,
// queries device/memory properties, applies optional per-heap size limits,
// and creates one default VmaBlockVector + dedicated-allocation list per
// memory type. Recorder setup happens later in Init().
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    m_NextPoolId(0)
    // NOTE(review): an `#if VMA_RECORDING_ENABLED` guard line appears to be
    // missing immediately above this initializer (the matching `#endif`
    // follows) — likely lost in doc extraction; confirm against upstream.
    ,m_pRecorder(VMA_NULL)
#endif
{
    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

#if !(VMA_DEDICATED_ALLOCATION)
    // NOTE(review): the condition line of this `if` appears to be missing here
    // (upstream checks the KHR-dedicated-allocation flag) — likely lost in
    // doc extraction; confirm against upstream.
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    }
#endif

    // Zero all aggregate members before selective initialization below.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // Default: no per-heap limit.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    // Resolve Vulkan entry points (static linking and/or user-provided table).
    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // Alignment-related values must be powers of two for the suballocators.
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Apply user-imposed heap size limits, also shrinking the reported heap
    // sizes so block-size heuristics respect the limit.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default (non-custom) block vector per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false, // isCustomPool
            false, // explicitBlockSize
            false); // linearAlgorithm
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // becase minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));

    }
}
12105 
12106 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12107 {
12108  VkResult res = VK_SUCCESS;
12109 
12110  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12111  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12112  {
12113 #if VMA_RECORDING_ENABLED
12114  m_pRecorder = vma_new(this, VmaRecorder)();
12115  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12116  if(res != VK_SUCCESS)
12117  {
12118  return res;
12119  }
12120  m_pRecorder->WriteConfiguration(
12121  m_PhysicalDeviceProperties,
12122  m_MemProps,
12123  m_UseKhrDedicatedAllocation);
12124  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12125 #else
12126  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12127  return VK_ERROR_FEATURE_NOT_PRESENT;
12128 #endif
12129  }
12130 
12131  return res;
12132 }
12133 
// Destructor: records the destroy event and deletes the recorder (if any),
// then tears down per-memory-type structures. All custom pools must already
// be destroyed by the user.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Delete in reverse index order, mirroring construction order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
12152 
// Populates m_VulkanFunctions. When statically linked against Vulkan
// (VMA_STATIC_VULKAN_FUNCTIONS == 1) the global entry points are taken first;
// any non-null pointers in the user-supplied table then override them.
// Finally asserts that every required entry point is resolved.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Take addresses of the statically linked Vulkan loader functions.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // KHR extension functions must be fetched via vkGetDeviceProcAddr;
    // they are not exported by the static loader.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copy a user-supplied pointer only when it is non-null, so users may
// override just a subset of the table.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    // The KHR pair is only required when the extension is actually enabled.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
12238 
12239 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12240 {
12241  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12242  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12243  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12244  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12245 }
12246 
// Allocates memory of a specific, already-chosen memory type. Strategy:
// 1. Drop MAPPED flag if the type is not HOST_VISIBLE.
// 2. Prefer dedicated memory for large requests (> half preferred block size)
//    or when the caller/driver asked for it.
// 3. Otherwise try suballocation from the type's default block vector,
//    falling back to a dedicated VkDeviceMemory on failure (unless
//    NEVER_ALLOCATE forbids it).
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation != VMA_NULL);
    // NOTE(review): this log references `vkMemReq.size`, but the parameter is
    // `size` — it only compiles because VMA_DEBUG_LOG is typically a no-op.
    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        // NOTE(review): this branch body appears empty — upstream sets
        // VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT on finalCreateInfo here;
        // the statement was likely lost in doc extraction. Confirm upstream.
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            // Dedicated required but allocation forbidden: contradictory flags.
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
        }
    }
    else
    {
        // First try to suballocate from an existing or new block.
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            pAllocation);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // 5. Try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: Return error code.
                VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}
12354 
// Allocates a whole VkDeviceMemory block for a single allocation (no
// suballocation). Optionally chains VkMemoryDedicatedAllocateInfoKHR for the
// given buffer/image, optionally persistently maps the memory, then creates
// the VmaAllocation_T object and registers it in m_pDedicatedAllocations.
// On vkMapMemory failure the freshly allocated memory is freed again.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info when the KHR extension is in use
    // and a specific buffer or image was provided (at most one of the two).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map if requested; on failure, release the memory so the
    // caller sees a clean error with no leaked VkDeviceMemory.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    // Wrap the raw memory in a VmaAllocation_T object.
    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
12437 
// Queries memory requirements for the given buffer.
// When the VK_KHR_dedicated_allocation extension path is enabled, additionally
// reports whether the driver requires or prefers a dedicated allocation for it;
// otherwise both flags are reported as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Use the extended query and chain VkMemoryDedicatedRequirementsKHR
        // into pNext to receive the dedicated-allocation hints.
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 path: no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
12469 
// Queries memory requirements for the given image.
// Mirrors GetBufferMemoryRequirements(): with the VK_KHR_dedicated_allocation
// path enabled it also reports whether a dedicated allocation is required or
// preferred; otherwise both flags are false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extended query with VkMemoryDedicatedRequirementsKHR chained
        // to receive the dedicated-allocation hints.
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 path: no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
12501 
12502 VkResult VmaAllocator_T::AllocateMemory(
12503  const VkMemoryRequirements& vkMemReq,
12504  bool requiresDedicatedAllocation,
12505  bool prefersDedicatedAllocation,
12506  VkBuffer dedicatedBuffer,
12507  VkImage dedicatedImage,
12508  const VmaAllocationCreateInfo& createInfo,
12509  VmaSuballocationType suballocType,
12510  VmaAllocation* pAllocation)
12511 {
12512  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12513 
12514  if(vkMemReq.size == 0)
12515  {
12516  return VK_ERROR_VALIDATION_FAILED_EXT;
12517  }
12518  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12519  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12520  {
12521  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12522  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12523  }
12524  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12526  {
12527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12529  }
12530  if(requiresDedicatedAllocation)
12531  {
12532  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12533  {
12534  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12536  }
12537  if(createInfo.pool != VK_NULL_HANDLE)
12538  {
12539  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12541  }
12542  }
12543  if((createInfo.pool != VK_NULL_HANDLE) &&
12544  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12545  {
12546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12548  }
12549 
12550  if(createInfo.pool != VK_NULL_HANDLE)
12551  {
12552  const VkDeviceSize alignmentForPool = VMA_MAX(
12553  vkMemReq.alignment,
12554  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12555  return createInfo.pool->m_BlockVector.Allocate(
12556  createInfo.pool,
12557  m_CurrentFrameIndex.load(),
12558  vkMemReq.size,
12559  alignmentForPool,
12560  createInfo,
12561  suballocType,
12562  pAllocation);
12563  }
12564  else
12565  {
12566  // Bit mask of memory Vulkan types acceptable for this allocation.
12567  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12568  uint32_t memTypeIndex = UINT32_MAX;
12569  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12570  if(res == VK_SUCCESS)
12571  {
12572  VkDeviceSize alignmentForMemType = VMA_MAX(
12573  vkMemReq.alignment,
12574  GetMemoryTypeMinAlignment(memTypeIndex));
12575 
12576  res = AllocateMemoryOfType(
12577  vkMemReq.size,
12578  alignmentForMemType,
12579  requiresDedicatedAllocation || prefersDedicatedAllocation,
12580  dedicatedBuffer,
12581  dedicatedImage,
12582  createInfo,
12583  memTypeIndex,
12584  suballocType,
12585  pAllocation);
12586  // Succeeded on first try.
12587  if(res == VK_SUCCESS)
12588  {
12589  return res;
12590  }
12591  // Allocation from this memory type failed. Try other compatible memory types.
12592  else
12593  {
12594  for(;;)
12595  {
12596  // Remove old memTypeIndex from list of possibilities.
12597  memoryTypeBits &= ~(1u << memTypeIndex);
12598  // Find alternative memTypeIndex.
12599  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12600  if(res == VK_SUCCESS)
12601  {
12602  alignmentForMemType = VMA_MAX(
12603  vkMemReq.alignment,
12604  GetMemoryTypeMinAlignment(memTypeIndex));
12605 
12606  res = AllocateMemoryOfType(
12607  vkMemReq.size,
12608  alignmentForMemType,
12609  requiresDedicatedAllocation || prefersDedicatedAllocation,
12610  dedicatedBuffer,
12611  dedicatedImage,
12612  createInfo,
12613  memTypeIndex,
12614  suballocType,
12615  pAllocation);
12616  // Allocation from this alternative memory type succeeded.
12617  if(res == VK_SUCCESS)
12618  {
12619  return res;
12620  }
12621  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12622  }
12623  // No other matching memory type index could be found.
12624  else
12625  {
12626  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12627  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12628  }
12629  }
12630  }
12631  }
12632  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12633  else
12634  return res;
12635  }
12636 }
12637 
// Frees memory previously obtained from this allocator.
// The VmaAllocation_T object itself is always destroyed at the end, even when
// the allocation has already been lost (in which case there is no backing
// memory left to release).
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation() returns false for a lost allocation - then only the
    // handle object is deleted below, with no memory to return.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite contents with the "destroyed" pattern to help detect
            // use-after-free in debug configurations.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to the block vector it came from:
                // either its custom pool's vector or the default per-memory-type one.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Release user data (may free an owned string copy) before deleting the handle.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
12678 
12679 VkResult VmaAllocator_T::ResizeAllocation(
12680  const VmaAllocation alloc,
12681  VkDeviceSize newSize)
12682 {
12683  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12684  {
12685  return VK_ERROR_VALIDATION_FAILED_EXT;
12686  }
12687  if(newSize == alloc->GetSize())
12688  {
12689  return VK_SUCCESS;
12690  }
12691 
12692  switch(alloc->GetType())
12693  {
12694  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12695  return VK_ERROR_FEATURE_NOT_PRESENT;
12696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12697  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12698  {
12699  alloc->ChangeSize(newSize);
12700  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12701  return VK_SUCCESS;
12702  }
12703  else
12704  {
12705  return VK_ERROR_OUT_OF_POOL_MEMORY;
12706  }
12707  default:
12708  VMA_ASSERT(0);
12709  return VK_ERROR_VALIDATION_FAILED_EXT;
12710  }
12711 }
12712 
// Fills *pStats with aggregated statistics over all memory owned by this
// allocator: default per-memory-type block vectors, custom pools, and
// dedicated allocations.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize all counters (total, per memory type, per heap) to zero/empty.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools, holding the pools mutex for the whole scan.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations, per memory type, each under its own mutex.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess: derive averages/min/max from the accumulated sums.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
12763 
// PCI vendor ID of AMD: 4098 == 0x1002.
// NOTE(review): presumably used elsewhere to special-case AMD devices - confirm usage.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12765 
// Defragments the given allocations by moving them within their block vectors.
//
// Phases:
//   1. Dispatch eligible allocations among per-block-vector defragmentators.
//   2. Run defragmentation on default block vectors and custom pools,
//      honoring the optional move limits in pDefragmentationInfo.
//   3. Destroy all defragmentators.
//
// pAllocationsChanged (optional) receives per-allocation VK_TRUE/VK_FALSE
// telling whether the allocation was moved. pDefragmentationStats (optional)
// receives aggregate statistics. Returns the first non-success result, if any.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Zero out the optional output parameters up front.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Hold the pools mutex across all phases so the set of custom pools is stable.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // Default limits: effectively unlimited unless pDefragmentationInfo says otherwise.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops early on the first failure.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Done unconditionally (even after a failure) in reverse order of creation.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
12886 
// Fills *pAllocationInfo with current information about the allocation.
// For allocations that can become lost, this also "touches" the allocation:
// its last-use frame index is advanced to the current frame via a CAS loop,
// unless it has already been lost (then placeholder values are returned).
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free update: loop until the last-use frame index is observed as
        // either LOST (report lost) or equal to the current frame (report live).
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values; size and
                // user data remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Allocation is alive and already stamped with the current frame.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the stamp; on CAS failure the loop re-reads
                // whatever value the competing thread installed.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping: keep the last-use frame index current so
        // statistics dumps reflect recent usage. The allocation cannot be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
12958 
// Marks the allocation as used in the current frame and reports whether it is
// still alive. Returns false only if the allocation has become lost.
// Uses the same CAS loop as GetAllocationInfo to advance the last-use frame index.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Another thread (or a previous frame) lost this allocation.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already stamped with the current frame: alive.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the stamp; retry on CAS failure.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping: the allocation cannot become lost, but keep
        // its last-use frame index current for statistics dumps.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
13010 
13011 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13012 {
13013  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13014 
13015  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13016 
13017  if(newCreateInfo.maxBlockCount == 0)
13018  {
13019  newCreateInfo.maxBlockCount = SIZE_MAX;
13020  }
13021  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13022  {
13023  return VK_ERROR_INITIALIZATION_FAILED;
13024  }
13025 
13026  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13027 
13028  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13029 
13030  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13031  if(res != VK_SUCCESS)
13032  {
13033  vma_delete(this, *pPool);
13034  *pPool = VMA_NULL;
13035  return res;
13036  }
13037 
13038  // Add to m_Pools.
13039  {
13040  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13041  (*pPool)->SetId(m_NextPoolId++);
13042  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13043  }
13044 
13045  return VK_SUCCESS;
13046 }
13047 
13048 void VmaAllocator_T::DestroyPool(VmaPool pool)
13049 {
13050  // Remove from m_Pools.
13051  {
13052  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13053  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13054  VMA_ASSERT(success && "Pool not found in Allocator.");
13055  }
13056 
13057  vma_delete(this, pool);
13058 }
13059 
// Fills *pPoolStats with statistics of the given custom pool.
// Thin forwarder to the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
13064 
// Atomically publishes the application's current frame index, which drives
// the lost-allocation logic (see TouchAllocation / GetAllocationInfo).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
13069 
// Marks eligible allocations in the given pool as lost, as if they were not
// used in the current frame. *pLostAllocationCount (optional) receives the
// number of allocations that were marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
13078 
// Validates corruption-detection margins of all allocations in the given pool.
// Thin forwarder to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
13083 
13084 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13085 {
13086  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13087 
13088  // Process default pools.
13089  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13090  {
13091  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13092  {
13093  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13094  VMA_ASSERT(pBlockVector);
13095  VkResult localRes = pBlockVector->CheckCorruption();
13096  switch(localRes)
13097  {
13098  case VK_ERROR_FEATURE_NOT_PRESENT:
13099  break;
13100  case VK_SUCCESS:
13101  finalRes = VK_SUCCESS;
13102  break;
13103  default:
13104  return localRes;
13105  }
13106  }
13107  }
13108 
13109  // Process custom pools.
13110  {
13111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13112  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13113  {
13114  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13115  {
13116  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13117  switch(localRes)
13118  {
13119  case VK_ERROR_FEATURE_NOT_PRESENT:
13120  break;
13121  case VK_SUCCESS:
13122  finalRes = VK_SUCCESS;
13123  break;
13124  default:
13125  return localRes;
13126  }
13127  }
13128  }
13129  }
13130 
13131  return finalRes;
13132 }
13133 
// Creates an allocation object that is permanently in the "lost" state.
// Useful as a placeholder the application can swap in for lost allocations.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
13139 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit and
// invoking the user's device-memory allocation callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // The remaining budget is read, and decremented on success, under the
        // same mutex so concurrent allocations cannot overshoot the limit.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: fail without calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user's callback about the successful device memory allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
13173 
// Calls vkFreeMemory, invoking the user's device-memory free callback first
// (while the memory handle is still valid) and returning the freed size to
// the per-heap budget if a heap size limit is configured.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // VK_WHOLE_SIZE means "no limit" - nothing to give back in that case.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
13190 
13191 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13192 {
13193  if(hAllocation->CanBecomeLost())
13194  {
13195  return VK_ERROR_MEMORY_MAP_FAILED;
13196  }
13197 
13198  switch(hAllocation->GetType())
13199  {
13200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13201  {
13202  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13203  char *pBytes = VMA_NULL;
13204  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13205  if(res == VK_SUCCESS)
13206  {
13207  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13208  hAllocation->BlockAllocMap();
13209  }
13210  return res;
13211  }
13212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13213  return hAllocation->DedicatedAllocMap(this, ppData);
13214  default:
13215  VMA_ASSERT(0);
13216  return VK_ERROR_MEMORY_MAP_FAILED;
13217  }
13218 }
13219 
13220 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13221 {
13222  switch(hAllocation->GetType())
13223  {
13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13225  {
13226  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13227  hAllocation->BlockAllocUnmap();
13228  pBlock->Unmap(this, 1);
13229  }
13230  break;
13231  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13232  hAllocation->DedicatedAllocUnmap(this);
13233  break;
13234  default:
13235  VMA_ASSERT(0);
13236  }
13237 }
13238 
// Binds the given buffer to the allocation's memory.
// Dedicated allocations bind at offset 0 of their own VkDeviceMemory; block
// allocations delegate to the owning block, which applies the suballocation
// offset and serializes binds on the same block.
VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindBufferMemory(
            m_hDevice,
            hBuffer,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
13263 
// Binds the given image to the allocation's memory.
// Mirrors BindBufferMemory(): dedicated allocations bind at offset 0 of their
// own VkDeviceMemory; block allocations delegate to the owning block.
VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindImageMemory(
            m_hDevice,
            hImage,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindImageMemory(this, hAllocation, hImage);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
13288 
// Flushes or invalidates the given byte range of the allocation's memory.
// Only needed for non-coherent memory types; for coherent memory the call is
// a no-op. The range is expanded to nonCoherentAtomSize boundaries as the
// Vulkan spec requires, and clamped so it never exceeds the allocation (for
// dedicated memory) or the owning block (for block suballocations).
// offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down to an atom boundary; the size is then
            // aligned up and clamped to the end of the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: translate to block-relative offsets
            // and clamp the size so the range stays inside the block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
13364 
// Releases a dedicated allocation: unregisters it from the per-memory-type
// dedicated-allocations list and frees its VkDeviceMemory.
// The VmaAllocation_T object itself is deleted by the caller (FreeMemory).
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Unregister under the mutex guarding this memory type's list.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
13394 
// Debug helper: fills the allocation's memory with the given byte pattern
// (used to mark newly created / about-to-be-destroyed allocations so that use
// of uninitialized or freed memory is easier to spot).
// Only acts when VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, the allocation
// cannot become lost, and its memory type is HOST_VISIBLE (otherwise it cannot
// be mapped from the CPU).
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Flush so the pattern reaches memory even for non-HOST_COHERENT types.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
13415 
13416 #if VMA_STATS_STRING_ENABLED
13417 
// Emits the detailed allocation map as JSON, in three sections:
// "DedicatedAllocations" (per memory type), "DefaultPools" (per memory type),
// and "Pools" (custom pools, keyed by pool id). Sections whose data is empty
// are omitted entirely. Assumes the caller already opened the enclosing JSON
// object (see vmaBuildStatsString).
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, on the first
            // non-empty memory type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default (non-custom) block vectors, one per memory type.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the "DefaultPools" object lazily as well.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool is keyed by its numeric id rendered as a string.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
13503 
13504 #endif // #if VMA_STATS_STRING_ENABLED
13505 
13507 // Public interface
13508 
13509 VkResult vmaCreateAllocator(
13510  const VmaAllocatorCreateInfo* pCreateInfo,
13511  VmaAllocator* pAllocator)
13512 {
13513  VMA_ASSERT(pCreateInfo && pAllocator);
13514  VMA_DEBUG_LOG("vmaCreateAllocator");
13515  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13516  return (*pAllocator)->Init(pCreateInfo);
13517 }
13518 
13519 void vmaDestroyAllocator(
13520  VmaAllocator allocator)
13521 {
13522  if(allocator != VK_NULL_HANDLE)
13523  {
13524  VMA_DEBUG_LOG("vmaDestroyAllocator");
13525  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13526  vma_delete(&allocationCallbacks, allocator);
13527  }
13528 }
13529 
13531  VmaAllocator allocator,
13532  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13533 {
13534  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13535  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13536 }
13537 
13539  VmaAllocator allocator,
13540  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13541 {
13542  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13543  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13544 }
13545 
13547  VmaAllocator allocator,
13548  uint32_t memoryTypeIndex,
13549  VkMemoryPropertyFlags* pFlags)
13550 {
13551  VMA_ASSERT(allocator && pFlags);
13552  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13553  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13554 }
13555 
13557  VmaAllocator allocator,
13558  uint32_t frameIndex)
13559 {
13560  VMA_ASSERT(allocator);
13561  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13562 
13563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13564 
13565  allocator->SetCurrentFrameIndex(frameIndex);
13566 }
13567 
13568 void vmaCalculateStats(
13569  VmaAllocator allocator,
13570  VmaStats* pStats)
13571 {
13572  VMA_ASSERT(allocator && pStats);
13573  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13574  allocator->CalculateStats(pStats);
13575 }
13576 
13577 #if VMA_STATS_STRING_ENABLED
13578 
// Builds a NUL-terminated JSON report of allocator statistics into a freshly
// allocated string returned via *ppStatsString. The caller must release it
// with vmaFreeStatsString. When detailedMap is VK_TRUE, the full allocation
// map (dedicated allocations, default pools, custom pools) is appended.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope so the JSON writer is destroyed (and fully flushed to
        // sb) before the string is copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Only list memory types belonging to this heap.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the property flags as human-readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, NUL-terminated array.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
13686 
13687 void vmaFreeStatsString(
13688  VmaAllocator allocator,
13689  char* pStatsString)
13690 {
13691  if(pStatsString != VMA_NULL)
13692  {
13693  VMA_ASSERT(allocator);
13694  size_t len = strlen(pStatsString);
13695  vma_delete_array(allocator, pStatsString, len + 1);
13696  }
13697 }
13698 
13699 #endif // #if VMA_STATS_STRING_ENABLED
13700 
13701 /*
13702 This function is not protected by any mutex because it just reads immutable data.
13703 */
13704 VkResult vmaFindMemoryTypeIndex(
13705  VmaAllocator allocator,
13706  uint32_t memoryTypeBits,
13707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13708  uint32_t* pMemoryTypeIndex)
13709 {
13710  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13713 
13714  if(pAllocationCreateInfo->memoryTypeBits != 0)
13715  {
13716  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13717  }
13718 
13719  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13720  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13721 
13722  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13723  if(mapped)
13724  {
13725  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13726  }
13727 
13728  // Convert usage to requiredFlags and preferredFlags.
13729  switch(pAllocationCreateInfo->usage)
13730  {
13732  break;
13734  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13735  {
13736  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13737  }
13738  break;
13740  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13741  break;
13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13744  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13745  {
13746  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13747  }
13748  break;
13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13751  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13752  break;
13753  default:
13754  break;
13755  }
13756 
13757  *pMemoryTypeIndex = UINT32_MAX;
13758  uint32_t minCost = UINT32_MAX;
13759  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13760  memTypeIndex < allocator->GetMemoryTypeCount();
13761  ++memTypeIndex, memTypeBit <<= 1)
13762  {
13763  // This memory type is acceptable according to memoryTypeBits bitmask.
13764  if((memTypeBit & memoryTypeBits) != 0)
13765  {
13766  const VkMemoryPropertyFlags currFlags =
13767  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13768  // This memory type contains requiredFlags.
13769  if((requiredFlags & ~currFlags) == 0)
13770  {
13771  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13772  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13773  // Remember memory type with lowest cost.
13774  if(currCost < minCost)
13775  {
13776  *pMemoryTypeIndex = memTypeIndex;
13777  if(currCost == 0)
13778  {
13779  return VK_SUCCESS;
13780  }
13781  minCost = currCost;
13782  }
13783  }
13784  }
13785  }
13786  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13787 }
13788 
13790  VmaAllocator allocator,
13791  const VkBufferCreateInfo* pBufferCreateInfo,
13792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13793  uint32_t* pMemoryTypeIndex)
13794 {
13795  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13796  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13797  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13798  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13799 
13800  const VkDevice hDev = allocator->m_hDevice;
13801  VkBuffer hBuffer = VK_NULL_HANDLE;
13802  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13803  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13804  if(res == VK_SUCCESS)
13805  {
13806  VkMemoryRequirements memReq = {};
13807  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13808  hDev, hBuffer, &memReq);
13809 
13810  res = vmaFindMemoryTypeIndex(
13811  allocator,
13812  memReq.memoryTypeBits,
13813  pAllocationCreateInfo,
13814  pMemoryTypeIndex);
13815 
13816  allocator->GetVulkanFunctions().vkDestroyBuffer(
13817  hDev, hBuffer, allocator->GetAllocationCallbacks());
13818  }
13819  return res;
13820 }
13821 
13823  VmaAllocator allocator,
13824  const VkImageCreateInfo* pImageCreateInfo,
13825  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13826  uint32_t* pMemoryTypeIndex)
13827 {
13828  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13829  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13830  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13831  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13832 
13833  const VkDevice hDev = allocator->m_hDevice;
13834  VkImage hImage = VK_NULL_HANDLE;
13835  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13836  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13837  if(res == VK_SUCCESS)
13838  {
13839  VkMemoryRequirements memReq = {};
13840  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13841  hDev, hImage, &memReq);
13842 
13843  res = vmaFindMemoryTypeIndex(
13844  allocator,
13845  memReq.memoryTypeBits,
13846  pAllocationCreateInfo,
13847  pMemoryTypeIndex);
13848 
13849  allocator->GetVulkanFunctions().vkDestroyImage(
13850  hDev, hImage, allocator->GetAllocationCallbacks());
13851  }
13852  return res;
13853 }
13854 
13855 VkResult vmaCreatePool(
13856  VmaAllocator allocator,
13857  const VmaPoolCreateInfo* pCreateInfo,
13858  VmaPool* pPool)
13859 {
13860  VMA_ASSERT(allocator && pCreateInfo && pPool);
13861 
13862  VMA_DEBUG_LOG("vmaCreatePool");
13863 
13864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13865 
13866  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13872  }
13873 #endif
13874 
13875  return res;
13876 }
13877 
13878 void vmaDestroyPool(
13879  VmaAllocator allocator,
13880  VmaPool pool)
13881 {
13882  VMA_ASSERT(allocator);
13883 
13884  if(pool == VK_NULL_HANDLE)
13885  {
13886  return;
13887  }
13888 
13889  VMA_DEBUG_LOG("vmaDestroyPool");
13890 
13891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13892 
13893 #if VMA_RECORDING_ENABLED
13894  if(allocator->GetRecorder() != VMA_NULL)
13895  {
13896  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13897  }
13898 #endif
13899 
13900  allocator->DestroyPool(pool);
13901 }
13902 
13903 void vmaGetPoolStats(
13904  VmaAllocator allocator,
13905  VmaPool pool,
13906  VmaPoolStats* pPoolStats)
13907 {
13908  VMA_ASSERT(allocator && pool && pPoolStats);
13909 
13910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13911 
13912  allocator->GetPoolStats(pool, pPoolStats);
13913 }
13914 
13916  VmaAllocator allocator,
13917  VmaPool pool,
13918  size_t* pLostAllocationCount)
13919 {
13920  VMA_ASSERT(allocator && pool);
13921 
13922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13923 
13924 #if VMA_RECORDING_ENABLED
13925  if(allocator->GetRecorder() != VMA_NULL)
13926  {
13927  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13928  }
13929 #endif
13930 
13931  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13932 }
13933 
13934 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13935 {
13936  VMA_ASSERT(allocator && pool);
13937 
13938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13939 
13940  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13941 
13942  return allocator->CheckPoolCorruption(pool);
13943 }
13944 
13945 VkResult vmaAllocateMemory(
13946  VmaAllocator allocator,
13947  const VkMemoryRequirements* pVkMemoryRequirements,
13948  const VmaAllocationCreateInfo* pCreateInfo,
13949  VmaAllocation* pAllocation,
13950  VmaAllocationInfo* pAllocationInfo)
13951 {
13952  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13953 
13954  VMA_DEBUG_LOG("vmaAllocateMemory");
13955 
13956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13957 
13958  VkResult result = allocator->AllocateMemory(
13959  *pVkMemoryRequirements,
13960  false, // requiresDedicatedAllocation
13961  false, // prefersDedicatedAllocation
13962  VK_NULL_HANDLE, // dedicatedBuffer
13963  VK_NULL_HANDLE, // dedicatedImage
13964  *pCreateInfo,
13965  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13966  pAllocation);
13967 
13968 #if VMA_RECORDING_ENABLED
13969  if(allocator->GetRecorder() != VMA_NULL)
13970  {
13971  allocator->GetRecorder()->RecordAllocateMemory(
13972  allocator->GetCurrentFrameIndex(),
13973  *pVkMemoryRequirements,
13974  *pCreateInfo,
13975  *pAllocation);
13976  }
13977 #endif
13978 
13979  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13980  {
13981  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13982  }
13983 
13984  return result;
13985 }
13986 
13988  VmaAllocator allocator,
13989  VkBuffer buffer,
13990  const VmaAllocationCreateInfo* pCreateInfo,
13991  VmaAllocation* pAllocation,
13992  VmaAllocationInfo* pAllocationInfo)
13993 {
13994  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13995 
13996  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13997 
13998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13999 
14000  VkMemoryRequirements vkMemReq = {};
14001  bool requiresDedicatedAllocation = false;
14002  bool prefersDedicatedAllocation = false;
14003  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14004  requiresDedicatedAllocation,
14005  prefersDedicatedAllocation);
14006 
14007  VkResult result = allocator->AllocateMemory(
14008  vkMemReq,
14009  requiresDedicatedAllocation,
14010  prefersDedicatedAllocation,
14011  buffer, // dedicatedBuffer
14012  VK_NULL_HANDLE, // dedicatedImage
14013  *pCreateInfo,
14014  VMA_SUBALLOCATION_TYPE_BUFFER,
14015  pAllocation);
14016 
14017 #if VMA_RECORDING_ENABLED
14018  if(allocator->GetRecorder() != VMA_NULL)
14019  {
14020  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14021  allocator->GetCurrentFrameIndex(),
14022  vkMemReq,
14023  requiresDedicatedAllocation,
14024  prefersDedicatedAllocation,
14025  *pCreateInfo,
14026  *pAllocation);
14027  }
14028 #endif
14029 
14030  if(pAllocationInfo && result == VK_SUCCESS)
14031  {
14032  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14033  }
14034 
14035  return result;
14036 }
14037 
14038 VkResult vmaAllocateMemoryForImage(
14039  VmaAllocator allocator,
14040  VkImage image,
14041  const VmaAllocationCreateInfo* pCreateInfo,
14042  VmaAllocation* pAllocation,
14043  VmaAllocationInfo* pAllocationInfo)
14044 {
14045  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14046 
14047  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14048 
14049  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14050 
14051  VkMemoryRequirements vkMemReq = {};
14052  bool requiresDedicatedAllocation = false;
14053  bool prefersDedicatedAllocation = false;
14054  allocator->GetImageMemoryRequirements(image, vkMemReq,
14055  requiresDedicatedAllocation, prefersDedicatedAllocation);
14056 
14057  VkResult result = allocator->AllocateMemory(
14058  vkMemReq,
14059  requiresDedicatedAllocation,
14060  prefersDedicatedAllocation,
14061  VK_NULL_HANDLE, // dedicatedBuffer
14062  image, // dedicatedImage
14063  *pCreateInfo,
14064  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14065  pAllocation);
14066 
14067 #if VMA_RECORDING_ENABLED
14068  if(allocator->GetRecorder() != VMA_NULL)
14069  {
14070  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14071  allocator->GetCurrentFrameIndex(),
14072  vkMemReq,
14073  requiresDedicatedAllocation,
14074  prefersDedicatedAllocation,
14075  *pCreateInfo,
14076  *pAllocation);
14077  }
14078 #endif
14079 
14080  if(pAllocationInfo && result == VK_SUCCESS)
14081  {
14082  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14083  }
14084 
14085  return result;
14086 }
14087 
14088 void vmaFreeMemory(
14089  VmaAllocator allocator,
14090  VmaAllocation allocation)
14091 {
14092  VMA_ASSERT(allocator);
14093 
14094  if(allocation == VK_NULL_HANDLE)
14095  {
14096  return;
14097  }
14098 
14099  VMA_DEBUG_LOG("vmaFreeMemory");
14100 
14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14102 
14103 #if VMA_RECORDING_ENABLED
14104  if(allocator->GetRecorder() != VMA_NULL)
14105  {
14106  allocator->GetRecorder()->RecordFreeMemory(
14107  allocator->GetCurrentFrameIndex(),
14108  allocation);
14109  }
14110 #endif
14111 
14112  allocator->FreeMemory(allocation);
14113 }
14114 
14115 VkResult vmaResizeAllocation(
14116  VmaAllocator allocator,
14117  VmaAllocation allocation,
14118  VkDeviceSize newSize)
14119 {
14120  VMA_ASSERT(allocator && allocation);
14121 
14122  VMA_DEBUG_LOG("vmaResizeAllocation");
14123 
14124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14125 
14126 #if VMA_RECORDING_ENABLED
14127  if(allocator->GetRecorder() != VMA_NULL)
14128  {
14129  allocator->GetRecorder()->RecordResizeAllocation(
14130  allocator->GetCurrentFrameIndex(),
14131  allocation,
14132  newSize);
14133  }
14134 #endif
14135 
14136  return allocator->ResizeAllocation(allocation, newSize);
14137 }
14138 
14140  VmaAllocator allocator,
14141  VmaAllocation allocation,
14142  VmaAllocationInfo* pAllocationInfo)
14143 {
14144  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14145 
14146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14147 
14148 #if VMA_RECORDING_ENABLED
14149  if(allocator->GetRecorder() != VMA_NULL)
14150  {
14151  allocator->GetRecorder()->RecordGetAllocationInfo(
14152  allocator->GetCurrentFrameIndex(),
14153  allocation);
14154  }
14155 #endif
14156 
14157  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14158 }
14159 
14160 VkBool32 vmaTouchAllocation(
14161  VmaAllocator allocator,
14162  VmaAllocation allocation)
14163 {
14164  VMA_ASSERT(allocator && allocation);
14165 
14166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14167 
14168 #if VMA_RECORDING_ENABLED
14169  if(allocator->GetRecorder() != VMA_NULL)
14170  {
14171  allocator->GetRecorder()->RecordTouchAllocation(
14172  allocator->GetCurrentFrameIndex(),
14173  allocation);
14174  }
14175 #endif
14176 
14177  return allocator->TouchAllocation(allocation);
14178 }
14179 
14181  VmaAllocator allocator,
14182  VmaAllocation allocation,
14183  void* pUserData)
14184 {
14185  VMA_ASSERT(allocator && allocation);
14186 
14187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14188 
14189  allocation->SetUserData(allocator, pUserData);
14190 
14191 #if VMA_RECORDING_ENABLED
14192  if(allocator->GetRecorder() != VMA_NULL)
14193  {
14194  allocator->GetRecorder()->RecordSetAllocationUserData(
14195  allocator->GetCurrentFrameIndex(),
14196  allocation,
14197  pUserData);
14198  }
14199 #endif
14200 }
14201 
14203  VmaAllocator allocator,
14204  VmaAllocation* pAllocation)
14205 {
14206  VMA_ASSERT(allocator && pAllocation);
14207 
14208  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
14209 
14210  allocator->CreateLostAllocation(pAllocation);
14211 
14212 #if VMA_RECORDING_ENABLED
14213  if(allocator->GetRecorder() != VMA_NULL)
14214  {
14215  allocator->GetRecorder()->RecordCreateLostAllocation(
14216  allocator->GetCurrentFrameIndex(),
14217  *pAllocation);
14218  }
14219 #endif
14220 }
14221 
14222 VkResult vmaMapMemory(
14223  VmaAllocator allocator,
14224  VmaAllocation allocation,
14225  void** ppData)
14226 {
14227  VMA_ASSERT(allocator && allocation && ppData);
14228 
14229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14230 
14231  VkResult res = allocator->Map(allocation, ppData);
14232 
14233 #if VMA_RECORDING_ENABLED
14234  if(allocator->GetRecorder() != VMA_NULL)
14235  {
14236  allocator->GetRecorder()->RecordMapMemory(
14237  allocator->GetCurrentFrameIndex(),
14238  allocation);
14239  }
14240 #endif
14241 
14242  return res;
14243 }
14244 
14245 void vmaUnmapMemory(
14246  VmaAllocator allocator,
14247  VmaAllocation allocation)
14248 {
14249  VMA_ASSERT(allocator && allocation);
14250 
14251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14252 
14253 #if VMA_RECORDING_ENABLED
14254  if(allocator->GetRecorder() != VMA_NULL)
14255  {
14256  allocator->GetRecorder()->RecordUnmapMemory(
14257  allocator->GetCurrentFrameIndex(),
14258  allocation);
14259  }
14260 #endif
14261 
14262  allocator->Unmap(allocation);
14263 }
14264 
14265 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14266 {
14267  VMA_ASSERT(allocator && allocation);
14268 
14269  VMA_DEBUG_LOG("vmaFlushAllocation");
14270 
14271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14272 
14273  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14274 
14275 #if VMA_RECORDING_ENABLED
14276  if(allocator->GetRecorder() != VMA_NULL)
14277  {
14278  allocator->GetRecorder()->RecordFlushAllocation(
14279  allocator->GetCurrentFrameIndex(),
14280  allocation, offset, size);
14281  }
14282 #endif
14283 }
14284 
14285 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14286 {
14287  VMA_ASSERT(allocator && allocation);
14288 
14289  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14290 
14291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14292 
14293  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14294 
14295 #if VMA_RECORDING_ENABLED
14296  if(allocator->GetRecorder() != VMA_NULL)
14297  {
14298  allocator->GetRecorder()->RecordInvalidateAllocation(
14299  allocator->GetCurrentFrameIndex(),
14300  allocation, offset, size);
14301  }
14302 #endif
14303 }
14304 
14305 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14306 {
14307  VMA_ASSERT(allocator);
14308 
14309  VMA_DEBUG_LOG("vmaCheckCorruption");
14310 
14311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14312 
14313  return allocator->CheckCorruption(memoryTypeBits);
14314 }
14315 
14316 VkResult vmaDefragment(
14317  VmaAllocator allocator,
14318  VmaAllocation* pAllocations,
14319  size_t allocationCount,
14320  VkBool32* pAllocationsChanged,
14321  const VmaDefragmentationInfo *pDefragmentationInfo,
14322  VmaDefragmentationStats* pDefragmentationStats)
14323 {
14324  VMA_ASSERT(allocator && pAllocations);
14325 
14326  VMA_DEBUG_LOG("vmaDefragment");
14327 
14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14329 
14330  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14331 }
14332 
14333 VkResult vmaBindBufferMemory(
14334  VmaAllocator allocator,
14335  VmaAllocation allocation,
14336  VkBuffer buffer)
14337 {
14338  VMA_ASSERT(allocator && allocation && buffer);
14339 
14340  VMA_DEBUG_LOG("vmaBindBufferMemory");
14341 
14342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14343 
14344  return allocator->BindBufferMemory(allocation, buffer);
14345 }
14346 
14347 VkResult vmaBindImageMemory(
14348  VmaAllocator allocator,
14349  VmaAllocation allocation,
14350  VkImage image)
14351 {
14352  VMA_ASSERT(allocator && allocation && image);
14353 
14354  VMA_DEBUG_LOG("vmaBindImageMemory");
14355 
14356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14357 
14358  return allocator->BindImageMemory(allocation, image);
14359 }
14360 
// One-call convenience: creates a VkBuffer, allocates memory for it, and
// binds the two together. On any failure the partially created objects are
// destroyed and both outputs are reset to VK_NULL_HANDLE.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // A zero-sized buffer is invalid per the Vulkan spec; reject it up front.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
14467 
14468 void vmaDestroyBuffer(
14469  VmaAllocator allocator,
14470  VkBuffer buffer,
14471  VmaAllocation allocation)
14472 {
14473  VMA_ASSERT(allocator);
14474 
14475  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14476  {
14477  return;
14478  }
14479 
14480  VMA_DEBUG_LOG("vmaDestroyBuffer");
14481 
14482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14483 
14484 #if VMA_RECORDING_ENABLED
14485  if(allocator->GetRecorder() != VMA_NULL)
14486  {
14487  allocator->GetRecorder()->RecordDestroyBuffer(
14488  allocator->GetCurrentFrameIndex(),
14489  allocation);
14490  }
14491 #endif
14492 
14493  if(buffer != VK_NULL_HANDLE)
14494  {
14495  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14496  }
14497 
14498  if(allocation != VK_NULL_HANDLE)
14499  {
14500  allocator->FreeMemory(allocation);
14501  }
14502 }
14503 
/** \brief Creates a VkImage, allocates memory for it, and binds them together.

Performs three steps:
1. Creates the VkImage through the allocator's Vulkan function pointers.
2. Queries memory requirements and allocates memory via the allocator,
   choosing linear vs. optimal suballocation type from the image tiling.
3. Binds the image to the allocated memory.

On failure of a later step, earlier steps are undone in reverse order
(allocation freed before the image is destroyed) and both output handles
are reset to VK_NULL_HANDLE, so the caller never receives a partially
initialized image/allocation pair.

pAllocationInfo is optional; when non-null it receives information about
the newly created allocation.
*/
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate images early: zero extent, mip levels, or array
    // layers would be invalid input.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Ensure outputs are null if any step below fails.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Linear and optimal images must not share memory pages, so they use
        // distinct suballocation types.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens regardless of allocation success, mirroring
        // vmaCreateBuffer.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: free memory first, then destroy the image, and
            // null both outputs.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the image created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    // Image creation itself failed; outputs are already null.
    return res;
}
14599 
14600 void vmaDestroyImage(
14601  VmaAllocator allocator,
14602  VkImage image,
14603  VmaAllocation allocation)
14604 {
14605  VMA_ASSERT(allocator);
14606 
14607  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14608  {
14609  return;
14610  }
14611 
14612  VMA_DEBUG_LOG("vmaDestroyImage");
14613 
14614  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14615 
14616 #if VMA_RECORDING_ENABLED
14617  if(allocator->GetRecorder() != VMA_NULL)
14618  {
14619  allocator->GetRecorder()->RecordDestroyImage(
14620  allocator->GetCurrentFrameIndex(),
14621  allocation);
14622  }
14623 #endif
14624 
14625  if(image != VK_NULL_HANDLE)
14626  {
14627  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14628  }
14629  if(allocation != VK_NULL_HANDLE)
14630  {
14631  allocator->FreeMemory(allocation);
14632  }
14633 }
14634 
14635 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1586
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1887
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1643
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1617
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2209
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1598
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1844
Definition: vk_mem_alloc.h:1947
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1590
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2309
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1640
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2579
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2098
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1487
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2190
Definition: vk_mem_alloc.h:1924
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1579
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1997
Definition: vk_mem_alloc.h:1871
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1652
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2126
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1705
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1637
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1875
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1777
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1595
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1776
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2583
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1669
VmaStatInfo total
Definition: vk_mem_alloc.h:1786
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2591
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1981
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2574
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1596
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1521
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1646
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2140
Definition: vk_mem_alloc.h:2134
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1712
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2319
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1591
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1615
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2018
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2160
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2196
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1577
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2143
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1822
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2569
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2587
Definition: vk_mem_alloc.h:1861
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2005
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1594
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1782
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1527
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1548
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1619
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1553
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2589
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1992
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2206
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1587
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1765
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2155
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1540
Definition: vk_mem_alloc.h:2130
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1931
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1778
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1544
Definition: vk_mem_alloc.h:1955
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2146
Definition: vk_mem_alloc.h:1870
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1593
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1987
Definition: vk_mem_alloc.h:1978
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1768
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1589
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2168
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1655
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2199
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1976
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2011
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1693
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1784
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1911
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1777
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1600
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1625
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1542
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1599
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2182
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1592
Definition: vk_mem_alloc.h:1942
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1633
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2333
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1649
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1777
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1774
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2187
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1951
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2314
Definition: vk_mem_alloc.h:1962
Definition: vk_mem_alloc.h:1974
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2585
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1585
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1772
Definition: vk_mem_alloc.h:1827
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2136
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1622
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1770
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1597
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1601
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1898
Definition: vk_mem_alloc.h:1969
Definition: vk_mem_alloc.h:1854
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2328
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1575
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1588
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2115
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2295
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1959
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2080
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1778
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1609
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1785
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2193
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1778
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2300