Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#include <vulkan/vulkan.h>

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;

    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

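/*
Example (a minimal usage sketch, not taken verbatim from the library docs;
`physicalDevice` and `device` are assumed to be a valid VkPhysicalDevice and
VkDevice created elsewhere):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice; // assumed to exist
    allocatorInfo.device = device;                 // assumed to exist

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers and images through the allocator ...
    vmaDestroyAllocator(allocator);
*/
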
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

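/*
Example (a sketch; assumes `allocator` is a valid VmaAllocator and
VMA_STATS_STRING_ENABLED is 1):

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString); // The string is a JSON document describing all memory.
    vmaFreeStatsString(allocator, statsString); // Must be freed with the matching function.
*/
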
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

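/*
Example (a sketch; assumes `allocator` is valid). Passing UINT32_MAX as
memoryTypeBits means no restriction coming from a specific resource:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // On success, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
*/
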
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

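/*
Example (a sketch; assumes `allocator` is valid and `memTypeIndex` was obtained
from one of the vmaFindMemoryTypeIndex* functions above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolCreateInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations are routed to the pool via VmaAllocationCreateInfo::pool.
    vmaDestroyPool(allocator, pool);
*/
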
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

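/*
Example (a sketch; assumes `allocation` was created in a host-visible memory
type, e.g. with VMA_MEMORY_USAGE_CPU_ONLY, and `srcData` is some host object):

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, &srcData, sizeof(srcData));
        vmaUnmapMemory(allocator, allocation); // Every map must be paired with an unmap.
    }
*/
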
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

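/*
Example (a sketch; assumes `allocations` is an array of `allocationCount` valid
VmaAllocation handles whose resources are safe to move at this point):

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations, allocationCount, NULL, NULL, &stats);
    // Null pDefragmentationInfo means default limits (no cap on bytes/allocations moved).
    // Afterwards, moved allocations may have new offsets or blocks, so buffers/images
    // bound to them must be destroyed, recreated, and bound again.
*/
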
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

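/*
Example (a sketch of the typical pattern; assumes `allocator` is valid):

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation); // Destroys the buffer and frees its memory in one call.
*/
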
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

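/*
Example (a sketch; assumes `allocator` is valid):

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent = { 1024, 1024, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, NULL);
    // ... use the image ...
    vmaDestroyImage(allocator, image, allocation);
*/
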
#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers below
#include <cstdlib>
#include <cstring>
#include <new> // for placement new, used by vma_new / vma_new_array

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0, or leave it undefined, to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block.
    // Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all allocations, in bytes.
    // Set to a value greater than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin before and after every allocation, in bytes.
    // Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    // Define this macro to 1 to automatically fill new allocations and destroyed
    // allocations with some bit pattern.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    // Define this macro to 1, together with a nonzero VMA_DEBUG_MARGIN, to enable
    // writing magic values to the margins and validating them, so that memory
    // corruptions (out-of-bounds writes) are detected.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting
    // all entry calls to the library.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    // Set to a value greater than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Division with mathematical rounding to the nearest integer.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer with a nonnegative value.
Returns true for 0.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns the smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns the largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
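// Worked example: with pageSize = 4096, resource A at offset 0 with size 4000 ends at
// byte 3999, which lies on page 0, and resource B at offset 4000 also starts on page 0,
// so the function returns true (they share a page). If A instead had size 4096, B would
// have to start at offset >= 4096, i.e. on page 1, and the function would return false.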

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}


static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element, if it is present in the
collection, or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

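/*
Example (a sketch): VmaStlAllocator routes standard-container allocations through
the VkAllocationCallbacks-based VmaAllocateArray/VmaFree helpers above, e.g.:

    // pAllocationCallbacks may be null, in which case VMA_SYSTEM_ALIGNED_MALLOC is used.
    std::vector< int, VmaStlAllocator<int> > v{ VmaStlAllocator<int>(pAllocationCallbacks) };
    v.push_back(42);
*/
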
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Note: passing m_Allocator.m_pCallbacks, consistent with resize() below.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

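/*
Example (a sketch): the item type is typically a small POD-like struct; note that
Alloc() returns raw, unconstructed storage and Free() does not run the destructor:

    struct MyItem { int x; }; // hypothetical POD item type
    VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128); // 128 items per block
    MyItem* item = itemAllocator.Alloc(); // raw storage: constructor is NOT called
    item->x = 7;
    itemAllocator.Free(item); // destructor is NOT called either
*/
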
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste computation
    // returning every item to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

4447 
4448 class VmaDeviceMemoryBlock;
4449 
4450 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4451 
4452 struct VmaAllocation_T
4453 {
4454  VMA_CLASS_NO_COPY(VmaAllocation_T)
4455 private:
4456  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4457 
4458  enum FLAGS
4459  {
4460  FLAG_USER_DATA_STRING = 0x01,
4461  };
4462 
4463 public:
4464  enum ALLOCATION_TYPE
4465  {
4466  ALLOCATION_TYPE_NONE,
4467  ALLOCATION_TYPE_BLOCK,
4468  ALLOCATION_TYPE_DEDICATED,
4469  };
4470 
4471  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4472  m_Alignment(1),
4473  m_Size(0),
4474  m_pUserData(VMA_NULL),
4475  m_LastUseFrameIndex(currentFrameIndex),
4476  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4477  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4478  m_MapCount(0),
4479  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4480  {
4481 #if VMA_STATS_STRING_ENABLED
4482  m_CreationFrameIndex = currentFrameIndex;
4483  m_BufferImageUsage = 0;
4484 #endif
4485  }
4486 
4487  ~VmaAllocation_T()
4488  {
4489  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4490 
4491  // Check if owned string was freed.
4492  VMA_ASSERT(m_pUserData == VMA_NULL);
4493  }
4494 
4495  void InitBlockAllocation(
4496  VmaPool hPool,
4497  VmaDeviceMemoryBlock* block,
4498  VkDeviceSize offset,
4499  VkDeviceSize alignment,
4500  VkDeviceSize size,
4501  VmaSuballocationType suballocationType,
4502  bool mapped,
4503  bool canBecomeLost)
4504  {
4505  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4506  VMA_ASSERT(block != VMA_NULL);
4507  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4508  m_Alignment = alignment;
4509  m_Size = size;
4510  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4511  m_SuballocationType = (uint8_t)suballocationType;
4512  m_BlockAllocation.m_hPool = hPool;
4513  m_BlockAllocation.m_Block = block;
4514  m_BlockAllocation.m_Offset = offset;
4515  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4516  }
4517 
4518  void InitLost()
4519  {
4520  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4521  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4522  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4523  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4524  m_BlockAllocation.m_Block = VMA_NULL;
4525  m_BlockAllocation.m_Offset = 0;
4526  m_BlockAllocation.m_CanBecomeLost = true;
4527  }
4528 
4529  void ChangeBlockAllocation(
4530  VmaAllocator hAllocator,
4531  VmaDeviceMemoryBlock* block,
4532  VkDeviceSize offset);
4533 
4534  void ChangeSize(VkDeviceSize newSize);
4535 
4536  // pMappedData not null means the allocation is created with the VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
4537  void InitDedicatedAllocation(
4538  uint32_t memoryTypeIndex,
4539  VkDeviceMemory hMemory,
4540  VmaSuballocationType suballocationType,
4541  void* pMappedData,
4542  VkDeviceSize size)
4543  {
4544  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4545  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4546  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4547  m_Alignment = 0;
4548  m_Size = size;
4549  m_SuballocationType = (uint8_t)suballocationType;
4550  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4551  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4552  m_DedicatedAllocation.m_hMemory = hMemory;
4553  m_DedicatedAllocation.m_pMappedData = pMappedData;
4554  }
4555 
4556  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4557  VkDeviceSize GetAlignment() const { return m_Alignment; }
4558  VkDeviceSize GetSize() const { return m_Size; }
4559  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4560  void* GetUserData() const { return m_pUserData; }
4561  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4562  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4563 
4564  VmaDeviceMemoryBlock* GetBlock() const
4565  {
4566  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4567  return m_BlockAllocation.m_Block;
4568  }
4569  VkDeviceSize GetOffset() const;
4570  VkDeviceMemory GetMemory() const;
4571  uint32_t GetMemoryTypeIndex() const;
4572  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4573  void* GetMappedData() const;
4574  bool CanBecomeLost() const;
4575  VmaPool GetPool() const;
4576 
4577  uint32_t GetLastUseFrameIndex() const
4578  {
4579  return m_LastUseFrameIndex.load();
4580  }
4581  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4582  {
4583  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4584  }
4585  /*
4586  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4587  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4588  - Else, returns false.
4589 
4590  If hAllocation is already lost, this function asserts; you should not call it then.
4591  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
4592  */
4593  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
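 // Worked example of the frame arithmetic above: with frameInUseCount == 2 and
 // LastUseFrameIndex == 10, MakeLost() can first succeed at
 // CurrentFrameIndex == 13, because 10 + 2 < 13; at frame 12 the allocation is
 // still considered in use.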
4594 
4595  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4596  {
4597  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4598  outInfo.blockCount = 1;
4599  outInfo.allocationCount = 1;
4600  outInfo.unusedRangeCount = 0;
4601  outInfo.usedBytes = m_Size;
4602  outInfo.unusedBytes = 0;
4603  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4604  outInfo.unusedRangeSizeMin = UINT64_MAX;
4605  outInfo.unusedRangeSizeMax = 0;
4606  }
4607 
4608  void BlockAllocMap();
4609  void BlockAllocUnmap();
4610  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4611  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4612 
4613 #if VMA_STATS_STRING_ENABLED
4614  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4615  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4616 
4617  void InitBufferImageUsage(uint32_t bufferImageUsage)
4618  {
4619  VMA_ASSERT(m_BufferImageUsage == 0);
4620  m_BufferImageUsage = bufferImageUsage;
4621  }
4622 
4623  void PrintParameters(class VmaJsonWriter& json) const;
4624 #endif
4625 
4626 private:
4627  VkDeviceSize m_Alignment;
4628  VkDeviceSize m_Size;
4629  void* m_pUserData;
4630  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4631  uint8_t m_Type; // ALLOCATION_TYPE
4632  uint8_t m_SuballocationType; // VmaSuballocationType
4633  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4634  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4635  uint8_t m_MapCount;
4636  uint8_t m_Flags; // enum FLAGS
4637 
4638  // Allocation out of VmaDeviceMemoryBlock.
4639  struct BlockAllocation
4640  {
4641  VmaPool m_hPool; // Null if belongs to general memory.
4642  VmaDeviceMemoryBlock* m_Block;
4643  VkDeviceSize m_Offset;
4644  bool m_CanBecomeLost;
4645  };
4646 
4647  // Allocation for an object that has its own private VkDeviceMemory.
4648  struct DedicatedAllocation
4649  {
4650  uint32_t m_MemoryTypeIndex;
4651  VkDeviceMemory m_hMemory;
4652  void* m_pMappedData; // Not null means memory is mapped.
4653  };
4654 
4655  union
4656  {
4657  // Allocation out of VmaDeviceMemoryBlock.
4658  BlockAllocation m_BlockAllocation;
4659  // Allocation for an object that has its own private VkDeviceMemory.
4660  DedicatedAllocation m_DedicatedAllocation;
4661  };
4662 
4663 #if VMA_STATS_STRING_ENABLED
4664  uint32_t m_CreationFrameIndex;
4665  uint32_t m_BufferImageUsage; // 0 if unknown.
4666 #endif
4667 
4668  void FreeUserDataString(VmaAllocator hAllocator);
4669 };
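// Illustrative sketch of decoding the packed m_MapCount byte above, following
// the 0x80 / 0x7F layout documented next to the member (the variable names
// here are hypothetical):
//
//   const uint8_t mapCount = /* value of m_MapCount */;
//   const bool persistentlyMapped = (mapCount & 0x80) != 0; // MAPPED flag
//   const uint8_t userMapRefs     = mapCount & 0x7F;        // vmaMapMemory() refs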
4670 
4671 /*
4672 Represents a region of a VmaDeviceMemoryBlock that is either assigned and returned
4673 as an allocated memory block, or free.
4674 */
4675 struct VmaSuballocation
4676 {
4677  VkDeviceSize offset;
4678  VkDeviceSize size;
4679  VmaAllocation hAllocation;
4680  VmaSuballocationType type;
4681 };
4682 
4683 // Comparator for offsets.
4684 struct VmaSuballocationOffsetLess
4685 {
4686  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4687  {
4688  return lhs.offset < rhs.offset;
4689  }
4690 };
4691 struct VmaSuballocationOffsetGreater
4692 {
4693  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4694  {
4695  return lhs.offset > rhs.offset;
4696  }
4697 };
4698 
4699 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4700 
4701 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
4702 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4703 
4704 /*
4705 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4706 
4707 If canMakeOtherLost was false:
4708 - item points to a FREE suballocation.
4709 - itemsToMakeLostCount is 0.
4710 
4711 If canMakeOtherLost was true:
4712 - item points to the first of a sequence of suballocations, each of which is
4713  either FREE or points to a VmaAllocation that can become lost.
4714 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4715  the requested allocation to succeed.
4716 */
4717 struct VmaAllocationRequest
4718 {
4719  VkDeviceSize offset;
4720  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4721  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4722  VmaSuballocationList::iterator item;
4723  size_t itemsToMakeLostCount;
4724  void* customData;
4725 
4726  VkDeviceSize CalcCost() const
4727  {
4728  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4729  }
4730 };
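// Worked example of CalcCost(): with VMA_LOST_ALLOCATION_COST = 1048576, a
// request that would make 2 allocations lost, overlapping 4096 bytes of them,
// costs 4096 + 2 * 1048576 = 2101248 "bytes", so it loses against a request
// served purely from free space, whose cost is 0.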
4731 
4732 /*
4733 Data structure used for bookkeeping of allocations and unused ranges of memory
4734 in a single VkDeviceMemory block.
4735 */
4736 class VmaBlockMetadata
4737 {
4738 public:
4739  VmaBlockMetadata(VmaAllocator hAllocator);
4740  virtual ~VmaBlockMetadata() { }
4741  virtual void Init(VkDeviceSize size) { m_Size = size; }
4742 
4743  // Validates all data structures inside this object. If not valid, returns false.
4744  virtual bool Validate() const = 0;
4745  VkDeviceSize GetSize() const { return m_Size; }
4746  virtual size_t GetAllocationCount() const = 0;
4747  virtual VkDeviceSize GetSumFreeSize() const = 0;
4748  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4749  // Returns true if this block is empty, i.e. contains only a single free suballocation.
4750  virtual bool IsEmpty() const = 0;
4751 
4752  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4753  // Shouldn't modify blockCount.
4754  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4755 
4756 #if VMA_STATS_STRING_ENABLED
4757  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4758 #endif
4759 
4760  // Tries to find a place for suballocation with given parameters inside this block.
4761  // If succeeded, fills pAllocationRequest and returns true.
4762  // If failed, returns false.
4763  virtual bool CreateAllocationRequest(
4764  uint32_t currentFrameIndex,
4765  uint32_t frameInUseCount,
4766  VkDeviceSize bufferImageGranularity,
4767  VkDeviceSize allocSize,
4768  VkDeviceSize allocAlignment,
4769  bool upperAddress,
4770  VmaSuballocationType allocType,
4771  bool canMakeOtherLost,
4772  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4773  VmaAllocationRequest* pAllocationRequest) = 0;
4774 
4775  virtual bool MakeRequestedAllocationsLost(
4776  uint32_t currentFrameIndex,
4777  uint32_t frameInUseCount,
4778  VmaAllocationRequest* pAllocationRequest) = 0;
4779 
4780  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4781 
4782  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4783 
4784  // Makes actual allocation based on request. Request must already be checked and valid.
4785  virtual void Alloc(
4786  const VmaAllocationRequest& request,
4787  VmaSuballocationType type,
4788  VkDeviceSize allocSize,
4789  bool upperAddress,
4790  VmaAllocation hAllocation) = 0;
4791 
4792  // Frees suballocation assigned to given memory region.
4793  virtual void Free(const VmaAllocation allocation) = 0;
4794  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4795 
4796  // Tries to resize (grow or shrink) space for given allocation, in place.
4797  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
4798 
4799 protected:
4800  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4801 
4802 #if VMA_STATS_STRING_ENABLED
4803  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4804  VkDeviceSize unusedBytes,
4805  size_t allocationCount,
4806  size_t unusedRangeCount) const;
4807  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4808  VkDeviceSize offset,
4809  VmaAllocation hAllocation) const;
4810  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4811  VkDeviceSize offset,
4812  VkDeviceSize size) const;
4813  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4814 #endif
4815 
4816 private:
4817  VkDeviceSize m_Size;
4818  const VkAllocationCallbacks* m_pAllocationCallbacks;
4819 };
4820 
4821 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4822  VMA_ASSERT(0 && "Validation failed: " #cond); \
4823  return false; \
4824  } } while(false)
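// Illustrative use of VMA_VALIDATE inside a hypothetical Validate()
// implementation; on failure the macro asserts and makes the enclosing
// function return false. The invariant shown is only an example.
//
//   bool ExampleMetadata::Validate() const
//   {
//       VMA_VALIDATE(m_SumFreeSize <= GetSize());
//       return true;
//   }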
4825 
4826 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4827 {
4828  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4829 public:
4830  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4831  virtual ~VmaBlockMetadata_Generic();
4832  virtual void Init(VkDeviceSize size);
4833 
4834  virtual bool Validate() const;
4835  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4836  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4837  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4838  virtual bool IsEmpty() const;
4839 
4840  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4841  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4842 
4843 #if VMA_STATS_STRING_ENABLED
4844  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4845 #endif
4846 
4847  virtual bool CreateAllocationRequest(
4848  uint32_t currentFrameIndex,
4849  uint32_t frameInUseCount,
4850  VkDeviceSize bufferImageGranularity,
4851  VkDeviceSize allocSize,
4852  VkDeviceSize allocAlignment,
4853  bool upperAddress,
4854  VmaSuballocationType allocType,
4855  bool canMakeOtherLost,
4856  uint32_t strategy,
4857  VmaAllocationRequest* pAllocationRequest);
4858 
4859  virtual bool MakeRequestedAllocationsLost(
4860  uint32_t currentFrameIndex,
4861  uint32_t frameInUseCount,
4862  VmaAllocationRequest* pAllocationRequest);
4863 
4864  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4865 
4866  virtual VkResult CheckCorruption(const void* pBlockData);
4867 
4868  virtual void Alloc(
4869  const VmaAllocationRequest& request,
4870  VmaSuballocationType type,
4871  VkDeviceSize allocSize,
4872  bool upperAddress,
4873  VmaAllocation hAllocation);
4874 
4875  virtual void Free(const VmaAllocation allocation);
4876  virtual void FreeAtOffset(VkDeviceSize offset);
4877 
4878  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
4879 
4880 private:
4881  uint32_t m_FreeCount;
4882  VkDeviceSize m_SumFreeSize;
4883  VmaSuballocationList m_Suballocations;
4884  // Suballocations that are free and have a size greater than a certain threshold.
4885  // Sorted by size, ascending.
4886  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4887 
4888  bool ValidateFreeSuballocationList() const;
4889 
4890  // Checks if a requested suballocation with given parameters can be placed in given suballocItem.
4891  // If yes, fills pOffset and returns true. If no, returns false.
4892  bool CheckAllocation(
4893  uint32_t currentFrameIndex,
4894  uint32_t frameInUseCount,
4895  VkDeviceSize bufferImageGranularity,
4896  VkDeviceSize allocSize,
4897  VkDeviceSize allocAlignment,
4898  VmaSuballocationType allocType,
4899  VmaSuballocationList::const_iterator suballocItem,
4900  bool canMakeOtherLost,
4901  VkDeviceSize* pOffset,
4902  size_t* itemsToMakeLostCount,
4903  VkDeviceSize* pSumFreeSize,
4904  VkDeviceSize* pSumItemSize) const;
4905  // Given a free suballocation, merges it with the following one, which must also be free.
4906  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4907  // Releases given suballocation, making it free.
4908  // Merges it with adjacent free suballocations if applicable.
4909  // Returns iterator to new free suballocation at this place.
4910  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4911  // Given a free suballocation, inserts it into the sorted list
4912  // m_FreeSuballocationsBySize if it is suitable.
4913  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4914  // Given a free suballocation, removes it from the sorted list
4915  // m_FreeSuballocationsBySize if it is suitable.
4916  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4917 };
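// Illustrative sketch, not the actual implementation: because
// m_FreeSuballocationsBySize is sorted by size, ascending, a best-fit
// candidate can be located with the VmaBinaryFindFirstNotLess() helper used
// elsewhere in this file, searching for the first registered free
// suballocation whose size is not less than allocSize. Everything from that
// position onward is large enough; everything before it is too small.
//
//   VmaSuballocationList::iterator* const candidate = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       allocSize,
//       SuballocItemSizeLess()); // hypothetical size-based comparator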
4918 
4919 /*
4920 Allocations and their references in internal data structure look like this:
4921 
4922 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4923 
4924  0 +-------+
4925  | |
4926  | |
4927  | |
4928  +-------+
4929  | Alloc | 1st[m_1stNullItemsBeginCount]
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4932  +-------+
4933  | ... |
4934  +-------+
4935  | Alloc | 1st[1st.size() - 1]
4936  +-------+
4937  | |
4938  | |
4939  | |
4940 GetSize() +-------+
4941 
4942 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4943 
4944  0 +-------+
4945  | Alloc | 2nd[0]
4946  +-------+
4947  | Alloc | 2nd[1]
4948  +-------+
4949  | ... |
4950  +-------+
4951  | Alloc | 2nd[2nd.size() - 1]
4952  +-------+
4953  | |
4954  | |
4955  | |
4956  +-------+
4957  | Alloc | 1st[m_1stNullItemsBeginCount]
4958  +-------+
4959  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4960  +-------+
4961  | ... |
4962  +-------+
4963  | Alloc | 1st[1st.size() - 1]
4964  +-------+
4965  | |
4966 GetSize() +-------+
4967 
4968 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4969 
4970  0 +-------+
4971  | |
4972  | |
4973  | |
4974  +-------+
4975  | Alloc | 1st[m_1stNullItemsBeginCount]
4976  +-------+
4977  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4978  +-------+
4979  | ... |
4980  +-------+
4981  | Alloc | 1st[1st.size() - 1]
4982  +-------+
4983  | |
4984  | |
4985  | |
4986  +-------+
4987  | Alloc | 2nd[2nd.size() - 1]
4988  +-------+
4989  | ... |
4990  +-------+
4991  | Alloc | 2nd[1]
4992  +-------+
4993  | Alloc | 2nd[0]
4994 GetSize() +-------+
4995 
4996 */
4997 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4998 {
4999  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
5000 public:
5001  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
5002  virtual ~VmaBlockMetadata_Linear();
5003  virtual void Init(VkDeviceSize size);
5004 
5005  virtual bool Validate() const;
5006  virtual size_t GetAllocationCount() const;
5007  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
5008  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5009  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5010 
5011  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5012  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5013 
5014 #if VMA_STATS_STRING_ENABLED
5015  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5016 #endif
5017 
5018  virtual bool CreateAllocationRequest(
5019  uint32_t currentFrameIndex,
5020  uint32_t frameInUseCount,
5021  VkDeviceSize bufferImageGranularity,
5022  VkDeviceSize allocSize,
5023  VkDeviceSize allocAlignment,
5024  bool upperAddress,
5025  VmaSuballocationType allocType,
5026  bool canMakeOtherLost,
5027  uint32_t strategy,
5028  VmaAllocationRequest* pAllocationRequest);
5029 
5030  virtual bool MakeRequestedAllocationsLost(
5031  uint32_t currentFrameIndex,
5032  uint32_t frameInUseCount,
5033  VmaAllocationRequest* pAllocationRequest);
5034 
5035  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5036 
5037  virtual VkResult CheckCorruption(const void* pBlockData);
5038 
5039  virtual void Alloc(
5040  const VmaAllocationRequest& request,
5041  VmaSuballocationType type,
5042  VkDeviceSize allocSize,
5043  bool upperAddress,
5044  VmaAllocation hAllocation);
5045 
5046  virtual void Free(const VmaAllocation allocation);
5047  virtual void FreeAtOffset(VkDeviceSize offset);
5048 
5049 private:
5050  /*
5051  There are two suballocation vectors, used in ping-pong way.
5052  The one with index m_1stVectorIndex is called 1st.
5053  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5054  2nd can be non-empty only when 1st is not empty.
5055  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5056  */
5057  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5058 
5059  enum SECOND_VECTOR_MODE
5060  {
5061  SECOND_VECTOR_EMPTY,
5062  /*
5063  Suballocations in 2nd vector are created later than the ones in 1st, but they
5064  all have smaller offsets.
5065  */
5066  SECOND_VECTOR_RING_BUFFER,
5067  /*
5068  Suballocations in 2nd vector are upper side of double stack.
5069  They all have offsets higher than those in 1st vector.
5070  Top of this stack means smaller offsets, but higher indices in this vector.
5071  */
5072  SECOND_VECTOR_DOUBLE_STACK,
5073  };
5074 
5075  VkDeviceSize m_SumFreeSize;
5076  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5077  uint32_t m_1stVectorIndex;
5078  SECOND_VECTOR_MODE m_2ndVectorMode;
5079 
5080  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5081  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5082  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5083  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5084 
5085  // Number of items in 1st vector with hAllocation = null at the beginning.
5086  size_t m_1stNullItemsBeginCount;
5087  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5088  size_t m_1stNullItemsMiddleCount;
5089  // Number of items in 2nd vector with hAllocation = null.
5090  size_t m_2ndNullItemsCount;
5091 
5092  bool ShouldCompact1st() const;
5093  void CleanupAfterFree();
5094 };
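// Summary of the ring-buffer case from the diagram above, as a best-effort
// reading of the comments in this class:
//   1. New allocations are appended to the 1st vector at increasing offsets.
//   2. Freeing from the front only marks entries as null
//      (m_1stNullItemsBeginCount grows); compaction happens lazily, gated by
//      ShouldCompact1st().
//   3. When allocations wrap around into the space freed near offset 0, they
//      go to the 2nd vector and m_2ndVectorMode becomes
//      SECOND_VECTOR_RING_BUFFER; once the 1st vector drains completely, the
//      two vectors swap roles via m_1stVectorIndex.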
5095 
5096 /*
5097 - GetSize() is the original size of allocated memory block.
5098 - m_UsableSize is this size aligned down to a power of two.
5099  All allocations and calculations happen relative to m_UsableSize.
5100 - GetUnusableSize() is the difference between them.
5101  It is reported as a separate, unused range, not available for allocations.
5102 
5103 Node at level 0 has size = m_UsableSize.
5104 Each subsequent level contains nodes half the size of those on the previous level.
5105 m_LevelCount is the maximum number of levels to use in the current object.
5106 */
5107 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5108 {
5109  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5110 public:
5111  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5112  virtual ~VmaBlockMetadata_Buddy();
5113  virtual void Init(VkDeviceSize size);
5114 
5115  virtual bool Validate() const;
5116  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5117  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5118  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5119  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5120 
5121  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5122  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5123 
5124 #if VMA_STATS_STRING_ENABLED
5125  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5126 #endif
5127 
5128  virtual bool CreateAllocationRequest(
5129  uint32_t currentFrameIndex,
5130  uint32_t frameInUseCount,
5131  VkDeviceSize bufferImageGranularity,
5132  VkDeviceSize allocSize,
5133  VkDeviceSize allocAlignment,
5134  bool upperAddress,
5135  VmaSuballocationType allocType,
5136  bool canMakeOtherLost,
5137  uint32_t strategy,
5138  VmaAllocationRequest* pAllocationRequest);
5139 
5140  virtual bool MakeRequestedAllocationsLost(
5141  uint32_t currentFrameIndex,
5142  uint32_t frameInUseCount,
5143  VmaAllocationRequest* pAllocationRequest);
5144 
5145  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5146 
5147  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5148 
5149  virtual void Alloc(
5150  const VmaAllocationRequest& request,
5151  VmaSuballocationType type,
5152  VkDeviceSize allocSize,
5153  bool upperAddress,
5154  VmaAllocation hAllocation);
5155 
5156  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5157  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5158 
5159 private:
5160  static const VkDeviceSize MIN_NODE_SIZE = 32;
5161  static const size_t MAX_LEVELS = 30;
5162 
5163  struct ValidationContext
5164  {
5165  size_t calculatedAllocationCount;
5166  size_t calculatedFreeCount;
5167  VkDeviceSize calculatedSumFreeSize;
5168 
5169  ValidationContext() :
5170  calculatedAllocationCount(0),
5171  calculatedFreeCount(0),
5172  calculatedSumFreeSize(0) { }
5173  };
5174 
5175  struct Node
5176  {
5177  VkDeviceSize offset;
5178  enum TYPE
5179  {
5180  TYPE_FREE,
5181  TYPE_ALLOCATION,
5182  TYPE_SPLIT,
5183  TYPE_COUNT
5184  } type;
5185  Node* parent;
5186  Node* buddy;
5187 
5188  union
5189  {
5190  struct
5191  {
5192  Node* prev;
5193  Node* next;
5194  } free;
5195  struct
5196  {
5197  VmaAllocation alloc;
5198  } allocation;
5199  struct
5200  {
5201  Node* leftChild;
5202  } split;
5203  };
5204  };
5205 
5206  // Size of the memory block aligned down to a power of two.
5207  VkDeviceSize m_UsableSize;
5208  uint32_t m_LevelCount;
5209 
5210  Node* m_Root;
5211  struct {
5212  Node* front;
5213  Node* back;
5214  } m_FreeList[MAX_LEVELS];
5215  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5216  size_t m_AllocationCount;
5217  // Number of nodes in the tree with type == TYPE_FREE.
5218  size_t m_FreeCount;
5219  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5220  VkDeviceSize m_SumFreeSize;
5221 
5222  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5223  void DeleteNode(Node* node);
5224  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5225  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5226  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5227  // Alloc passed just for validation. Can be null.
5228  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5229  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5230  // Adds node to the front of FreeList at given level.
5231  // node->type must be FREE.
5232  // node->free.prev, next can be undefined.
5233  void AddToFreeListFront(uint32_t level, Node* node);
5234  // Removes node from FreeList at given level.
5235  // node->type must be FREE.
5236  // node->free.prev, next stay untouched.
5237  void RemoveFromFreeList(uint32_t level, Node* node);
5238 
5239 #if VMA_STATS_STRING_ENABLED
5240  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5241 #endif
5242 };
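// Worked example of the level arithmetic above: with m_UsableSize = 256 MiB,
// LevelToNodeSize(0) is 256 MiB, LevelToNodeSize(1) is 128 MiB,
// LevelToNodeSize(2) is 64 MiB, and so on. A 48 MiB request is served from
// level 2, the deepest level whose node size is still >= 48 MiB; the
// remaining 16 MiB of internal fragmentation stays counted in m_SumFreeSize,
// as noted next to that member.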
5243 
5244 /*
5245 Represents a single block of device memory (`VkDeviceMemory`) with all the
5246 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5247 
5248 Thread-safety: This class must be externally synchronized.
5249 */
5250 class VmaDeviceMemoryBlock
5251 {
5252  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5253 public:
5254  VmaBlockMetadata* m_pMetadata;
5255 
5256  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5257 
5258  ~VmaDeviceMemoryBlock()
5259  {
5260  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5261  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5262  }
5263 
5264  // Always call after construction.
5265  void Init(
5266  VmaAllocator hAllocator,
5267  uint32_t newMemoryTypeIndex,
5268  VkDeviceMemory newMemory,
5269  VkDeviceSize newSize,
5270  uint32_t id,
5271  uint32_t algorithm);
5272  // Always call before destruction.
5273  void Destroy(VmaAllocator allocator);
5274 
5275  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5276  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5277  uint32_t GetId() const { return m_Id; }
5278  void* GetMappedData() const { return m_pMappedData; }
5279 
5280  // Validates all data structures inside this object. If not valid, returns false.
5281  bool Validate() const;
5282 
5283  VkResult CheckCorruption(VmaAllocator hAllocator);
5284 
5285  // ppData can be null.
5286  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5287  void Unmap(VmaAllocator hAllocator, uint32_t count);
5288 
5289  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5290  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5291 
5292  VkResult BindBufferMemory(
5293  const VmaAllocator hAllocator,
5294  const VmaAllocation hAllocation,
5295  VkBuffer hBuffer);
5296  VkResult BindImageMemory(
5297  const VmaAllocator hAllocator,
5298  const VmaAllocation hAllocation,
5299  VkImage hImage);
5300 
5301 private:
5302  uint32_t m_MemoryTypeIndex;
5303  uint32_t m_Id;
5304  VkDeviceMemory m_hMemory;
5305 
5306  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5307  // Also protects m_MapCount, m_pMappedData.
5308  VMA_MUTEX m_Mutex;
5309  uint32_t m_MapCount;
5310  void* m_pMappedData;
5311 };
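// A reasonable inference from the members above (m_MapCount guarded by
// m_Mutex): Map() / Unmap() are reference counted, so vkMapMemory is needed
// only on the 0 -> 1 transition and vkUnmapMemory only when the count drops
// back to 0, making nested mappings of the same block cheap.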
5312 
5313 struct VmaPointerLess
5314 {
5315  bool operator()(const void* lhs, const void* rhs) const
5316  {
5317  return lhs < rhs;
5318  }
5319 };
5320 
5321 class VmaDefragmentator;
5322 
5323 /*
5324 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5325 Vulkan memory type.
5326 
5327 Synchronized internally with a mutex.
5328 */
5329 struct VmaBlockVector
5330 {
5331  VMA_CLASS_NO_COPY(VmaBlockVector)
5332 public:
5333  VmaBlockVector(
5334  VmaAllocator hAllocator,
5335  uint32_t memoryTypeIndex,
5336  VkDeviceSize preferredBlockSize,
5337  size_t minBlockCount,
5338  size_t maxBlockCount,
5339  VkDeviceSize bufferImageGranularity,
5340  uint32_t frameInUseCount,
5341  bool isCustomPool,
5342  bool explicitBlockSize,
5343  uint32_t algorithm);
5344  ~VmaBlockVector();
5345 
5346  VkResult CreateMinBlocks();
5347 
5348  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5349  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5350  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5351  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5352  uint32_t GetAlgorithm() const { return m_Algorithm; }
5353 
5354  void GetPoolStats(VmaPoolStats* pStats);
5355 
5356  bool IsEmpty() const { return m_Blocks.empty(); }
5357  bool IsCorruptionDetectionEnabled() const;
5358 
5359  VkResult Allocate(
5360  VmaPool hCurrentPool,
5361  uint32_t currentFrameIndex,
5362  VkDeviceSize size,
5363  VkDeviceSize alignment,
5364  const VmaAllocationCreateInfo& createInfo,
5365  VmaSuballocationType suballocType,
5366  VmaAllocation* pAllocation);
5367 
5368  void Free(
5369  VmaAllocation hAllocation);
5370 
5371  // Adds statistics of this BlockVector to pStats.
5372  void AddStats(VmaStats* pStats);
5373 
5374 #if VMA_STATS_STRING_ENABLED
5375  void PrintDetailedMap(class VmaJsonWriter& json);
5376 #endif
5377 
5378  void MakePoolAllocationsLost(
5379  uint32_t currentFrameIndex,
5380  size_t* pLostAllocationCount);
5381  VkResult CheckCorruption();
5382 
5383  VmaDefragmentator* EnsureDefragmentator(
5384  VmaAllocator hAllocator,
5385  uint32_t currentFrameIndex);
5386 
5387  VkResult Defragment(
5388  VmaDefragmentationStats* pDefragmentationStats,
5389  VkDeviceSize& maxBytesToMove,
5390  uint32_t& maxAllocationsToMove);
5391 
5392  void DestroyDefragmentator();
5393 
5394 private:
5395  friend class VmaDefragmentator;
5396 
5397  const VmaAllocator m_hAllocator;
5398  const uint32_t m_MemoryTypeIndex;
5399  const VkDeviceSize m_PreferredBlockSize;
5400  const size_t m_MinBlockCount;
5401  const size_t m_MaxBlockCount;
5402  const VkDeviceSize m_BufferImageGranularity;
5403  const uint32_t m_FrameInUseCount;
5404  const bool m_IsCustomPool;
5405  const bool m_ExplicitBlockSize;
5406  const uint32_t m_Algorithm;
5407  bool m_HasEmptyBlock;
5408  VMA_MUTEX m_Mutex;
5409  // Incrementally sorted by sumFreeSize, ascending.
5410  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5411  /* There can be at most one memory block that is completely empty, kept as
5412  a hysteresis to avoid the pessimistic case of alternately creating and
5413  destroying a VkDeviceMemory. */
5414  VmaDefragmentator* m_pDefragmentator;
5415  uint32_t m_NextBlockId;
5416 
5417  VkDeviceSize CalcMaxBlockSize() const;
5418 
5419  // Finds and removes given block from vector.
5420  void Remove(VmaDeviceMemoryBlock* pBlock);
5421 
5422  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5423  // after this call.
5424  void IncrementallySortBlocks();
5425 
5426  // To be used only without CAN_MAKE_OTHER_LOST flag.
5427  VkResult AllocateFromBlock(
5428  VmaDeviceMemoryBlock* pBlock,
5429  VmaPool hCurrentPool,
5430  uint32_t currentFrameIndex,
5431  VkDeviceSize size,
5432  VkDeviceSize alignment,
5433  VmaAllocationCreateFlags allocFlags,
5434  void* pUserData,
5435  VmaSuballocationType suballocType,
5436  uint32_t strategy,
5437  VmaAllocation* pAllocation);
5438 
5439  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5440 };
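// Sketch of the likely flow of Allocate() in the common case, inferred from
// the members and helpers declared above (not a verbatim description):
//   1. Try AllocateFromBlock() on existing blocks; m_Blocks is kept
//      incrementally sorted by sumFreeSize, so candidates are tried in a
//      predictable order.
//   2. If nothing fits and m_Blocks.size() < m_MaxBlockCount, CreateBlock()
//      with a size derived from m_PreferredBlockSize, then allocate from it.
//   3. Otherwise the allocation fails (possibly after trying to make other
//      allocations lost, where that is allowed).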
5441 
5442 struct VmaPool_T
5443 {
5444  VMA_CLASS_NO_COPY(VmaPool_T)
5445 public:
5446  VmaBlockVector m_BlockVector;
5447 
5448  VmaPool_T(
5449  VmaAllocator hAllocator,
5450  const VmaPoolCreateInfo& createInfo,
5451  VkDeviceSize preferredBlockSize);
5452  ~VmaPool_T();
5453 
5454  uint32_t GetId() const { return m_Id; }
5455  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5456 
5457 #if VMA_STATS_STRING_ENABLED
5458  //void PrintDetailedMap(class VmaStringBuilder& sb);
5459 #endif
5460 
5461 private:
5462  uint32_t m_Id;
5463 };
5464 
5465 class VmaDefragmentator
5466 {
5467  VMA_CLASS_NO_COPY(VmaDefragmentator)
5468 private:
5469  const VmaAllocator m_hAllocator;
5470  VmaBlockVector* const m_pBlockVector;
5471  uint32_t m_CurrentFrameIndex;
5472  VkDeviceSize m_BytesMoved;
5473  uint32_t m_AllocationsMoved;
5474 
5475  struct AllocationInfo
5476  {
5477  VmaAllocation m_hAllocation;
5478  VkBool32* m_pChanged;
5479 
5480  AllocationInfo() :
5481  m_hAllocation(VK_NULL_HANDLE),
5482  m_pChanged(VMA_NULL)
5483  {
5484  }
5485  };
5486 
5487  struct AllocationInfoSizeGreater
5488  {
5489  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5490  {
5491  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5492  }
5493  };
5494 
5495  // Used between AddAllocation and Defragment.
5496  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5497 
5498  struct BlockInfo
5499  {
5500  VmaDeviceMemoryBlock* m_pBlock;
5501  bool m_HasNonMovableAllocations;
5502  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5503 
5504  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5505  m_pBlock(VMA_NULL),
5506  m_HasNonMovableAllocations(true),
5507  m_Allocations(pAllocationCallbacks),
5508  m_pMappedDataForDefragmentation(VMA_NULL)
5509  {
5510  }
5511 
5512  void CalcHasNonMovableAllocations()
5513  {
5514  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5515  const size_t defragmentAllocCount = m_Allocations.size();
5516  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5517  }
5518 
5519  void SortAllocationsBySizeDescecnding()
5520  {
5521  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5522  }
5523 
5524  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5525  void Unmap(VmaAllocator hAllocator);
5526 
5527  private:
5528  // Not null if mapped for defragmentation only, not originally mapped.
5529  void* m_pMappedDataForDefragmentation;
5530  };
5531 
5532  struct BlockPointerLess
5533  {
5534  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5535  {
5536  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5537  }
5538  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5539  {
5540  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5541  }
5542  };
5543 
5544  // 1. Blocks with some non-movable allocations go first.
5545  // 2. Blocks with smaller sumFreeSize go first.
5546  struct BlockInfoCompareMoveDestination
5547  {
5548  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5549  {
5550  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5551  {
5552  return true;
5553  }
5554  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5555  {
5556  return false;
5557  }
5558  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5559  {
5560  return true;
5561  }
5562  return false;
5563  }
5564  };
5565 
5566  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5567  BlockInfoVector m_Blocks;
5568 
5569  VkResult DefragmentRound(
5570  VkDeviceSize maxBytesToMove,
5571  uint32_t maxAllocationsToMove);
5572 
5573  static bool MoveMakesSense(
5574  size_t dstBlockIndex, VkDeviceSize dstOffset,
5575  size_t srcBlockIndex, VkDeviceSize srcOffset);
5576 
5577 public:
5578  VmaDefragmentator(
5579  VmaAllocator hAllocator,
5580  VmaBlockVector* pBlockVector,
5581  uint32_t currentFrameIndex);
5582 
5583  ~VmaDefragmentator();
5584 
5585  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5586  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5587 
5588  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5589 
5590  VkResult Defragment(
5591  VkDeviceSize maxBytesToMove,
5592  uint32_t maxAllocationsToMove);
5593 };
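// Minimal usage sketch wiring together the declarations above; hAllocator,
// frameIndex, allocs, allocsChanged, allocCount and pStats are assumed to
// exist in the surrounding code:
//
//   VmaDefragmentator* const defrag =
//       blockVector->EnsureDefragmentator(hAllocator, frameIndex);
//   for(size_t i = 0; i < allocCount; ++i)
//       defrag->AddAllocation(allocs[i], &allocsChanged[i]);
//   VkDeviceSize maxBytes  = VK_WHOLE_SIZE;
//   uint32_t     maxAllocs = UINT32_MAX;
//   blockVector->Defragment(pStats, maxBytes, maxAllocs);
//   blockVector->DestroyDefragmentator();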
5594 
5595 #if VMA_RECORDING_ENABLED
5596 
5597 class VmaRecorder
5598 {
5599 public:
5600  VmaRecorder();
5601  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5602  void WriteConfiguration(
5603  const VkPhysicalDeviceProperties& devProps,
5604  const VkPhysicalDeviceMemoryProperties& memProps,
5605  bool dedicatedAllocationExtensionEnabled);
5606  ~VmaRecorder();
5607 
5608  void RecordCreateAllocator(uint32_t frameIndex);
5609  void RecordDestroyAllocator(uint32_t frameIndex);
5610  void RecordCreatePool(uint32_t frameIndex,
5611  const VmaPoolCreateInfo& createInfo,
5612  VmaPool pool);
5613  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5614  void RecordAllocateMemory(uint32_t frameIndex,
5615  const VkMemoryRequirements& vkMemReq,
5616  const VmaAllocationCreateInfo& createInfo,
5617  VmaAllocation allocation);
5618  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5619  const VkMemoryRequirements& vkMemReq,
5620  bool requiresDedicatedAllocation,
5621  bool prefersDedicatedAllocation,
5622  const VmaAllocationCreateInfo& createInfo,
5623  VmaAllocation allocation);
5624  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5625  const VkMemoryRequirements& vkMemReq,
5626  bool requiresDedicatedAllocation,
5627  bool prefersDedicatedAllocation,
5628  const VmaAllocationCreateInfo& createInfo,
5629  VmaAllocation allocation);
5630  void RecordFreeMemory(uint32_t frameIndex,
5631  VmaAllocation allocation);
5632  void RecordResizeAllocation(
5633  uint32_t frameIndex,
5634  VmaAllocation allocation,
5635  VkDeviceSize newSize);
5636  void RecordSetAllocationUserData(uint32_t frameIndex,
5637  VmaAllocation allocation,
5638  const void* pUserData);
5639  void RecordCreateLostAllocation(uint32_t frameIndex,
5640  VmaAllocation allocation);
5641  void RecordMapMemory(uint32_t frameIndex,
5642  VmaAllocation allocation);
5643  void RecordUnmapMemory(uint32_t frameIndex,
5644  VmaAllocation allocation);
5645  void RecordFlushAllocation(uint32_t frameIndex,
5646  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5647  void RecordInvalidateAllocation(uint32_t frameIndex,
5648  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5649  void RecordCreateBuffer(uint32_t frameIndex,
5650  const VkBufferCreateInfo& bufCreateInfo,
5651  const VmaAllocationCreateInfo& allocCreateInfo,
5652  VmaAllocation allocation);
5653  void RecordCreateImage(uint32_t frameIndex,
5654  const VkImageCreateInfo& imageCreateInfo,
5655  const VmaAllocationCreateInfo& allocCreateInfo,
5656  VmaAllocation allocation);
5657  void RecordDestroyBuffer(uint32_t frameIndex,
5658  VmaAllocation allocation);
5659  void RecordDestroyImage(uint32_t frameIndex,
5660  VmaAllocation allocation);
5661  void RecordTouchAllocation(uint32_t frameIndex,
5662  VmaAllocation allocation);
5663  void RecordGetAllocationInfo(uint32_t frameIndex,
5664  VmaAllocation allocation);
5665  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5666  VmaPool pool);
5667 
5668 private:
5669  struct CallParams
5670  {
5671  uint32_t threadId;
5672  double time;
5673  };
5674 
5675  class UserDataString
5676  {
5677  public:
5678  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5679  const char* GetString() const { return m_Str; }
5680 
5681  private:
5682  char m_PtrStr[17];
5683  const char* m_Str;
5684  };
5685 
5686  bool m_UseMutex;
5687  VmaRecordFlags m_Flags;
5688  FILE* m_File;
5689  VMA_MUTEX m_FileMutex;
5690  int64_t m_Freq;
5691  int64_t m_StartCounter;
5692 
5693  void GetBasicParams(CallParams& outParams);
5694  void Flush();
5695 };
5696 
5697 #endif // #if VMA_RECORDING_ENABLED
5698 
5699 // Main allocator object.
5700 struct VmaAllocator_T
5701 {
5702  VMA_CLASS_NO_COPY(VmaAllocator_T)
5703 public:
5704  bool m_UseMutex;
5705  bool m_UseKhrDedicatedAllocation;
5706  VkDevice m_hDevice;
5707  bool m_AllocationCallbacksSpecified;
5708  VkAllocationCallbacks m_AllocationCallbacks;
5709  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5710 
5711  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if no limit is set for that heap.
5712  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5713  VMA_MUTEX m_HeapSizeLimitMutex;
5714 
5715  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5716  VkPhysicalDeviceMemoryProperties m_MemProps;
5717 
5718  // Default pools.
5719  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5720 
5721  // Each vector is sorted by memory (handle value).
5722  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5723  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5724  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5725 
5726  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5727  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5728  ~VmaAllocator_T();
5729 
5730  const VkAllocationCallbacks* GetAllocationCallbacks() const
5731  {
5732  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5733  }
5734  const VmaVulkanFunctions& GetVulkanFunctions() const
5735  {
5736  return m_VulkanFunctions;
5737  }
5738 
5739  VkDeviceSize GetBufferImageGranularity() const
5740  {
5741  return VMA_MAX(
5742  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5743  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5744  }
5745 
5746  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5747  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5748 
5749  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5750  {
5751  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5752  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5753  }
5754  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5755  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5756  {
5757  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5758  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5759  }
5760  // Minimum alignment for all allocations in specific memory type.
5761  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5762  {
5763  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5764  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5765  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5766  }
5767 
5768  bool IsIntegratedGpu() const
5769  {
5770  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5771  }
5772 
5773 #if VMA_RECORDING_ENABLED
5774  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5775 #endif
5776 
5777  void GetBufferMemoryRequirements(
5778  VkBuffer hBuffer,
5779  VkMemoryRequirements& memReq,
5780  bool& requiresDedicatedAllocation,
5781  bool& prefersDedicatedAllocation) const;
5782  void GetImageMemoryRequirements(
5783  VkImage hImage,
5784  VkMemoryRequirements& memReq,
5785  bool& requiresDedicatedAllocation,
5786  bool& prefersDedicatedAllocation) const;
5787 
5788  // Main allocation function.
5789  VkResult AllocateMemory(
5790  const VkMemoryRequirements& vkMemReq,
5791  bool requiresDedicatedAllocation,
5792  bool prefersDedicatedAllocation,
5793  VkBuffer dedicatedBuffer,
5794  VkImage dedicatedImage,
5795  const VmaAllocationCreateInfo& createInfo,
5796  VmaSuballocationType suballocType,
5797  VmaAllocation* pAllocation);
5798 
5799  // Main deallocation function.
5800  void FreeMemory(const VmaAllocation allocation);
5801 
5802  VkResult ResizeAllocation(
5803  const VmaAllocation alloc,
5804  VkDeviceSize newSize);
5805 
5806  void CalculateStats(VmaStats* pStats);
5807 
5808 #if VMA_STATS_STRING_ENABLED
5809  void PrintDetailedMap(class VmaJsonWriter& json);
5810 #endif
5811 
5812  VkResult Defragment(
5813  VmaAllocation* pAllocations,
5814  size_t allocationCount,
5815  VkBool32* pAllocationsChanged,
5816  const VmaDefragmentationInfo* pDefragmentationInfo,
5817  VmaDefragmentationStats* pDefragmentationStats);
5818 
5819  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5820  bool TouchAllocation(VmaAllocation hAllocation);
5821 
5822  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5823  void DestroyPool(VmaPool pool);
5824  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5825 
5826  void SetCurrentFrameIndex(uint32_t frameIndex);
5827  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5828 
5829  void MakePoolAllocationsLost(
5830  VmaPool hPool,
5831  size_t* pLostAllocationCount);
5832  VkResult CheckPoolCorruption(VmaPool hPool);
5833  VkResult CheckCorruption(uint32_t memoryTypeBits);
5834 
5835  void CreateLostAllocation(VmaAllocation* pAllocation);
5836 
5837  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5838  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5839 
5840  VkResult Map(VmaAllocation hAllocation, void** ppData);
5841  void Unmap(VmaAllocation hAllocation);
5842 
5843  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5844  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5845 
5846  void FlushOrInvalidateAllocation(
5847  VmaAllocation hAllocation,
5848  VkDeviceSize offset, VkDeviceSize size,
5849  VMA_CACHE_OPERATION op);
5850 
5851  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5852 
5853 private:
5854  VkDeviceSize m_PreferredLargeHeapBlockSize;
5855 
5856  VkPhysicalDevice m_PhysicalDevice;
5857  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5858 
5859  VMA_MUTEX m_PoolsMutex;
5860  // Protected by m_PoolsMutex. Sorted by pointer value.
5861  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5862  uint32_t m_NextPoolId;
5863 
5864  VmaVulkanFunctions m_VulkanFunctions;
5865 
5866 #if VMA_RECORDING_ENABLED
5867  VmaRecorder* m_pRecorder;
5868 #endif
5869 
5870  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5871 
5872  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5873 
5874  VkResult AllocateMemoryOfType(
5875  VkDeviceSize size,
5876  VkDeviceSize alignment,
5877  bool dedicatedAllocation,
5878  VkBuffer dedicatedBuffer,
5879  VkImage dedicatedImage,
5880  const VmaAllocationCreateInfo& createInfo,
5881  uint32_t memTypeIndex,
5882  VmaSuballocationType suballocType,
5883  VmaAllocation* pAllocation);
5884 
5885  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
5886  VkResult AllocateDedicatedMemory(
5887  VkDeviceSize size,
5888  VmaSuballocationType suballocType,
5889  uint32_t memTypeIndex,
5890  bool map,
5891  bool isUserDataString,
5892  void* pUserData,
5893  VkBuffer dedicatedBuffer,
5894  VkImage dedicatedImage,
5895  VmaAllocation* pAllocation);
5896 
5897  // Frees the given allocation's dedicated memory, i.e. its own private VkDeviceMemory.
5898  void FreeDedicatedMemory(VmaAllocation allocation);
5899 };
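// Likely flow of AllocateMemory(), inferred from the declarations above (a
// sketch, not a statement of the implementation):
//   1. Decide between dedicated and block allocation, using
//      requiresDedicatedAllocation / prefersDedicatedAllocation and the
//      create flags.
//   2. For block allocations, pick a memory type and call
//      AllocateMemoryOfType(), which allocates from the matching
//      m_pBlockVectors[memTypeIndex].
//   3. Dedicated allocations go through AllocateDedicatedMemory() and are
//      registered in m_pDedicatedAllocations[memTypeIndex].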
5900 
5902 // Memory allocation #2 after VmaAllocator_T definition
5903 
5904 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5905 {
5906  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5907 }
5908 
5909 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5910 {
5911  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5912 }
5913 
5914 template<typename T>
5915 static T* VmaAllocate(VmaAllocator hAllocator)
5916 {
5917  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5918 }
5919 
5920 template<typename T>
5921 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5922 {
5923  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5924 }
5925 
5926 template<typename T>
5927 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5928 {
5929  if(ptr != VMA_NULL)
5930  {
5931  ptr->~T();
5932  VmaFree(hAllocator, ptr);
5933  }
5934 }
5935 
5936 template<typename T>
5937 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5938 {
5939  if(ptr != VMA_NULL)
5940  {
5941  for(size_t i = count; i--; )
5942  ptr[i].~T();
5943  VmaFree(hAllocator, ptr);
5944  }
5945 }
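// Usage sketch: VmaAllocate / VmaAllocateArray only reserve raw, suitably
// aligned memory, so objects must be constructed in place (the vma_new /
// vma_new_array macros used elsewhere in this file wrap that pattern), and
// vma_delete runs the destructor before freeing. Foo is a hypothetical type.
//
//   #include <new> // placement new
//   Foo* const p = new(VmaAllocate<Foo>(hAllocator)) Foo();
//   vma_delete(hAllocator, p); // ~Foo() + VmaFree()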
5946 
5948 // VmaStringBuilder
5949 
5950 #if VMA_STATS_STRING_ENABLED
5951 
5952 class VmaStringBuilder
5953 {
5954 public:
5955  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5956  size_t GetLength() const { return m_Data.size(); }
5957  const char* GetData() const { return m_Data.data(); }
5958 
5959  void Add(char ch) { m_Data.push_back(ch); }
5960  void Add(const char* pStr);
5961  void AddNewLine() { Add('\n'); }
5962  void AddNumber(uint32_t num);
5963  void AddNumber(uint64_t num);
5964  void AddPointer(const void* ptr);
5965 
5966 private:
5967  VmaVector< char, VmaStlAllocator<char> > m_Data;
5968 };
5969 
5970 void VmaStringBuilder::Add(const char* pStr)
5971 {
5972  const size_t strLen = strlen(pStr);
5973  if(strLen > 0)
5974  {
5975  const size_t oldCount = m_Data.size();
5976  m_Data.resize(oldCount + strLen);
5977  memcpy(m_Data.data() + oldCount, pStr, strLen);
5978  }
5979 }
5980 
5981 void VmaStringBuilder::AddNumber(uint32_t num)
5982 {
5983  char buf[11];
5984  VmaUint32ToStr(buf, sizeof(buf), num);
5985  Add(buf);
5986 }
5987 
5988 void VmaStringBuilder::AddNumber(uint64_t num)
5989 {
5990  char buf[21];
5991  VmaUint64ToStr(buf, sizeof(buf), num);
5992  Add(buf);
5993 }
5994 
5995 void VmaStringBuilder::AddPointer(const void* ptr)
5996 {
5997  char buf[21];
5998  VmaPtrToStr(buf, sizeof(buf), ptr);
5999  Add(buf);
6000 }
6001 
6002 #endif // #if VMA_STATS_STRING_ENABLED
6003 
6005 // VmaJsonWriter
6006 
6007 #if VMA_STATS_STRING_ENABLED
6008 
6009 class VmaJsonWriter
6010 {
6011  VMA_CLASS_NO_COPY(VmaJsonWriter)
6012 public:
6013  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6014  ~VmaJsonWriter();
6015 
6016  void BeginObject(bool singleLine = false);
6017  void EndObject();
6018 
6019  void BeginArray(bool singleLine = false);
6020  void EndArray();
6021 
6022  void WriteString(const char* pStr);
6023  void BeginString(const char* pStr = VMA_NULL);
6024  void ContinueString(const char* pStr);
6025  void ContinueString(uint32_t n);
6026  void ContinueString(uint64_t n);
6027  void ContinueString_Pointer(const void* ptr);
6028  void EndString(const char* pStr = VMA_NULL);
6029 
6030  void WriteNumber(uint32_t n);
6031  void WriteNumber(uint64_t n);
6032  void WriteBool(bool b);
6033  void WriteNull();
6034 
6035 private:
6036  static const char* const INDENT;
6037 
6038  enum COLLECTION_TYPE
6039  {
6040  COLLECTION_TYPE_OBJECT,
6041  COLLECTION_TYPE_ARRAY,
6042  };
6043  struct StackItem
6044  {
6045  COLLECTION_TYPE type;
6046  uint32_t valueCount;
6047  bool singleLineMode;
6048  };
6049 
6050  VmaStringBuilder& m_SB;
6051  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6052  bool m_InsideString;
6053 
6054  void BeginValue(bool isString);
6055  void WriteIndent(bool oneLess = false);
6056 };
6057 
6058 const char* const VmaJsonWriter::INDENT = " ";
6059 
6060 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6061  m_SB(sb),
6062  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6063  m_InsideString(false)
6064 {
6065 }
6066 
6067 VmaJsonWriter::~VmaJsonWriter()
6068 {
6069  VMA_ASSERT(!m_InsideString);
6070  VMA_ASSERT(m_Stack.empty());
6071 }
6072 
6073 void VmaJsonWriter::BeginObject(bool singleLine)
6074 {
6075  VMA_ASSERT(!m_InsideString);
6076 
6077  BeginValue(false);
6078  m_SB.Add('{');
6079 
6080  StackItem item;
6081  item.type = COLLECTION_TYPE_OBJECT;
6082  item.valueCount = 0;
6083  item.singleLineMode = singleLine;
6084  m_Stack.push_back(item);
6085 }
6086 
6087 void VmaJsonWriter::EndObject()
6088 {
6089  VMA_ASSERT(!m_InsideString);
6090 
6091  WriteIndent(true);
6092  m_SB.Add('}');
6093 
6094  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6095  m_Stack.pop_back();
6096 }
6097 
6098 void VmaJsonWriter::BeginArray(bool singleLine)
6099 {
6100  VMA_ASSERT(!m_InsideString);
6101 
6102  BeginValue(false);
6103  m_SB.Add('[');
6104 
6105  StackItem item;
6106  item.type = COLLECTION_TYPE_ARRAY;
6107  item.valueCount = 0;
6108  item.singleLineMode = singleLine;
6109  m_Stack.push_back(item);
6110 }
6111 
6112 void VmaJsonWriter::EndArray()
6113 {
6114  VMA_ASSERT(!m_InsideString);
6115 
6116  WriteIndent(true);
6117  m_SB.Add(']');
6118 
6119  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6120  m_Stack.pop_back();
6121 }
6122 
6123 void VmaJsonWriter::WriteString(const char* pStr)
6124 {
6125  BeginString(pStr);
6126  EndString();
6127 }
6128 
6129 void VmaJsonWriter::BeginString(const char* pStr)
6130 {
6131  VMA_ASSERT(!m_InsideString);
6132 
6133  BeginValue(true);
6134  m_SB.Add('"');
6135  m_InsideString = true;
6136  if(pStr != VMA_NULL && pStr[0] != '\0')
6137  {
6138  ContinueString(pStr);
6139  }
6140 }
6141 
6142 void VmaJsonWriter::ContinueString(const char* pStr)
6143 {
6144  VMA_ASSERT(m_InsideString);
6145 
6146  const size_t strLen = strlen(pStr);
6147  for(size_t i = 0; i < strLen; ++i)
6148  {
6149  char ch = pStr[i];
6150  if(ch == '\\')
6151  {
6152  m_SB.Add("\\\\");
6153  }
6154  else if(ch == '"')
6155  {
6156  m_SB.Add("\\\"");
6157  }
6158  else if(ch >= 32)
6159  {
6160  m_SB.Add(ch);
6161  }
6162  else switch(ch)
6163  {
6164  case '\b':
6165  m_SB.Add("\\b");
6166  break;
6167  case '\f':
6168  m_SB.Add("\\f");
6169  break;
6170  case '\n':
6171  m_SB.Add("\\n");
6172  break;
6173  case '\r':
6174  m_SB.Add("\\r");
6175  break;
6176  case '\t':
6177  m_SB.Add("\\t");
6178  break;
6179  default:
6180  VMA_ASSERT(0 && "Character not currently supported.");
6181  break;
6182  }
6183  }
6184 }
6185 
6186 void VmaJsonWriter::ContinueString(uint32_t n)
6187 {
6188  VMA_ASSERT(m_InsideString);
6189  m_SB.AddNumber(n);
6190 }
6191 
6192 void VmaJsonWriter::ContinueString(uint64_t n)
6193 {
6194  VMA_ASSERT(m_InsideString);
6195  m_SB.AddNumber(n);
6196 }
6197 
6198 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6199 {
6200  VMA_ASSERT(m_InsideString);
6201  m_SB.AddPointer(ptr);
6202 }
6203 
6204 void VmaJsonWriter::EndString(const char* pStr)
6205 {
6206  VMA_ASSERT(m_InsideString);
6207  if(pStr != VMA_NULL && pStr[0] != '\0')
6208  {
6209  ContinueString(pStr);
6210  }
6211  m_SB.Add('"');
6212  m_InsideString = false;
6213 }
6214 
6215 void VmaJsonWriter::WriteNumber(uint32_t n)
6216 {
6217  VMA_ASSERT(!m_InsideString);
6218  BeginValue(false);
6219  m_SB.AddNumber(n);
6220 }
6221 
6222 void VmaJsonWriter::WriteNumber(uint64_t n)
6223 {
6224  VMA_ASSERT(!m_InsideString);
6225  BeginValue(false);
6226  m_SB.AddNumber(n);
6227 }
6228 
6229 void VmaJsonWriter::WriteBool(bool b)
6230 {
6231  VMA_ASSERT(!m_InsideString);
6232  BeginValue(false);
6233  m_SB.Add(b ? "true" : "false");
6234 }
6235 
6236 void VmaJsonWriter::WriteNull()
6237 {
6238  VMA_ASSERT(!m_InsideString);
6239  BeginValue(false);
6240  m_SB.Add("null");
6241 }
6242 
6243 void VmaJsonWriter::BeginValue(bool isString)
6244 {
6245  if(!m_Stack.empty())
6246  {
6247  StackItem& currItem = m_Stack.back();
6248  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6249  currItem.valueCount % 2 == 0)
6250  {
6251  VMA_ASSERT(isString);
6252  }
6253 
6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6255  currItem.valueCount % 2 != 0)
6256  {
6257  m_SB.Add(": ");
6258  }
6259  else if(currItem.valueCount > 0)
6260  {
6261  m_SB.Add(", ");
6262  WriteIndent();
6263  }
6264  else
6265  {
6266  WriteIndent();
6267  }
6268  ++currItem.valueCount;
6269  }
6270 }
6271 
6272 void VmaJsonWriter::WriteIndent(bool oneLess)
6273 {
6274  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6275  {
6276  m_SB.AddNewLine();
6277 
6278  size_t count = m_Stack.size();
6279  if(count > 0 && oneLess)
6280  {
6281  --count;
6282  }
6283  for(size_t i = 0; i < count; ++i)
6284  {
6285  m_SB.Add(INDENT);
6286  }
6287  }
6288 }
6289 
6290 #endif // #if VMA_STATS_STRING_ENABLED
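// Usage sketch for the writer above (illustrative only; assumes allocation
// callbacks `pCallbacks` and a VmaStringBuilder `sb` built from them):
//
//   VmaJsonWriter json(pCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Count");   // keys must be strings (asserted in BeginValue)
//   json.WriteNumber(42u);       // picks the uint32_t overload
//   json.WriteString("Flags");
//   json.BeginArray(true);       // singleLine = true: no newlines or indentation
//   json.WriteBool(true);
//   json.WriteNull();
//   json.EndArray();
//   json.EndObject();            // sb now holds the equivalent of
//                                // {"Count": 42, "Flags": [true, null]}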
6291 
6292 ////////////////////////////////////////////////////////////////////////////////
6293 
6294 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6295 {
6296  if(IsUserDataString())
6297  {
6298  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6299 
6300  FreeUserDataString(hAllocator);
6301 
6302  if(pUserData != VMA_NULL)
6303  {
6304  const char* const newStrSrc = (char*)pUserData;
6305  const size_t newStrLen = strlen(newStrSrc);
6306  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6307  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6308  m_pUserData = newStrDst;
6309  }
6310  }
6311  else
6312  {
6313  m_pUserData = pUserData;
6314  }
6315 }
6316 
6317 void VmaAllocation_T::ChangeBlockAllocation(
6318  VmaAllocator hAllocator,
6319  VmaDeviceMemoryBlock* block,
6320  VkDeviceSize offset)
6321 {
6322  VMA_ASSERT(block != VMA_NULL);
6323  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6324 
6325  // Move mapping reference counter from old block to new block.
6326  if(block != m_BlockAllocation.m_Block)
6327  {
6328  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6329  if(IsPersistentMap())
6330  ++mapRefCount;
6331  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6332  block->Map(hAllocator, mapRefCount, VMA_NULL);
6333  }
6334 
6335  m_BlockAllocation.m_Block = block;
6336  m_BlockAllocation.m_Offset = offset;
6337 }
6338 
6339 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
6340 {
6341  VMA_ASSERT(newSize > 0);
6342  m_Size = newSize;
6343 }
6344 
6345 VkDeviceSize VmaAllocation_T::GetOffset() const
6346 {
6347  switch(m_Type)
6348  {
6349  case ALLOCATION_TYPE_BLOCK:
6350  return m_BlockAllocation.m_Offset;
6351  case ALLOCATION_TYPE_DEDICATED:
6352  return 0;
6353  default:
6354  VMA_ASSERT(0);
6355  return 0;
6356  }
6357 }
6358 
6359 VkDeviceMemory VmaAllocation_T::GetMemory() const
6360 {
6361  switch(m_Type)
6362  {
6363  case ALLOCATION_TYPE_BLOCK:
6364  return m_BlockAllocation.m_Block->GetDeviceMemory();
6365  case ALLOCATION_TYPE_DEDICATED:
6366  return m_DedicatedAllocation.m_hMemory;
6367  default:
6368  VMA_ASSERT(0);
6369  return VK_NULL_HANDLE;
6370  }
6371 }
6372 
6373 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6374 {
6375  switch(m_Type)
6376  {
6377  case ALLOCATION_TYPE_BLOCK:
6378  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6379  case ALLOCATION_TYPE_DEDICATED:
6380  return m_DedicatedAllocation.m_MemoryTypeIndex;
6381  default:
6382  VMA_ASSERT(0);
6383  return UINT32_MAX;
6384  }
6385 }
6386 
6387 void* VmaAllocation_T::GetMappedData() const
6388 {
6389  switch(m_Type)
6390  {
6391  case ALLOCATION_TYPE_BLOCK:
6392  if(m_MapCount != 0)
6393  {
6394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6395  VMA_ASSERT(pBlockData != VMA_NULL);
6396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6397  }
6398  else
6399  {
6400  return VMA_NULL;
6401  }
6402  break;
6403  case ALLOCATION_TYPE_DEDICATED:
6404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6405  return m_DedicatedAllocation.m_pMappedData;
6406  default:
6407  VMA_ASSERT(0);
6408  return VMA_NULL;
6409  }
6410 }
6411 
6412 bool VmaAllocation_T::CanBecomeLost() const
6413 {
6414  switch(m_Type)
6415  {
6416  case ALLOCATION_TYPE_BLOCK:
6417  return m_BlockAllocation.m_CanBecomeLost;
6418  case ALLOCATION_TYPE_DEDICATED:
6419  return false;
6420  default:
6421  VMA_ASSERT(0);
6422  return false;
6423  }
6424 }
6425 
6426 VmaPool VmaAllocation_T::GetPool() const
6427 {
6428  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6429  return m_BlockAllocation.m_hPool;
6430 }
6431 
6432 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6433 {
6434  VMA_ASSERT(CanBecomeLost());
6435 
6436  /*
6437  Warning: This is a carefully designed algorithm.
6438  Do not modify unless you really know what you're doing :)
6439  */
6440  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6441  for(;;)
6442  {
6443  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6444  {
6445  VMA_ASSERT(0);
6446  return false;
6447  }
6448  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6449  {
6450  return false;
6451  }
6452  else // Last use time earlier than current time.
6453  {
6454  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6455  {
6456  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6457  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6458  return true;
6459  }
6460  }
6461  }
6462 }
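// The loop above is a classic compare-and-swap retry: on failure,
// CompareExchangeLastUseFrameIndex refreshes localLastUseFrameIndex with the
// current value, so each iteration re-checks the preconditions against fresh
// data. The same shape with std::atomic (a sketch, not the actual
// VMA_ATOMIC_UINT32 wrapper used here):
//
//   uint32_t expected = lastUseFrameIndex.load();
//   for(;;)
//   {
//       if(expected + frameInUseCount >= currentFrameIndex)
//           return false; // still potentially in use - cannot be made lost
//       if(lastUseFrameIndex.compare_exchange_weak(expected, VMA_FRAME_INDEX_LOST))
//           return true;
//       // CAS failed: expected was refreshed with the current value - retry.
//   }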
6463 
6464 #if VMA_STATS_STRING_ENABLED
6465 
6466 // Names correspond to values of enum VmaSuballocationType.
6467 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6468  "FREE",
6469  "UNKNOWN",
6470  "BUFFER",
6471  "IMAGE_UNKNOWN",
6472  "IMAGE_LINEAR",
6473  "IMAGE_OPTIMAL",
6474 };
6475 
6476 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6477 {
6478  json.WriteString("Type");
6479  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6480 
6481  json.WriteString("Size");
6482  json.WriteNumber(m_Size);
6483 
6484  if(m_pUserData != VMA_NULL)
6485  {
6486  json.WriteString("UserData");
6487  if(IsUserDataString())
6488  {
6489  json.WriteString((const char*)m_pUserData);
6490  }
6491  else
6492  {
6493  json.BeginString();
6494  json.ContinueString_Pointer(m_pUserData);
6495  json.EndString();
6496  }
6497  }
6498 
6499  json.WriteString("CreationFrameIndex");
6500  json.WriteNumber(m_CreationFrameIndex);
6501 
6502  json.WriteString("LastUseFrameIndex");
6503  json.WriteNumber(GetLastUseFrameIndex());
6504 
6505  if(m_BufferImageUsage != 0)
6506  {
6507  json.WriteString("Usage");
6508  json.WriteNumber(m_BufferImageUsage);
6509  }
6510 }
6511 
6512 #endif
6513 
6514 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6515 {
6516  VMA_ASSERT(IsUserDataString());
6517  if(m_pUserData != VMA_NULL)
6518  {
6519  char* const oldStr = (char*)m_pUserData;
6520  const size_t oldStrLen = strlen(oldStr);
6521  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6522  m_pUserData = VMA_NULL;
6523  }
6524 }
6525 
6526 void VmaAllocation_T::BlockAllocMap()
6527 {
6528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6529 
6530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6531  {
6532  ++m_MapCount;
6533  }
6534  else
6535  {
6536  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6537  }
6538 }
6539 
6540 void VmaAllocation_T::BlockAllocUnmap()
6541 {
6542  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6543 
6544  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6545  {
6546  --m_MapCount;
6547  }
6548  else
6549  {
6550  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6551  }
6552 }
6553 
6554 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6555 {
6556  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6557 
6558  if(m_MapCount != 0)
6559  {
6560  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6561  {
6562  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6563  *ppData = m_DedicatedAllocation.m_pMappedData;
6564  ++m_MapCount;
6565  return VK_SUCCESS;
6566  }
6567  else
6568  {
6569  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6570  return VK_ERROR_MEMORY_MAP_FAILED;
6571  }
6572  }
6573  else
6574  {
6575  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6576  hAllocator->m_hDevice,
6577  m_DedicatedAllocation.m_hMemory,
6578  0, // offset
6579  VK_WHOLE_SIZE,
6580  0, // flags
6581  ppData);
6582  if(result == VK_SUCCESS)
6583  {
6584  m_DedicatedAllocation.m_pMappedData = *ppData;
6585  m_MapCount = 1;
6586  }
6587  return result;
6588  }
6589 }
6590 
6591 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6592 {
6593  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6594 
6595  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6596  {
6597  --m_MapCount;
6598  if(m_MapCount == 0)
6599  {
6600  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6601  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6602  hAllocator->m_hDevice,
6603  m_DedicatedAllocation.m_hMemory);
6604  }
6605  }
6606  else
6607  {
6608  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6609  }
6610 }
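// In the Map/Unmap pairs above, m_MapCount packs a plain reference count in
// the low bits together with the MAP_COUNT_FLAG_PERSISTENT_MAP bit, so
// (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) extracts the number of
// outstanding vmaMapMemory-style references, while a persistently mapped
// allocation keeps its mapping alive even with zero such references.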
6611 
6612 #if VMA_STATS_STRING_ENABLED
6613 
6614 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6615 {
6616  json.BeginObject();
6617 
6618  json.WriteString("Blocks");
6619  json.WriteNumber(stat.blockCount);
6620 
6621  json.WriteString("Allocations");
6622  json.WriteNumber(stat.allocationCount);
6623 
6624  json.WriteString("UnusedRanges");
6625  json.WriteNumber(stat.unusedRangeCount);
6626 
6627  json.WriteString("UsedBytes");
6628  json.WriteNumber(stat.usedBytes);
6629 
6630  json.WriteString("UnusedBytes");
6631  json.WriteNumber(stat.unusedBytes);
6632 
6633  if(stat.allocationCount > 1)
6634  {
6635  json.WriteString("AllocationSize");
6636  json.BeginObject(true);
6637  json.WriteString("Min");
6638  json.WriteNumber(stat.allocationSizeMin);
6639  json.WriteString("Avg");
6640  json.WriteNumber(stat.allocationSizeAvg);
6641  json.WriteString("Max");
6642  json.WriteNumber(stat.allocationSizeMax);
6643  json.EndObject();
6644  }
6645 
6646  if(stat.unusedRangeCount > 1)
6647  {
6648  json.WriteString("UnusedRangeSize");
6649  json.BeginObject(true);
6650  json.WriteString("Min");
6651  json.WriteNumber(stat.unusedRangeSizeMin);
6652  json.WriteString("Avg");
6653  json.WriteNumber(stat.unusedRangeSizeAvg);
6654  json.WriteString("Max");
6655  json.WriteNumber(stat.unusedRangeSizeMax);
6656  json.EndObject();
6657  }
6658 
6659  json.EndObject();
6660 }
6661 
6662 #endif // #if VMA_STATS_STRING_ENABLED
6663 
6664 struct VmaSuballocationItemSizeLess
6665 {
6666  bool operator()(
6667  const VmaSuballocationList::iterator lhs,
6668  const VmaSuballocationList::iterator rhs) const
6669  {
6670  return lhs->size < rhs->size;
6671  }
6672  bool operator()(
6673  const VmaSuballocationList::iterator lhs,
6674  VkDeviceSize rhsSize) const
6675  {
6676  return lhs->size < rhsSize;
6677  }
6678 };
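// The VkDeviceSize overload above enables heterogeneous binary search: a free
// list sorted by size can be probed with a raw size key, without building a
// dummy suballocation. The same comparator would work with std::lower_bound
// (sketch, assuming <algorithm> and a vector `freeBySize` of list iterators):
//
//   auto it = std::lower_bound(freeBySize.begin(), freeBySize.end(),
//       requiredSize, VmaSuballocationItemSizeLess());
//   // *it is the first free suballocation with size >= requiredSize, if any.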
6679 
6680 
6681 ////////////////////////////////////////////////////////////////////////////////
6682 // class VmaBlockMetadata
6683 
6684 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6685  m_Size(0),
6686  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6687 {
6688 }
6689 
6690 #if VMA_STATS_STRING_ENABLED
6691 
6692 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6693  VkDeviceSize unusedBytes,
6694  size_t allocationCount,
6695  size_t unusedRangeCount) const
6696 {
6697  json.BeginObject();
6698 
6699  json.WriteString("TotalBytes");
6700  json.WriteNumber(GetSize());
6701 
6702  json.WriteString("UnusedBytes");
6703  json.WriteNumber(unusedBytes);
6704 
6705  json.WriteString("Allocations");
6706  json.WriteNumber((uint64_t)allocationCount);
6707 
6708  json.WriteString("UnusedRanges");
6709  json.WriteNumber((uint64_t)unusedRangeCount);
6710 
6711  json.WriteString("Suballocations");
6712  json.BeginArray();
6713 }
6714 
6715 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6716  VkDeviceSize offset,
6717  VmaAllocation hAllocation) const
6718 {
6719  json.BeginObject(true);
6720 
6721  json.WriteString("Offset");
6722  json.WriteNumber(offset);
6723 
6724  hAllocation->PrintParameters(json);
6725 
6726  json.EndObject();
6727 }
6728 
6729 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6730  VkDeviceSize offset,
6731  VkDeviceSize size) const
6732 {
6733  json.BeginObject(true);
6734 
6735  json.WriteString("Offset");
6736  json.WriteNumber(offset);
6737 
6738  json.WriteString("Type");
6739  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6740 
6741  json.WriteString("Size");
6742  json.WriteNumber(size);
6743 
6744  json.EndObject();
6745 }
6746 
6747 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6748 {
6749  json.EndArray();
6750  json.EndObject();
6751 }
6752 
6753 #endif // #if VMA_STATS_STRING_ENABLED
6754 
6755 ////////////////////////////////////////////////////////////////////////////////
6756 // class VmaBlockMetadata_Generic
6757 
6758 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6759  VmaBlockMetadata(hAllocator),
6760  m_FreeCount(0),
6761  m_SumFreeSize(0),
6762  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6763  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6764 {
6765 }
6766 
6767 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6768 {
6769 }
6770 
6771 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6772 {
6773  VmaBlockMetadata::Init(size);
6774 
6775  m_FreeCount = 1;
6776  m_SumFreeSize = size;
6777 
6778  VmaSuballocation suballoc = {};
6779  suballoc.offset = 0;
6780  suballoc.size = size;
6781  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6782  suballoc.hAllocation = VK_NULL_HANDLE;
6783 
6784  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6785  m_Suballocations.push_back(suballoc);
6786  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6787  --suballocItem;
6788  m_FreeSuballocationsBySize.push_back(suballocItem);
6789 }
6790 
6791 bool VmaBlockMetadata_Generic::Validate() const
6792 {
6793  VMA_VALIDATE(!m_Suballocations.empty());
6794 
6795  // Expected offset of new suballocation as calculated from previous ones.
6796  VkDeviceSize calculatedOffset = 0;
6797  // Expected number of free suballocations as calculated from traversing their list.
6798  uint32_t calculatedFreeCount = 0;
6799  // Expected sum size of free suballocations as calculated from traversing their list.
6800  VkDeviceSize calculatedSumFreeSize = 0;
6801  // Expected number of free suballocations that should be registered in
6802  // m_FreeSuballocationsBySize calculated from traversing their list.
6803  size_t freeSuballocationsToRegister = 0;
6804  // True if previous visited suballocation was free.
6805  bool prevFree = false;
6806 
6807  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6808  suballocItem != m_Suballocations.cend();
6809  ++suballocItem)
6810  {
6811  const VmaSuballocation& subAlloc = *suballocItem;
6812 
6813  // Actual offset of this suballocation doesn't match the expected one.
6814  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6815 
6816  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6817  // Two adjacent free suballocations are invalid. They should be merged.
6818  VMA_VALIDATE(!prevFree || !currFree);
6819 
6820  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6821 
6822  if(currFree)
6823  {
6824  calculatedSumFreeSize += subAlloc.size;
6825  ++calculatedFreeCount;
6826  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6827  {
6828  ++freeSuballocationsToRegister;
6829  }
6830 
6831  // Margin required between allocations - every free space must be at least that large.
6832  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6833  }
6834  else
6835  {
6836  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6837  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6838 
6839  // Margin required between allocations - previous allocation must be free.
6840  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6841  }
6842 
6843  calculatedOffset += subAlloc.size;
6844  prevFree = currFree;
6845  }
6846 
6847  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6848  // match the expected one.
6849  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6850 
6851  VkDeviceSize lastSize = 0;
6852  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6853  {
6854  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6855 
6856  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6857  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6858  // They must be sorted by size ascending.
6859  VMA_VALIDATE(suballocItem->size >= lastSize);
6860 
6861  lastSize = suballocItem->size;
6862  }
6863 
6864  // Check if totals match calculated values.
6865  VMA_VALIDATE(ValidateFreeSuballocationList());
6866  VMA_VALIDATE(calculatedOffset == GetSize());
6867  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6868  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6869 
6870  return true;
6871 }
6872 
6873 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6874 {
6875  if(!m_FreeSuballocationsBySize.empty())
6876  {
6877  return m_FreeSuballocationsBySize.back()->size;
6878  }
6879  else
6880  {
6881  return 0;
6882  }
6883 }
6884 
6885 bool VmaBlockMetadata_Generic::IsEmpty() const
6886 {
6887  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6888 }
6889 
6890 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6891 {
6892  outInfo.blockCount = 1;
6893 
6894  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6895  outInfo.allocationCount = rangeCount - m_FreeCount;
6896  outInfo.unusedRangeCount = m_FreeCount;
6897 
6898  outInfo.unusedBytes = m_SumFreeSize;
6899  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6900 
6901  outInfo.allocationSizeMin = UINT64_MAX;
6902  outInfo.allocationSizeMax = 0;
6903  outInfo.unusedRangeSizeMin = UINT64_MAX;
6904  outInfo.unusedRangeSizeMax = 0;
6905 
6906  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6907  suballocItem != m_Suballocations.cend();
6908  ++suballocItem)
6909  {
6910  const VmaSuballocation& suballoc = *suballocItem;
6911  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6912  {
6913  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6914  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6915  }
6916  else
6917  {
6918  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6919  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6920  }
6921  }
6922 }
6923 
6924 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6925 {
6926  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6927 
6928  inoutStats.size += GetSize();
6929  inoutStats.unusedSize += m_SumFreeSize;
6930  inoutStats.allocationCount += rangeCount - m_FreeCount;
6931  inoutStats.unusedRangeCount += m_FreeCount;
6932  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6933 }
6934 
6935 #if VMA_STATS_STRING_ENABLED
6936 
6937 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6938 {
6939  PrintDetailedMap_Begin(json,
6940  m_SumFreeSize, // unusedBytes
6941  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6942  m_FreeCount); // unusedRangeCount
6943 
6944  size_t i = 0;
6945  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6946  suballocItem != m_Suballocations.cend();
6947  ++suballocItem, ++i)
6948  {
6949  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6950  {
6951  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6952  }
6953  else
6954  {
6955  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6956  }
6957  }
6958 
6959  PrintDetailedMap_End(json);
6960 }
6961 
6962 #endif // #if VMA_STATS_STRING_ENABLED
6963 
6964 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6965  uint32_t currentFrameIndex,
6966  uint32_t frameInUseCount,
6967  VkDeviceSize bufferImageGranularity,
6968  VkDeviceSize allocSize,
6969  VkDeviceSize allocAlignment,
6970  bool upperAddress,
6971  VmaSuballocationType allocType,
6972  bool canMakeOtherLost,
6973  uint32_t strategy,
6974  VmaAllocationRequest* pAllocationRequest)
6975 {
6976  VMA_ASSERT(allocSize > 0);
6977  VMA_ASSERT(!upperAddress);
6978  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6979  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6980  VMA_HEAVY_ASSERT(Validate());
6981 
6982  // There is not enough total free space in this block to fulfill the request: Early return.
6983  if(canMakeOtherLost == false &&
6984  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6985  {
6986  return false;
6987  }
6988 
6989  // New algorithm, efficiently searching freeSuballocationsBySize.
6990  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6991  if(freeSuballocCount > 0)
6992  {
6993  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6994  {
6995  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6996  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6997  m_FreeSuballocationsBySize.data(),
6998  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6999  allocSize + 2 * VMA_DEBUG_MARGIN,
7000  VmaSuballocationItemSizeLess());
7001  size_t index = it - m_FreeSuballocationsBySize.data();
7002  for(; index < freeSuballocCount; ++index)
7003  {
7004  if(CheckAllocation(
7005  currentFrameIndex,
7006  frameInUseCount,
7007  bufferImageGranularity,
7008  allocSize,
7009  allocAlignment,
7010  allocType,
7011  m_FreeSuballocationsBySize[index],
7012  false, // canMakeOtherLost
7013  &pAllocationRequest->offset,
7014  &pAllocationRequest->itemsToMakeLostCount,
7015  &pAllocationRequest->sumFreeSize,
7016  &pAllocationRequest->sumItemSize))
7017  {
7018  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7019  return true;
7020  }
7021  }
7022  }
7023  else // WORST_FIT, FIRST_FIT
7024  {
7025  // Search starting from the biggest suballocations.
7026  for(size_t index = freeSuballocCount; index--; )
7027  {
7028  if(CheckAllocation(
7029  currentFrameIndex,
7030  frameInUseCount,
7031  bufferImageGranularity,
7032  allocSize,
7033  allocAlignment,
7034  allocType,
7035  m_FreeSuballocationsBySize[index],
7036  false, // canMakeOtherLost
7037  &pAllocationRequest->offset,
7038  &pAllocationRequest->itemsToMakeLostCount,
7039  &pAllocationRequest->sumFreeSize,
7040  &pAllocationRequest->sumItemSize))
7041  {
7042  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7043  return true;
7044  }
7045  }
7046  }
7047  }
7048 
7049  if(canMakeOtherLost)
7050  {
7051  // Brute-force algorithm. TODO: Come up with something better.
7052 
7053  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7054  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7055 
7056  VmaAllocationRequest tmpAllocRequest = {};
7057  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7058  suballocIt != m_Suballocations.end();
7059  ++suballocIt)
7060  {
7061  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7062  suballocIt->hAllocation->CanBecomeLost())
7063  {
7064  if(CheckAllocation(
7065  currentFrameIndex,
7066  frameInUseCount,
7067  bufferImageGranularity,
7068  allocSize,
7069  allocAlignment,
7070  allocType,
7071  suballocIt,
7072  canMakeOtherLost,
7073  &tmpAllocRequest.offset,
7074  &tmpAllocRequest.itemsToMakeLostCount,
7075  &tmpAllocRequest.sumFreeSize,
7076  &tmpAllocRequest.sumItemSize))
7077  {
7078  tmpAllocRequest.item = suballocIt;
7079 
7080  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7081  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7082  {
7083  *pAllocationRequest = tmpAllocRequest;
7084  }
7085  }
7086  }
7087  }
7088 
7089  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7090  {
7091  return true;
7092  }
7093  }
7094 
7095  return false;
7096 }
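// Search order above, in summary: BEST_FIT binary-searches
// m_FreeSuballocationsBySize (sorted by size ascending) for the smallest free
// range that fits; WORST_FIT/FIRST_FIT walk the same vector from the largest
// range downward; and only when canMakeOtherLost is set does the brute-force
// scan over all suballocations run, keeping the candidate with the lowest
// CalcCost().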
7097 
7098 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7099  uint32_t currentFrameIndex,
7100  uint32_t frameInUseCount,
7101  VmaAllocationRequest* pAllocationRequest)
7102 {
7103  while(pAllocationRequest->itemsToMakeLostCount > 0)
7104  {
7105  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7106  {
7107  ++pAllocationRequest->item;
7108  }
7109  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7110  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7111  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7112  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7113  {
7114  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7115  --pAllocationRequest->itemsToMakeLostCount;
7116  }
7117  else
7118  {
7119  return false;
7120  }
7121  }
7122 
7123  VMA_HEAVY_ASSERT(Validate());
7124  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7125  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7126 
7127  return true;
7128 }
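// CheckAllocation() only counted how many allocations would have to be lost;
// the loop above actually performs it: starting at the chosen item, it marks
// each still-live allocation lost and lets FreeSuballocation() merge the
// freed ranges, so on success pAllocationRequest->item ends up pointing at a
// single contiguous free suballocation (asserted above).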
7129 
7130 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7131 {
7132  uint32_t lostAllocationCount = 0;
7133  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7134  it != m_Suballocations.end();
7135  ++it)
7136  {
7137  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7138  it->hAllocation->CanBecomeLost() &&
7139  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7140  {
7141  it = FreeSuballocation(it);
7142  ++lostAllocationCount;
7143  }
7144  }
7145  return lostAllocationCount;
7146 }
7147 
7148 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7149 {
7150  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7151  it != m_Suballocations.end();
7152  ++it)
7153  {
7154  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7155  {
7156  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7157  {
7158  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7159  return VK_ERROR_VALIDATION_FAILED_EXT;
7160  }
7161  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7162  {
7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7164  return VK_ERROR_VALIDATION_FAILED_EXT;
7165  }
7166  }
7167  }
7168 
7169  return VK_SUCCESS;
7170 }
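// Layout assumed by the corruption check above (only meaningful when
// VMA_DEBUG_MARGIN > 0): a magic value is assumed to have been written into
// the free margin on both sides of every used suballocation, at
// [offset - VMA_DEBUG_MARGIN] and at [offset + size]. An overwritten magic
// value means some write ran past the bounds of the neighboring allocation.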
7171 
7172 void VmaBlockMetadata_Generic::Alloc(
7173  const VmaAllocationRequest& request,
7174  VmaSuballocationType type,
7175  VkDeviceSize allocSize,
7176  bool upperAddress,
7177  VmaAllocation hAllocation)
7178 {
7179  VMA_ASSERT(!upperAddress);
7180  VMA_ASSERT(request.item != m_Suballocations.end());
7181  VmaSuballocation& suballoc = *request.item;
7182  // Given suballocation is a free block.
7183  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7184  // Given offset is inside this suballocation.
7185  VMA_ASSERT(request.offset >= suballoc.offset);
7186  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7187  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7188  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7189 
7190  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7191  // it to become used.
7192  UnregisterFreeSuballocation(request.item);
7193 
7194  suballoc.offset = request.offset;
7195  suballoc.size = allocSize;
7196  suballoc.type = type;
7197  suballoc.hAllocation = hAllocation;
7198 
7199  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7200  if(paddingEnd)
7201  {
7202  VmaSuballocation paddingSuballoc = {};
7203  paddingSuballoc.offset = request.offset + allocSize;
7204  paddingSuballoc.size = paddingEnd;
7205  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7206  VmaSuballocationList::iterator next = request.item;
7207  ++next;
7208  const VmaSuballocationList::iterator paddingEndItem =
7209  m_Suballocations.insert(next, paddingSuballoc);
7210  RegisterFreeSuballocation(paddingEndItem);
7211  }
7212 
7213  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7214  if(paddingBegin)
7215  {
7216  VmaSuballocation paddingSuballoc = {};
7217  paddingSuballoc.offset = request.offset - paddingBegin;
7218  paddingSuballoc.size = paddingBegin;
7219  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7220  const VmaSuballocationList::iterator paddingBeginItem =
7221  m_Suballocations.insert(request.item, paddingSuballoc);
7222  RegisterFreeSuballocation(paddingBeginItem);
7223  }
7224 
7225  // Update totals.
7226  m_FreeCount = m_FreeCount - 1;
7227  if(paddingBegin > 0)
7228  {
7229  ++m_FreeCount;
7230  }
7231  if(paddingEnd > 0)
7232  {
7233  ++m_FreeCount;
7234  }
7235  m_SumFreeSize -= allocSize;
7236 }
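// Effect of Alloc() on the free range it consumes, as an illustration:
//
//   before:  |<------------------- free ------------------->|
//   after:   |<-paddingBegin->|<---- alloc ---->|<-paddingEnd->|
//
// paddingBegin absorbs the alignment and debug-margin adjustment already
// baked into request.offset; each non-zero padding becomes a new free
// suballocation registered back into m_FreeSuballocationsBySize.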
7237 
7238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7239 {
7240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7241  suballocItem != m_Suballocations.end();
7242  ++suballocItem)
7243  {
7244  VmaSuballocation& suballoc = *suballocItem;
7245  if(suballoc.hAllocation == allocation)
7246  {
7247  FreeSuballocation(suballocItem);
7248  VMA_HEAVY_ASSERT(Validate());
7249  return;
7250  }
7251  }
7252  VMA_ASSERT(0 && "Not found!");
7253 }
7254 
7255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7256 {
7257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7258  suballocItem != m_Suballocations.end();
7259  ++suballocItem)
7260  {
7261  VmaSuballocation& suballoc = *suballocItem;
7262  if(suballoc.offset == offset)
7263  {
7264  FreeSuballocation(suballocItem);
7265  return;
7266  }
7267  }
7268  VMA_ASSERT(0 && "Not found!");
7269 }
7270 
7271 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
7272 {
7273  typedef VmaSuballocationList::iterator iter_type;
7274  for(iter_type suballocItem = m_Suballocations.begin();
7275  suballocItem != m_Suballocations.end();
7276  ++suballocItem)
7277  {
7278  VmaSuballocation& suballoc = *suballocItem;
7279  if(suballoc.hAllocation == alloc)
7280  {
7281  iter_type nextItem = suballocItem;
7282  ++nextItem;
7283 
7284  // Should have been ensured at a higher level.
7285  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
7286 
7287  // Shrinking.
7288  if(newSize < alloc->GetSize())
7289  {
7290  const VkDeviceSize sizeDiff = suballoc.size - newSize;
7291 
7292  // There is next item.
7293  if(nextItem != m_Suballocations.end())
7294  {
7295  // Next item is free.
7296  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7297  {
7298  // Grow this next item backward.
7299  UnregisterFreeSuballocation(nextItem);
7300  nextItem->offset -= sizeDiff;
7301  nextItem->size += sizeDiff;
7302  RegisterFreeSuballocation(nextItem);
7303  }
7304  // Next item is not free.
7305  else
7306  {
7307  // Create free item after current one.
7308  VmaSuballocation newFreeSuballoc;
7309  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7310  newFreeSuballoc.offset = suballoc.offset + newSize;
7311  newFreeSuballoc.size = sizeDiff;
7312  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7313  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
7314  RegisterFreeSuballocation(newFreeSuballocIt);
7315 
7316  ++m_FreeCount;
7317  }
7318  }
7319  // This is the last item.
7320  else
7321  {
7322  // Create free item at the end.
7323  VmaSuballocation newFreeSuballoc;
7324  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
7325  newFreeSuballoc.offset = suballoc.offset + newSize;
7326  newFreeSuballoc.size = sizeDiff;
7327  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7328  m_Suballocations.push_back(newFreeSuballoc);
7329 
7330  iter_type newFreeSuballocIt = m_Suballocations.end();
7331  RegisterFreeSuballocation(--newFreeSuballocIt);
7332 
7333  ++m_FreeCount;
7334  }
7335 
7336  suballoc.size = newSize;
7337  m_SumFreeSize += sizeDiff;
7338  }
7339  // Growing.
7340  else
7341  {
7342  const VkDeviceSize sizeDiff = newSize - suballoc.size;
7343 
7344  // There is next item.
7345  if(nextItem != m_Suballocations.end())
7346  {
7347  // Next item is free.
7348  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7349  {
7350  // There is not enough free space, including margin.
7351  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
7352  {
7353  return false;
7354  }
7355 
7356  // There is more free space than required.
7357  if(nextItem->size > sizeDiff)
7358  {
7359  // Move and shrink this next item.
7360  UnregisterFreeSuballocation(nextItem);
7361  nextItem->offset += sizeDiff;
7362  nextItem->size -= sizeDiff;
7363  RegisterFreeSuballocation(nextItem);
7364  }
7365  // There is exactly the amount of free space required.
7366  else
7367  {
7368  // Remove this next free item.
7369  UnregisterFreeSuballocation(nextItem);
7370  m_Suballocations.erase(nextItem);
7371  --m_FreeCount;
7372  }
7373  }
7374  // Next item is not free - there is no space to grow.
7375  else
7376  {
7377  return false;
7378  }
7379  }
7380  // This is the last item - there is no space to grow.
7381  else
7382  {
7383  return false;
7384  }
7385 
7386  suballoc.size = newSize;
7387  m_SumFreeSize -= sizeDiff;
7388  }
7389 
7390  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
7391  return true;
7392  }
7393  }
7394  VMA_ASSERT(0 && "Not found!");
7395  return false;
7396 }
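// In-place resize only negotiates with the range immediately after the
// allocation: shrinking donates the tail to a following free item (or creates
// one), and growing consumes the following free item if it is large enough,
// including VMA_DEBUG_MARGIN. The allocation's offset never changes, so no
// data has to move.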
7397 
7398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7399 {
7400  VkDeviceSize lastSize = 0;
7401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7402  {
7403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7404 
7405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7407  VMA_VALIDATE(it->size >= lastSize);
7408  lastSize = it->size;
7409  }
7410  return true;
7411 }
7412 
7413 bool VmaBlockMetadata_Generic::CheckAllocation(
7414  uint32_t currentFrameIndex,
7415  uint32_t frameInUseCount,
7416  VkDeviceSize bufferImageGranularity,
7417  VkDeviceSize allocSize,
7418  VkDeviceSize allocAlignment,
7419  VmaSuballocationType allocType,
7420  VmaSuballocationList::const_iterator suballocItem,
7421  bool canMakeOtherLost,
7422  VkDeviceSize* pOffset,
7423  size_t* itemsToMakeLostCount,
7424  VkDeviceSize* pSumFreeSize,
7425  VkDeviceSize* pSumItemSize) const
7426 {
7427  VMA_ASSERT(allocSize > 0);
7428  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7429  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7430  VMA_ASSERT(pOffset != VMA_NULL);
7431 
7432  *itemsToMakeLostCount = 0;
7433  *pSumFreeSize = 0;
7434  *pSumItemSize = 0;
7435 
7436  if(canMakeOtherLost)
7437  {
7438  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7439  {
7440  *pSumFreeSize = suballocItem->size;
7441  }
7442  else
7443  {
7444  if(suballocItem->hAllocation->CanBecomeLost() &&
7445  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7446  {
7447  ++*itemsToMakeLostCount;
7448  *pSumItemSize = suballocItem->size;
7449  }
7450  else
7451  {
7452  return false;
7453  }
7454  }
7455 
7456  // Remaining size is too small for this request: Early return.
7457  if(GetSize() - suballocItem->offset < allocSize)
7458  {
7459  return false;
7460  }
7461 
7462  // Start from offset equal to beginning of this suballocation.
7463  *pOffset = suballocItem->offset;
7464 
7465  // Apply VMA_DEBUG_MARGIN at the beginning.
7466  if(VMA_DEBUG_MARGIN > 0)
7467  {
7468  *pOffset += VMA_DEBUG_MARGIN;
7469  }
7470 
7471  // Apply alignment.
7472  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7473 
7474  // Check previous suballocations for BufferImageGranularity conflicts.
7475  // Make bigger alignment if necessary.
7476  if(bufferImageGranularity > 1)
7477  {
7478  bool bufferImageGranularityConflict = false;
7479  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7480  while(prevSuballocItem != m_Suballocations.cbegin())
7481  {
7482  --prevSuballocItem;
7483  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7484  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7485  {
7486  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7487  {
7488  bufferImageGranularityConflict = true;
7489  break;
7490  }
7491  }
7492  else
7493  // Already on previous page.
7494  break;
7495  }
7496  if(bufferImageGranularityConflict)
7497  {
7498  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7499  }
7500  }
7501 
7502  // Now that we have final *pOffset, check if we are past suballocItem.
7503  // If yes, return false - this function should be called for another suballocItem as starting point.
7504  if(*pOffset >= suballocItem->offset + suballocItem->size)
7505  {
7506  return false;
7507  }
7508 
7509  // Calculate padding at the beginning based on current offset.
7510  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7511 
7512  // Calculate required margin at the end.
7513  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7514 
7515  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7516  // Another early return check.
7517  if(suballocItem->offset + totalSize > GetSize())
7518  {
7519  return false;
7520  }
7521 
7522  // Advance lastSuballocItem until desired size is reached.
7523  // Update itemsToMakeLostCount.
7524  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7525  if(totalSize > suballocItem->size)
7526  {
7527  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7528  while(remainingSize > 0)
7529  {
7530  ++lastSuballocItem;
7531  if(lastSuballocItem == m_Suballocations.cend())
7532  {
7533  return false;
7534  }
7535  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7536  {
7537  *pSumFreeSize += lastSuballocItem->size;
7538  }
7539  else
7540  {
7541  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7542  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7543  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7544  {
7545  ++*itemsToMakeLostCount;
7546  *pSumItemSize += lastSuballocItem->size;
7547  }
7548  else
7549  {
7550  return false;
7551  }
7552  }
7553  remainingSize = (lastSuballocItem->size < remainingSize) ?
7554  remainingSize - lastSuballocItem->size : 0;
7555  }
7556  }
7557 
7558  // Check next suballocations for BufferImageGranularity conflicts.
7559  // If conflict exists, we must mark more allocations lost or fail.
7560  if(bufferImageGranularity > 1)
7561  {
7562  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7563  ++nextSuballocItem;
7564  while(nextSuballocItem != m_Suballocations.cend())
7565  {
7566  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7567  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7568  {
7569  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7570  {
7571  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7572  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7573  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7574  {
7575  ++*itemsToMakeLostCount;
7576  }
7577  else
7578  {
7579  return false;
7580  }
7581  }
7582  }
7583  else
7584  {
7585  // Already on next page.
7586  break;
7587  }
7588  ++nextSuballocItem;
7589  }
7590  }
7591  }
7592  else
7593  {
7594  const VmaSuballocation& suballoc = *suballocItem;
7595  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7596 
7597  *pSumFreeSize = suballoc.size;
7598 
7599  // Size of this suballocation is too small for this request: Early return.
7600  if(suballoc.size < allocSize)
7601  {
7602  return false;
7603  }
7604 
7605  // Start from offset equal to beginning of this suballocation.
7606  *pOffset = suballoc.offset;
7607 
7608  // Apply VMA_DEBUG_MARGIN at the beginning.
7609  if(VMA_DEBUG_MARGIN > 0)
7610  {
7611  *pOffset += VMA_DEBUG_MARGIN;
7612  }
7613 
7614  // Apply alignment.
7615  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7616 
7617  // Check previous suballocations for BufferImageGranularity conflicts.
7618  // Make bigger alignment if necessary.
7619  if(bufferImageGranularity > 1)
7620  {
7621  bool bufferImageGranularityConflict = false;
7622  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7623  while(prevSuballocItem != m_Suballocations.cbegin())
7624  {
7625  --prevSuballocItem;
7626  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7627  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7628  {
7629  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7630  {
7631  bufferImageGranularityConflict = true;
7632  break;
7633  }
7634  }
7635  else
7636  // Already on previous page.
7637  break;
7638  }
7639  if(bufferImageGranularityConflict)
7640  {
7641  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7642  }
7643  }
7644 
7645  // Calculate padding at the beginning based on current offset.
7646  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7647 
7648  // Calculate required margin at the end.
7649  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7650 
7651  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7652  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7653  {
7654  return false;
7655  }
7656 
7657  // Check next suballocations for BufferImageGranularity conflicts.
7658  // If conflict exists, allocation cannot be made here.
7659  if(bufferImageGranularity > 1)
7660  {
7661  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7662  ++nextSuballocItem;
7663  while(nextSuballocItem != m_Suballocations.cend())
7664  {
7665  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7666  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7667  {
7668  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7669  {
7670  return false;
7671  }
7672  }
7673  else
7674  {
7675  // Already on next page.
7676  break;
7677  }
7678  ++nextSuballocItem;
7679  }
7680  }
7681  }
7682 
7683  // All tests passed: Success. pOffset is already filled.
7684  return true;
7685 }
7686 
7687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7688 {
7689  VMA_ASSERT(item != m_Suballocations.end());
7690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7691 
7692  VmaSuballocationList::iterator nextItem = item;
7693  ++nextItem;
7694  VMA_ASSERT(nextItem != m_Suballocations.end());
7695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7696 
7697  item->size += nextItem->size;
7698  --m_FreeCount;
7699  m_Suballocations.erase(nextItem);
7700 }
7701 
7702 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7703 {
7704  // Change this suballocation to be marked as free.
7705  VmaSuballocation& suballoc = *suballocItem;
7706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7707  suballoc.hAllocation = VK_NULL_HANDLE;
7708 
7709  // Update totals.
7710  ++m_FreeCount;
7711  m_SumFreeSize += suballoc.size;
7712 
7713  // Merge with previous and/or next suballocation if it's also free.
7714  bool mergeWithNext = false;
7715  bool mergeWithPrev = false;
7716 
7717  VmaSuballocationList::iterator nextItem = suballocItem;
7718  ++nextItem;
7719  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7720  {
7721  mergeWithNext = true;
7722  }
7723 
7724  VmaSuballocationList::iterator prevItem = suballocItem;
7725  if(suballocItem != m_Suballocations.begin())
7726  {
7727  --prevItem;
7728  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7729  {
7730  mergeWithPrev = true;
7731  }
7732  }
7733 
7734  if(mergeWithNext)
7735  {
7736  UnregisterFreeSuballocation(nextItem);
7737  MergeFreeWithNext(suballocItem);
7738  }
7739 
7740  if(mergeWithPrev)
7741  {
7742  UnregisterFreeSuballocation(prevItem);
7743  MergeFreeWithNext(prevItem);
7744  RegisterFreeSuballocation(prevItem);
7745  return prevItem;
7746  }
7747  else
7748  {
7749  RegisterFreeSuballocation(suballocItem);
7750  return suballocItem;
7751  }
7752 }
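// FreeSuballocation() preserves the invariant checked in Validate(): no two
// adjacent free suballocations may exist. A freed item is merged with a free
// neighbor on either side, so the list always holds maximal free ranges, and
// the returned iterator refers to the final merged range.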
7753 
7754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7755 {
7756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7757  VMA_ASSERT(item->size > 0);
7758 
7759  // You may want to enable this validation at the beginning or at the end of
7760  // this function, depending on what you want to check.
7761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7762 
7763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7764  {
7765  if(m_FreeSuballocationsBySize.empty())
7766  {
7767  m_FreeSuballocationsBySize.push_back(item);
7768  }
7769  else
7770  {
7771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7772  }
7773  }
7774 
7775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7776 }
7777 
7778 
7779 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7780 {
7781  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7782  VMA_ASSERT(item->size > 0);
7783 
7784  // You may want to enable this validation at the beginning or at the end of
7785  // this function, depending on what you want to check.
7786  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7787 
7788  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7789  {
7790  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7791  m_FreeSuballocationsBySize.data(),
7792  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7793  item,
7794  VmaSuballocationItemSizeLess());
7795  for(size_t index = it - m_FreeSuballocationsBySize.data();
7796  index < m_FreeSuballocationsBySize.size();
7797  ++index)
7798  {
7799  if(m_FreeSuballocationsBySize[index] == item)
7800  {
7801  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7802  return;
7803  }
7804  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7805  }
7806  VMA_ASSERT(0 && "Not found.");
7807  }
7808 
7809  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7810 }
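// Since m_FreeSuballocationsBySize is sorted by size only, the binary search
// above lands at the first entry of the matching size; the loop then scans
// forward through the run of equal-sized entries to find the exact iterator,
// asserting "Not found" if the run ends first.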
7811 
7812 ////////////////////////////////////////////////////////////////////////////////
7813 // class VmaBlockMetadata_Linear
7814 
7815 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7816  VmaBlockMetadata(hAllocator),
7817  m_SumFreeSize(0),
7818  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7819  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7820  m_1stVectorIndex(0),
7821  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7822  m_1stNullItemsBeginCount(0),
7823  m_1stNullItemsMiddleCount(0),
7824  m_2ndNullItemsCount(0)
7825 {
7826 }
7827 
7828 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7829 {
7830 }
7831 
7832 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7833 {
7834  VmaBlockMetadata::Init(size);
7835  m_SumFreeSize = size;
7836 }
7837 
7838 bool VmaBlockMetadata_Linear::Validate() const
7839 {
7840  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7841  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7842 
7843  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7844  VMA_VALIDATE(!suballocations1st.empty() ||
7845  suballocations2nd.empty() ||
7846  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7847 
7848  if(!suballocations1st.empty())
7849  {
7850  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7851  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7852  // A null item at the end should have been removed by pop_back().
7853  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7854  }
7855  if(!suballocations2nd.empty())
7856  {
7857  // A null item at the end should have been removed by pop_back().
7858  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7859  }
7860 
7861  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7862  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7863 
7864  VkDeviceSize sumUsedSize = 0;
7865  const size_t suballoc1stCount = suballocations1st.size();
7866  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7867 
7868  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7869  {
7870  const size_t suballoc2ndCount = suballocations2nd.size();
7871  size_t nullItem2ndCount = 0;
7872  for(size_t i = 0; i < suballoc2ndCount; ++i)
7873  {
7874  const VmaSuballocation& suballoc = suballocations2nd[i];
7875  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7876 
7877  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7878  VMA_VALIDATE(suballoc.offset >= offset);
7879 
7880  if(!currFree)
7881  {
7882  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7883  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7884  sumUsedSize += suballoc.size;
7885  }
7886  else
7887  {
7888  ++nullItem2ndCount;
7889  }
7890 
7891  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7892  }
7893 
7894  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7895  }
7896 
7897  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7898  {
7899  const VmaSuballocation& suballoc = suballocations1st[i];
7900  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7901  suballoc.hAllocation == VK_NULL_HANDLE);
7902  }
7903 
7904  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7905 
7906  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7907  {
7908  const VmaSuballocation& suballoc = suballocations1st[i];
7909  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7910 
7911  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7912  VMA_VALIDATE(suballoc.offset >= offset);
7913  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7914 
7915  if(!currFree)
7916  {
7917  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7918  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7919  sumUsedSize += suballoc.size;
7920  }
7921  else
7922  {
7923  ++nullItem1stCount;
7924  }
7925 
7926  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7927  }
7928  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7929 
7930  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7931  {
7932  const size_t suballoc2ndCount = suballocations2nd.size();
7933  size_t nullItem2ndCount = 0;
7934  for(size_t i = suballoc2ndCount; i--; )
7935  {
7936  const VmaSuballocation& suballoc = suballocations2nd[i];
7937  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7938 
7939  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7940  VMA_VALIDATE(suballoc.offset >= offset);
7941 
7942  if(!currFree)
7943  {
7944  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7945  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7946  sumUsedSize += suballoc.size;
7947  }
7948  else
7949  {
7950  ++nullItem2ndCount;
7951  }
7952 
7953  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7954  }
7955 
7956  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7957  }
7958 
7959  VMA_VALIDATE(offset <= GetSize());
7960  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7961 
7962  return true;
7963 }
7964 
7965 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7966 {
7967  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7968  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7969 }
7970 
7971 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7972 {
7973  const VkDeviceSize size = GetSize();
7974 
7975  /*
7976  We don't consider gaps inside allocation vectors with freed allocations because
7977  they are not suitable for reuse in a linear allocator. We consider only space that
7978  is available for new allocations.
7979  */
7980  if(IsEmpty())
7981  {
7982  return size;
7983  }
7984 
7985  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7986 
7987  switch(m_2ndVectorMode)
7988  {
7989  case SECOND_VECTOR_EMPTY:
7990  /*
7991  Available space is after the end of 1st, as well as before the beginning of 1st (which
7992  would make it a ring buffer).
7993  */
7994  {
7995  const size_t suballocations1stCount = suballocations1st.size();
7996  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7997  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7998  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7999  return VMA_MAX(
8000  firstSuballoc.offset,
8001  size - (lastSuballoc.offset + lastSuballoc.size));
8002  }
8003  break;
8004 
8005  case SECOND_VECTOR_RING_BUFFER:
8006  /*
8007  Available space is only between end of 2nd and beginning of 1st.
8008  */
8009  {
8010  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8011  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8012  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8013  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8014  }
8015  break;
8016 
8017  case SECOND_VECTOR_DOUBLE_STACK:
8018  /*
8019  Available space is only between end of 1st and top of 2nd.
8020  */
8021  {
8022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8023  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8024  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8025  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8026  }
8027  break;
8028 
8029  default:
8030  VMA_ASSERT(0);
8031  return 0;
8032  }
8033 }
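/*
Worked example for the SECOND_VECTOR_EMPTY case above (illustrative numbers,
assuming VMA_MAX behaves like std::max):

    // 1000-byte block; first used suballocation starts at offset 100,
    // last one ends at offset 900.
    VkDeviceSize size = 1000;
    VkDeviceSize firstOffset = 100; // firstSuballoc.offset
    VkDeviceSize lastEnd = 900;     // lastSuballoc.offset + lastSuballoc.size
    VkDeviceSize largestGap = VMA_MAX(firstOffset, size - lastEnd); // == 100

The candidate ranges are [0, 100) before the 1st vector and [900, 1000) after
it; gaps between live allocations are ignored, as the comment above explains.
*/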
8034 
8035 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8036 {
8037  const VkDeviceSize size = GetSize();
8038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8039  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8040  const size_t suballoc1stCount = suballocations1st.size();
8041  const size_t suballoc2ndCount = suballocations2nd.size();
8042 
8043  outInfo.blockCount = 1;
8044  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8045  outInfo.unusedRangeCount = 0;
8046  outInfo.usedBytes = 0;
8047  outInfo.allocationSizeMin = UINT64_MAX;
8048  outInfo.allocationSizeMax = 0;
8049  outInfo.unusedRangeSizeMin = UINT64_MAX;
8050  outInfo.unusedRangeSizeMax = 0;
8051 
8052  VkDeviceSize lastOffset = 0;
8053 
8054  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8055  {
8056  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8057  size_t nextAlloc2ndIndex = 0;
8058  while(lastOffset < freeSpace2ndTo1stEnd)
8059  {
8060  // Find next non-null allocation or move nextAllocIndex to the end.
8061  while(nextAlloc2ndIndex < suballoc2ndCount &&
8062  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8063  {
8064  ++nextAlloc2ndIndex;
8065  }
8066 
8067  // Found non-null allocation.
8068  if(nextAlloc2ndIndex < suballoc2ndCount)
8069  {
8070  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8071 
8072  // 1. Process free space before this allocation.
8073  if(lastOffset < suballoc.offset)
8074  {
8075  // There is free space from lastOffset to suballoc.offset.
8076  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8077  ++outInfo.unusedRangeCount;
8078  outInfo.unusedBytes += unusedRangeSize;
8079  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8080  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8081  }
8082 
8083  // 2. Process this allocation.
8084  // There is allocation with suballoc.offset, suballoc.size.
8085  outInfo.usedBytes += suballoc.size;
8086  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8087  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8088 
8089  // 3. Prepare for next iteration.
8090  lastOffset = suballoc.offset + suballoc.size;
8091  ++nextAlloc2ndIndex;
8092  }
8093  // We are at the end.
8094  else
8095  {
8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8097  if(lastOffset < freeSpace2ndTo1stEnd)
8098  {
8099  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8100  ++outInfo.unusedRangeCount;
8101  outInfo.unusedBytes += unusedRangeSize;
8102  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8103  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8104  }
8105 
8106  // End of loop.
8107  lastOffset = freeSpace2ndTo1stEnd;
8108  }
8109  }
8110  }
8111 
8112  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8113  const VkDeviceSize freeSpace1stTo2ndEnd =
8114  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8115  while(lastOffset < freeSpace1stTo2ndEnd)
8116  {
8117  // Find next non-null allocation or move nextAllocIndex to the end.
8118  while(nextAlloc1stIndex < suballoc1stCount &&
8119  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8120  {
8121  ++nextAlloc1stIndex;
8122  }
8123 
8124  // Found non-null allocation.
8125  if(nextAlloc1stIndex < suballoc1stCount)
8126  {
8127  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8128 
8129  // 1. Process free space before this allocation.
8130  if(lastOffset < suballoc.offset)
8131  {
8132  // There is free space from lastOffset to suballoc.offset.
8133  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8134  ++outInfo.unusedRangeCount;
8135  outInfo.unusedBytes += unusedRangeSize;
8136  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8137  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8138  }
8139 
8140  // 2. Process this allocation.
8141  // There is allocation with suballoc.offset, suballoc.size.
8142  outInfo.usedBytes += suballoc.size;
8143  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8144  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8145 
8146  // 3. Prepare for next iteration.
8147  lastOffset = suballoc.offset + suballoc.size;
8148  ++nextAlloc1stIndex;
8149  }
8150  // We are at the end.
8151  else
8152  {
8153  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8154  if(lastOffset < freeSpace1stTo2ndEnd)
8155  {
8156  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8157  ++outInfo.unusedRangeCount;
8158  outInfo.unusedBytes += unusedRangeSize;
8159  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8160  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8161  }
8162 
8163  // End of loop.
8164  lastOffset = freeSpace1stTo2ndEnd;
8165  }
8166  }
8167 
8168  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8169  {
8170  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8171  while(lastOffset < size)
8172  {
8173  // Find next non-null allocation or move nextAllocIndex to the end.
8174  while(nextAlloc2ndIndex != SIZE_MAX &&
8175  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8176  {
8177  --nextAlloc2ndIndex;
8178  }
8179 
8180  // Found non-null allocation.
8181  if(nextAlloc2ndIndex != SIZE_MAX)
8182  {
8183  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8184 
8185  // 1. Process free space before this allocation.
8186  if(lastOffset < suballoc.offset)
8187  {
8188  // There is free space from lastOffset to suballoc.offset.
8189  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8190  ++outInfo.unusedRangeCount;
8191  outInfo.unusedBytes += unusedRangeSize;
8192  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8193  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8194  }
8195 
8196  // 2. Process this allocation.
8197  // There is allocation with suballoc.offset, suballoc.size.
8198  outInfo.usedBytes += suballoc.size;
8199  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8200  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8201 
8202  // 3. Prepare for next iteration.
8203  lastOffset = suballoc.offset + suballoc.size;
8204  --nextAlloc2ndIndex;
8205  }
8206  // We are at the end.
8207  else
8208  {
8209  // There is free space from lastOffset to size.
8210  if(lastOffset < size)
8211  {
8212  const VkDeviceSize unusedRangeSize = size - lastOffset;
8213  ++outInfo.unusedRangeCount;
8214  outInfo.unusedBytes += unusedRangeSize;
8215  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8216  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8217  }
8218 
8219  // End of loop.
8220  lastOffset = size;
8221  }
8222  }
8223  }
8224 
8225  outInfo.unusedBytes = size - outInfo.usedBytes;
8226 }
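/*
Note on the accumulator pattern above: the *Min fields start at UINT64_MAX and
are folded with VMA_MIN, while the *Max fields start at 0 and are folded with
VMA_MAX. A minimal sketch, assuming VMA_MIN/VMA_MAX behave like std::min/std::max:

    VkDeviceSize sizeMin = UINT64_MAX;
    VkDeviceSize sizeMax = 0;
    for(VkDeviceSize s : { (VkDeviceSize)64, (VkDeviceSize)256, (VkDeviceSize)128 })
    {
        sizeMin = VMA_MIN(sizeMin, s); // ends at 64
        sizeMax = VMA_MAX(sizeMax, s); // ends at 256
    }

A block with no allocations therefore reports min == UINT64_MAX and max == 0.
*/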
8227 
8228 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8229 {
8230  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8231  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8232  const VkDeviceSize size = GetSize();
8233  const size_t suballoc1stCount = suballocations1st.size();
8234  const size_t suballoc2ndCount = suballocations2nd.size();
8235 
8236  inoutStats.size += size;
8237 
8238  VkDeviceSize lastOffset = 0;
8239 
8240  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8241  {
8242  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8243  size_t nextAlloc2ndIndex = 0;
8244  while(lastOffset < freeSpace2ndTo1stEnd)
8245  {
8246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8247  while(nextAlloc2ndIndex < suballoc2ndCount &&
8248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8249  {
8250  ++nextAlloc2ndIndex;
8251  }
8252 
8253  // Found non-null allocation.
8254  if(nextAlloc2ndIndex < suballoc2ndCount)
8255  {
8256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8257 
8258  // 1. Process free space before this allocation.
8259  if(lastOffset < suballoc.offset)
8260  {
8261  // There is free space from lastOffset to suballoc.offset.
8262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8263  inoutStats.unusedSize += unusedRangeSize;
8264  ++inoutStats.unusedRangeCount;
8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8266  }
8267 
8268  // 2. Process this allocation.
8269  // There is allocation with suballoc.offset, suballoc.size.
8270  ++inoutStats.allocationCount;
8271 
8272  // 3. Prepare for next iteration.
8273  lastOffset = suballoc.offset + suballoc.size;
8274  ++nextAlloc2ndIndex;
8275  }
8276  // We are at the end.
8277  else
8278  {
8279  if(lastOffset < freeSpace2ndTo1stEnd)
8280  {
8281  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8282  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8283  inoutStats.unusedSize += unusedRangeSize;
8284  ++inoutStats.unusedRangeCount;
8285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8286  }
8287 
8288  // End of loop.
8289  lastOffset = freeSpace2ndTo1stEnd;
8290  }
8291  }
8292  }
8293 
8294  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8295  const VkDeviceSize freeSpace1stTo2ndEnd =
8296  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8297  while(lastOffset < freeSpace1stTo2ndEnd)
8298  {
8299  // Find next non-null allocation or move nextAllocIndex to the end.
8300  while(nextAlloc1stIndex < suballoc1stCount &&
8301  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8302  {
8303  ++nextAlloc1stIndex;
8304  }
8305 
8306  // Found non-null allocation.
8307  if(nextAlloc1stIndex < suballoc1stCount)
8308  {
8309  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8310 
8311  // 1. Process free space before this allocation.
8312  if(lastOffset < suballoc.offset)
8313  {
8314  // There is free space from lastOffset to suballoc.offset.
8315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8316  inoutStats.unusedSize += unusedRangeSize;
8317  ++inoutStats.unusedRangeCount;
8318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8319  }
8320 
8321  // 2. Process this allocation.
8322  // There is allocation with suballoc.offset, suballoc.size.
8323  ++inoutStats.allocationCount;
8324 
8325  // 3. Prepare for next iteration.
8326  lastOffset = suballoc.offset + suballoc.size;
8327  ++nextAlloc1stIndex;
8328  }
8329  // We are at the end.
8330  else
8331  {
8332  if(lastOffset < freeSpace1stTo2ndEnd)
8333  {
8334  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8335  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8336  inoutStats.unusedSize += unusedRangeSize;
8337  ++inoutStats.unusedRangeCount;
8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8339  }
8340 
8341  // End of loop.
8342  lastOffset = freeSpace1stTo2ndEnd;
8343  }
8344  }
8345 
8346  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8347  {
8348  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8349  while(lastOffset < size)
8350  {
8351  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8352  while(nextAlloc2ndIndex != SIZE_MAX &&
8353  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8354  {
8355  --nextAlloc2ndIndex;
8356  }
8357 
8358  // Found non-null allocation.
8359  if(nextAlloc2ndIndex != SIZE_MAX)
8360  {
8361  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8362 
8363  // 1. Process free space before this allocation.
8364  if(lastOffset < suballoc.offset)
8365  {
8366  // There is free space from lastOffset to suballoc.offset.
8367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8368  inoutStats.unusedSize += unusedRangeSize;
8369  ++inoutStats.unusedRangeCount;
8370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8371  }
8372 
8373  // 2. Process this allocation.
8374  // There is allocation with suballoc.offset, suballoc.size.
8375  ++inoutStats.allocationCount;
8376 
8377  // 3. Prepare for next iteration.
8378  lastOffset = suballoc.offset + suballoc.size;
8379  --nextAlloc2ndIndex;
8380  }
8381  // We are at the end.
8382  else
8383  {
8384  if(lastOffset < size)
8385  {
8386  // There is free space from lastOffset to size.
8387  const VkDeviceSize unusedRangeSize = size - lastOffset;
8388  inoutStats.unusedSize += unusedRangeSize;
8389  ++inoutStats.unusedRangeCount;
8390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8391  }
8392 
8393  // End of loop.
8394  lastOffset = size;
8395  }
8396  }
8397  }
8398 }
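/*
Unlike CalcAllocationStatInfo(), which fills a fresh VmaStatInfo for one block,
AddPoolStats() accumulates into a VmaPoolStats that may already contain totals
from other blocks of the same pool, so it only adds and maxes. A usage sketch,
assuming `metadata1` and `metadata2` are the metadata objects of two blocks:

    VmaPoolStats stats = {};
    metadata1.AddPoolStats(stats);
    metadata2.AddPoolStats(stats); // counters keep accumulating
*/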
8399 
8400 #if VMA_STATS_STRING_ENABLED
8401 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8402 {
8403  const VkDeviceSize size = GetSize();
8404  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8405  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8406  const size_t suballoc1stCount = suballocations1st.size();
8407  const size_t suballoc2ndCount = suballocations2nd.size();
8408 
8409  // FIRST PASS
8410 
8411  size_t unusedRangeCount = 0;
8412  VkDeviceSize usedBytes = 0;
8413 
8414  VkDeviceSize lastOffset = 0;
8415 
8416  size_t alloc2ndCount = 0;
8417  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8418  {
8419  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8420  size_t nextAlloc2ndIndex = 0;
8421  while(lastOffset < freeSpace2ndTo1stEnd)
8422  {
8423  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8424  while(nextAlloc2ndIndex < suballoc2ndCount &&
8425  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8426  {
8427  ++nextAlloc2ndIndex;
8428  }
8429 
8430  // Found non-null allocation.
8431  if(nextAlloc2ndIndex < suballoc2ndCount)
8432  {
8433  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8434 
8435  // 1. Process free space before this allocation.
8436  if(lastOffset < suballoc.offset)
8437  {
8438  // There is free space from lastOffset to suballoc.offset.
8439  ++unusedRangeCount;
8440  }
8441 
8442  // 2. Process this allocation.
8443  // There is allocation with suballoc.offset, suballoc.size.
8444  ++alloc2ndCount;
8445  usedBytes += suballoc.size;
8446 
8447  // 3. Prepare for next iteration.
8448  lastOffset = suballoc.offset + suballoc.size;
8449  ++nextAlloc2ndIndex;
8450  }
8451  // We are at the end.
8452  else
8453  {
8454  if(lastOffset < freeSpace2ndTo1stEnd)
8455  {
8456  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8457  ++unusedRangeCount;
8458  }
8459 
8460  // End of loop.
8461  lastOffset = freeSpace2ndTo1stEnd;
8462  }
8463  }
8464  }
8465 
8466  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8467  size_t alloc1stCount = 0;
8468  const VkDeviceSize freeSpace1stTo2ndEnd =
8469  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8470  while(lastOffset < freeSpace1stTo2ndEnd)
8471  {
8472  // Find next non-null allocation or move nextAllocIndex to the end.
8473  while(nextAlloc1stIndex < suballoc1stCount &&
8474  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8475  {
8476  ++nextAlloc1stIndex;
8477  }
8478 
8479  // Found non-null allocation.
8480  if(nextAlloc1stIndex < suballoc1stCount)
8481  {
8482  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8483 
8484  // 1. Process free space before this allocation.
8485  if(lastOffset < suballoc.offset)
8486  {
8487  // There is free space from lastOffset to suballoc.offset.
8488  ++unusedRangeCount;
8489  }
8490 
8491  // 2. Process this allocation.
8492  // There is allocation with suballoc.offset, suballoc.size.
8493  ++alloc1stCount;
8494  usedBytes += suballoc.size;
8495 
8496  // 3. Prepare for next iteration.
8497  lastOffset = suballoc.offset + suballoc.size;
8498  ++nextAlloc1stIndex;
8499  }
8500  // We are at the end.
8501  else
8502  {
8503  if(lastOffset < freeSpace1stTo2ndEnd)
8504  {
8505  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8506  ++unusedRangeCount;
8507  }
8508 
8509  // End of loop.
8510  lastOffset = freeSpace1stTo2ndEnd;
8511  }
8512  }
8513 
8514  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8515  {
8516  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8517  while(lastOffset < size)
8518  {
8519  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8520  while(nextAlloc2ndIndex != SIZE_MAX &&
8521  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8522  {
8523  --nextAlloc2ndIndex;
8524  }
8525 
8526  // Found non-null allocation.
8527  if(nextAlloc2ndIndex != SIZE_MAX)
8528  {
8529  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8530 
8531  // 1. Process free space before this allocation.
8532  if(lastOffset < suballoc.offset)
8533  {
8534  // There is free space from lastOffset to suballoc.offset.
8535  ++unusedRangeCount;
8536  }
8537 
8538  // 2. Process this allocation.
8539  // There is allocation with suballoc.offset, suballoc.size.
8540  ++alloc2ndCount;
8541  usedBytes += suballoc.size;
8542 
8543  // 3. Prepare for next iteration.
8544  lastOffset = suballoc.offset + suballoc.size;
8545  --nextAlloc2ndIndex;
8546  }
8547  // We are at the end.
8548  else
8549  {
8550  if(lastOffset < size)
8551  {
8552  // There is free space from lastOffset to size.
8553  ++unusedRangeCount;
8554  }
8555 
8556  // End of loop.
8557  lastOffset = size;
8558  }
8559  }
8560  }
8561 
8562  const VkDeviceSize unusedBytes = size - usedBytes;
8563  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8564 
8565  // SECOND PASS
8566  lastOffset = 0;
8567 
8568  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8569  {
8570  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8571  size_t nextAlloc2ndIndex = 0;
8572  while(lastOffset < freeSpace2ndTo1stEnd)
8573  {
8574  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8575  while(nextAlloc2ndIndex < suballoc2ndCount &&
8576  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8577  {
8578  ++nextAlloc2ndIndex;
8579  }
8580 
8581  // Found non-null allocation.
8582  if(nextAlloc2ndIndex < suballoc2ndCount)
8583  {
8584  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8585 
8586  // 1. Process free space before this allocation.
8587  if(lastOffset < suballoc.offset)
8588  {
8589  // There is free space from lastOffset to suballoc.offset.
8590  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8592  }
8593 
8594  // 2. Process this allocation.
8595  // There is allocation with suballoc.offset, suballoc.size.
8596  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8597 
8598  // 3. Prepare for next iteration.
8599  lastOffset = suballoc.offset + suballoc.size;
8600  ++nextAlloc2ndIndex;
8601  }
8602  // We are at the end.
8603  else
8604  {
8605  if(lastOffset < freeSpace2ndTo1stEnd)
8606  {
8607  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8608  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8609  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8610  }
8611 
8612  // End of loop.
8613  lastOffset = freeSpace2ndTo1stEnd;
8614  }
8615  }
8616  }
8617 
8618  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8619  while(lastOffset < freeSpace1stTo2ndEnd)
8620  {
8621  // Find next non-null allocation or move nextAllocIndex to the end.
8622  while(nextAlloc1stIndex < suballoc1stCount &&
8623  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8624  {
8625  ++nextAlloc1stIndex;
8626  }
8627 
8628  // Found non-null allocation.
8629  if(nextAlloc1stIndex < suballoc1stCount)
8630  {
8631  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8632 
8633  // 1. Process free space before this allocation.
8634  if(lastOffset < suballoc.offset)
8635  {
8636  // There is free space from lastOffset to suballoc.offset.
8637  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8638  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8639  }
8640 
8641  // 2. Process this allocation.
8642  // There is allocation with suballoc.offset, suballoc.size.
8643  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8644 
8645  // 3. Prepare for next iteration.
8646  lastOffset = suballoc.offset + suballoc.size;
8647  ++nextAlloc1stIndex;
8648  }
8649  // We are at the end.
8650  else
8651  {
8652  if(lastOffset < freeSpace1stTo2ndEnd)
8653  {
8654  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8655  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8656  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8657  }
8658 
8659  // End of loop.
8660  lastOffset = freeSpace1stTo2ndEnd;
8661  }
8662  }
8663 
8664  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8665  {
8666  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8667  while(lastOffset < size)
8668  {
8669  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8670  while(nextAlloc2ndIndex != SIZE_MAX &&
8671  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8672  {
8673  --nextAlloc2ndIndex;
8674  }
8675 
8676  // Found non-null allocation.
8677  if(nextAlloc2ndIndex != SIZE_MAX)
8678  {
8679  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8680 
8681  // 1. Process free space before this allocation.
8682  if(lastOffset < suballoc.offset)
8683  {
8684  // There is free space from lastOffset to suballoc.offset.
8685  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8687  }
8688 
8689  // 2. Process this allocation.
8690  // There is allocation with suballoc.offset, suballoc.size.
8691  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8692 
8693  // 3. Prepare for next iteration.
8694  lastOffset = suballoc.offset + suballoc.size;
8695  --nextAlloc2ndIndex;
8696  }
8697  // We are at the end.
8698  else
8699  {
8700  if(lastOffset < size)
8701  {
8702  // There is free space from lastOffset to size.
8703  const VkDeviceSize unusedRangeSize = size - lastOffset;
8704  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8705  }
8706 
8707  // End of loop.
8708  lastOffset = size;
8709  }
8710  }
8711  }
8712 
8713  PrintDetailedMap_End(json);
8714 }
8715 #endif // #if VMA_STATS_STRING_ENABLED
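/*
PrintDetailedMap() above is deliberately two-pass: the first pass only counts
allocations and unused ranges so PrintDetailedMap_Begin() can emit the summary
header, and the second pass replays the identical traversal to emit one JSON
entry per range. Both passes must visit ranges in the same order: 2nd vector
(ring-buffer part), then 1st vector, then 2nd vector (upper stack).
*/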
8716 
8717 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8718  uint32_t currentFrameIndex,
8719  uint32_t frameInUseCount,
8720  VkDeviceSize bufferImageGranularity,
8721  VkDeviceSize allocSize,
8722  VkDeviceSize allocAlignment,
8723  bool upperAddress,
8724  VmaSuballocationType allocType,
8725  bool canMakeOtherLost,
8726  uint32_t strategy,
8727  VmaAllocationRequest* pAllocationRequest)
8728 {
8729  VMA_ASSERT(allocSize > 0);
8730  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8731  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8732  VMA_HEAVY_ASSERT(Validate());
8733 
8734  const VkDeviceSize size = GetSize();
8735  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8736  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8737 
8738  if(upperAddress)
8739  {
8740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8741  {
8742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8743  return false;
8744  }
8745 
8746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8747  if(allocSize > size)
8748  {
8749  return false;
8750  }
8751  VkDeviceSize resultBaseOffset = size - allocSize;
8752  if(!suballocations2nd.empty())
8753  {
8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8755  resultBaseOffset = lastSuballoc.offset - allocSize;
8756  if(allocSize > lastSuballoc.offset)
8757  {
8758  return false;
8759  }
8760  }
8761 
8762  // Start from offset equal to end of free space.
8763  VkDeviceSize resultOffset = resultBaseOffset;
8764 
8765  // Apply VMA_DEBUG_MARGIN at the end.
8766  if(VMA_DEBUG_MARGIN > 0)
8767  {
8768  if(resultOffset < VMA_DEBUG_MARGIN)
8769  {
8770  return false;
8771  }
8772  resultOffset -= VMA_DEBUG_MARGIN;
8773  }
8774 
8775  // Apply alignment.
8776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8777 
8778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8779  // Make bigger alignment if necessary.
8780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8781  {
8782  bool bufferImageGranularityConflict = false;
8783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8784  {
8785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8787  {
8788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8789  {
8790  bufferImageGranularityConflict = true;
8791  break;
8792  }
8793  }
8794  else
8795  // Already on previous page.
8796  break;
8797  }
8798  if(bufferImageGranularityConflict)
8799  {
8800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8801  }
8802  }
8803 
8804  // There is enough free space.
8805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8806  suballocations1st.back().offset + suballocations1st.back().size :
8807  0;
8808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8809  {
8810  // Check previous suballocations for BufferImageGranularity conflicts.
8811  // If conflict exists, allocation cannot be made here.
8812  if(bufferImageGranularity > 1)
8813  {
8814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8815  {
8816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8818  {
8819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8820  {
8821  return false;
8822  }
8823  }
8824  else
8825  {
8826  // Already on next page.
8827  break;
8828  }
8829  }
8830  }
8831 
8832  // All tests passed: Success.
8833  pAllocationRequest->offset = resultOffset;
8834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8835  pAllocationRequest->sumItemSize = 0;
8836  // pAllocationRequest->item unused.
8837  pAllocationRequest->itemsToMakeLostCount = 0;
8838  return true;
8839  }
8840  }
8841  else // !upperAddress
8842  {
8843  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8844  {
8845  // Try to allocate at the end of 1st vector.
8846 
8847  VkDeviceSize resultBaseOffset = 0;
8848  if(!suballocations1st.empty())
8849  {
8850  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8851  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8852  }
8853 
8854  // Start from offset equal to beginning of free space.
8855  VkDeviceSize resultOffset = resultBaseOffset;
8856 
8857  // Apply VMA_DEBUG_MARGIN at the beginning.
8858  if(VMA_DEBUG_MARGIN > 0)
8859  {
8860  resultOffset += VMA_DEBUG_MARGIN;
8861  }
8862 
8863  // Apply alignment.
8864  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8865 
8866  // Check previous suballocations for BufferImageGranularity conflicts.
8867  // Make bigger alignment if necessary.
8868  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8869  {
8870  bool bufferImageGranularityConflict = false;
8871  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8872  {
8873  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8874  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8875  {
8876  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8877  {
8878  bufferImageGranularityConflict = true;
8879  break;
8880  }
8881  }
8882  else
8883  // Already on previous page.
8884  break;
8885  }
8886  if(bufferImageGranularityConflict)
8887  {
8888  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8889  }
8890  }
8891 
8892  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8893  suballocations2nd.back().offset : size;
8894 
8895  // There is enough free space at the end after alignment.
8896  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8897  {
8898  // Check next suballocations for BufferImageGranularity conflicts.
8899  // If conflict exists, allocation cannot be made here.
8900  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8901  {
8902  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8903  {
8904  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8905  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8906  {
8907  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8908  {
8909  return false;
8910  }
8911  }
8912  else
8913  {
8914  // Already on previous page.
8915  break;
8916  }
8917  }
8918  }
8919 
8920  // All tests passed: Success.
8921  pAllocationRequest->offset = resultOffset;
8922  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8923  pAllocationRequest->sumItemSize = 0;
8924  // pAllocationRequest->item unused.
8925  pAllocationRequest->itemsToMakeLostCount = 0;
8926  return true;
8927  }
8928  }
8929 
8930  // Wrap around to the end of the 2nd vector. Try to allocate there, treating the
8931  // beginning of the 1st vector as the end of free space.
8932  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8933  {
8934  VMA_ASSERT(!suballocations1st.empty());
8935 
8936  VkDeviceSize resultBaseOffset = 0;
8937  if(!suballocations2nd.empty())
8938  {
8939  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8940  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8941  }
8942 
8943  // Start from offset equal to beginning of free space.
8944  VkDeviceSize resultOffset = resultBaseOffset;
8945 
8946  // Apply VMA_DEBUG_MARGIN at the beginning.
8947  if(VMA_DEBUG_MARGIN > 0)
8948  {
8949  resultOffset += VMA_DEBUG_MARGIN;
8950  }
8951 
8952  // Apply alignment.
8953  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8954 
8955  // Check previous suballocations for BufferImageGranularity conflicts.
8956  // Make bigger alignment if necessary.
8957  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8958  {
8959  bool bufferImageGranularityConflict = false;
8960  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8961  {
8962  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8963  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8964  {
8965  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8966  {
8967  bufferImageGranularityConflict = true;
8968  break;
8969  }
8970  }
8971  else
8972  // Already on previous page.
8973  break;
8974  }
8975  if(bufferImageGranularityConflict)
8976  {
8977  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8978  }
8979  }
8980 
8981  pAllocationRequest->itemsToMakeLostCount = 0;
8982  pAllocationRequest->sumItemSize = 0;
8983  size_t index1st = m_1stNullItemsBeginCount;
8984 
8985  if(canMakeOtherLost)
8986  {
8987  while(index1st < suballocations1st.size() &&
8988  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8989  {
8990  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8991  const VmaSuballocation& suballoc = suballocations1st[index1st];
8992  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8993  {
8994  // No problem.
8995  }
8996  else
8997  {
8998  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8999  if(suballoc.hAllocation->CanBecomeLost() &&
9000  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9001  {
9002  ++pAllocationRequest->itemsToMakeLostCount;
9003  pAllocationRequest->sumItemSize += suballoc.size;
9004  }
9005  else
9006  {
9007  return false;
9008  }
9009  }
9010  ++index1st;
9011  }
9012 
9013  // Check next suballocations for BufferImageGranularity conflicts.
9014  // If conflict exists, we must mark more allocations lost or fail.
9015  if(bufferImageGranularity > 1)
9016  {
9017  while(index1st < suballocations1st.size())
9018  {
9019  const VmaSuballocation& suballoc = suballocations1st[index1st];
9020  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
9021  {
9022  if(suballoc.hAllocation != VK_NULL_HANDLE)
9023  {
9024  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
9025  if(suballoc.hAllocation->CanBecomeLost() &&
9026  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9027  {
9028  ++pAllocationRequest->itemsToMakeLostCount;
9029  pAllocationRequest->sumItemSize += suballoc.size;
9030  }
9031  else
9032  {
9033  return false;
9034  }
9035  }
9036  }
9037  else
9038  {
9039  // Already on next page.
9040  break;
9041  }
9042  ++index1st;
9043  }
9044  }
9045  }
9046 
9047  // There is enough free space at the end after alignment.
9048  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
9049  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
9050  {
9051  // Check next suballocations for BufferImageGranularity conflicts.
9052  // If conflict exists, allocation cannot be made here.
9053  if(bufferImageGranularity > 1)
9054  {
9055  for(size_t nextSuballocIndex = index1st;
9056  nextSuballocIndex < suballocations1st.size();
9057  nextSuballocIndex++)
9058  {
9059  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
9060  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9061  {
9062  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9063  {
9064  return false;
9065  }
9066  }
9067  else
9068  {
9069  // Already on next page.
9070  break;
9071  }
9072  }
9073  }
9074 
9075  // All tests passed: Success.
9076  pAllocationRequest->offset = resultOffset;
9077  pAllocationRequest->sumFreeSize =
9078  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
9079  - resultBaseOffset
9080  - pAllocationRequest->sumItemSize;
9081  // pAllocationRequest->item unused.
9082  return true;
9083  }
9084  }
9085  }
9086 
9087  return false;
9088 }
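/*
Alignment sketch, assuming VmaAlignUp/VmaAlignDown round an offset to the next
or previous multiple of a power-of-two alignment:

    VkDeviceSize down = VmaAlignDown((VkDeviceSize)1000, (VkDeviceSize)256); // 768
    VkDeviceSize up   = VmaAlignUp  ((VkDeviceSize)1000, (VkDeviceSize)256); // 1024

This is why the upper-address branch above first subtracts allocSize and then
aligns DOWN (the allocation grows toward lower addresses), while the 1st-vector
and ring-buffer branches skip VMA_DEBUG_MARGIN and then align UP.
*/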
9089 
9090 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
9091  uint32_t currentFrameIndex,
9092  uint32_t frameInUseCount,
9093  VmaAllocationRequest* pAllocationRequest)
9094 {
9095  if(pAllocationRequest->itemsToMakeLostCount == 0)
9096  {
9097  return true;
9098  }
9099 
9100  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
9101 
9102  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9103  size_t index1st = m_1stNullItemsBeginCount;
9104  size_t madeLostCount = 0;
9105  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
9106  {
9107  VMA_ASSERT(index1st < suballocations1st.size());
9108  VmaSuballocation& suballoc = suballocations1st[index1st];
9109  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9110  {
9111  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
9112  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
9113  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9114  {
9115  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9116  suballoc.hAllocation = VK_NULL_HANDLE;
9117  m_SumFreeSize += suballoc.size;
9118  ++m_1stNullItemsMiddleCount;
9119  ++madeLostCount;
9120  }
9121  else
9122  {
9123  return false;
9124  }
9125  }
9126  ++index1st;
9127  }
9128 
9129  CleanupAfterFree();
9130  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9131 
9132  return true;
9133 }
9134 
9135 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9136 {
9137  uint32_t lostAllocationCount = 0;
9138 
9139  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9140  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9141  {
9142  VmaSuballocation& suballoc = suballocations1st[i];
9143  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9144  suballoc.hAllocation->CanBecomeLost() &&
9145  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9146  {
9147  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9148  suballoc.hAllocation = VK_NULL_HANDLE;
9149  ++m_1stNullItemsMiddleCount;
9150  m_SumFreeSize += suballoc.size;
9151  ++lostAllocationCount;
9152  }
9153  }
9154 
9155  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9156  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9157  {
9158  VmaSuballocation& suballoc = suballocations2nd[i];
9159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9160  suballoc.hAllocation->CanBecomeLost() &&
9161  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9162  {
9163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9164  suballoc.hAllocation = VK_NULL_HANDLE;
9165  ++m_2ndNullItemsCount;
9166  ++lostAllocationCount;
9167  }
9168  }
9169 
9170  if(lostAllocationCount)
9171  {
9172  CleanupAfterFree();
9173  }
9174 
9175  return lostAllocationCount;
9176 }
9177 
9178 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9179 {
9180  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9181  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9182  {
9183  const VmaSuballocation& suballoc = suballocations1st[i];
9184  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9185  {
9186  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9187  {
9188  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9189  return VK_ERROR_VALIDATION_FAILED_EXT;
9190  }
9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9192  {
9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9194  return VK_ERROR_VALIDATION_FAILED_EXT;
9195  }
9196  }
9197  }
9198 
9199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9201  {
9202  const VmaSuballocation& suballoc = suballocations2nd[i];
9203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9204  {
9205  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9206  {
9207  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9208  return VK_ERROR_VALIDATION_FAILED_EXT;
9209  }
9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9211  {
9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9213  return VK_ERROR_VALIDATION_FAILED_EXT;
9214  }
9215  }
9216  }
9217 
9218  return VK_SUCCESS;
9219 }
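/*
Layout checked by CheckCorruption() above, with VMA_DEBUG_MARGIN > 0 and
corruption detection enabled (a schematic, not literal memory contents):

    [ ... | magic | allocation (offset, size) | magic | ... ]
            ^ validated at offset - VMA_DEBUG_MARGIN
                                                ^ validated at offset + size

Each margin is filled with a known magic value at allocation time; any write
past either end of the allocation overwrites it and trips the assertions above.
*/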
9220 
9221 void VmaBlockMetadata_Linear::Alloc(
9222  const VmaAllocationRequest& request,
9223  VmaSuballocationType type,
9224  VkDeviceSize allocSize,
9225  bool upperAddress,
9226  VmaAllocation hAllocation)
9227 {
9228  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9229 
9230  if(upperAddress)
9231  {
9232  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9233  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9234  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9235  suballocations2nd.push_back(newSuballoc);
9236  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9237  }
9238  else
9239  {
9240  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9241 
9242  // First allocation.
9243  if(suballocations1st.empty())
9244  {
9245  suballocations1st.push_back(newSuballoc);
9246  }
9247  else
9248  {
9249  // New allocation at the end of 1st vector.
9250  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9251  {
9252  // Check if it fits before the end of the block.
9253  VMA_ASSERT(request.offset + allocSize <= GetSize());
9254  suballocations1st.push_back(newSuballoc);
9255  }
9256  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9257  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9258  {
9259  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9260 
9261  switch(m_2ndVectorMode)
9262  {
9263  case SECOND_VECTOR_EMPTY:
9264  // First allocation from second part ring buffer.
9265  VMA_ASSERT(suballocations2nd.empty());
9266  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9267  break;
9268  case SECOND_VECTOR_RING_BUFFER:
9269  // 2-part ring buffer is already started.
9270  VMA_ASSERT(!suballocations2nd.empty());
9271  break;
9272  case SECOND_VECTOR_DOUBLE_STACK:
9273  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9274  break;
9275  default:
9276  VMA_ASSERT(0);
9277  }
9278 
9279  suballocations2nd.push_back(newSuballoc);
9280  }
9281  else
9282  {
9283  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9284  }
9285  }
9286  }
9287 
9288  m_SumFreeSize -= newSuballoc.size;
9289 }
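/*
State machine for m_2ndVectorMode, as implemented above: the 2nd vector starts
EMPTY; the first upper-address allocation switches it to DOUBLE_STACK, and the
first wrap-around allocation placed in front of the 1st vector switches it to
RING_BUFFER. The two modes are mutually exclusive for the lifetime of the block,
which is what the CRITICAL ERROR assertions enforce.
*/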
9290 
9291 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9292 {
9293  FreeAtOffset(allocation->GetOffset());
9294 }
9295 
9296 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9297 {
9298  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9299  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9300 
9301  if(!suballocations1st.empty())
9302  {
9303  // First allocation: Mark it as next empty at the beginning.
9304  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9305  if(firstSuballoc.offset == offset)
9306  {
9307  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9308  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9309  m_SumFreeSize += firstSuballoc.size;
9310  ++m_1stNullItemsBeginCount;
9311  CleanupAfterFree();
9312  return;
9313  }
9314  }
9315 
9316  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9317  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9318  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9319  {
9320  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9321  if(lastSuballoc.offset == offset)
9322  {
9323  m_SumFreeSize += lastSuballoc.size;
9324  suballocations2nd.pop_back();
9325  CleanupAfterFree();
9326  return;
9327  }
9328  }
9329  // Last allocation in 1st vector.
9330  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9331  {
9332  VmaSuballocation& lastSuballoc = suballocations1st.back();
9333  if(lastSuballoc.offset == offset)
9334  {
9335  m_SumFreeSize += lastSuballoc.size;
9336  suballocations1st.pop_back();
9337  CleanupAfterFree();
9338  return;
9339  }
9340  }
9341 
9342  // Item from the middle of 1st vector.
9343  {
9344  VmaSuballocation refSuballoc;
9345  refSuballoc.offset = offset;
9346  // Rest of members stays uninitialized intentionally for better performance.
9347  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9348  suballocations1st.begin() + m_1stNullItemsBeginCount,
9349  suballocations1st.end(),
9350  refSuballoc);
9351  if(it != suballocations1st.end())
9352  {
9353  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9354  it->hAllocation = VK_NULL_HANDLE;
9355  ++m_1stNullItemsMiddleCount;
9356  m_SumFreeSize += it->size;
9357  CleanupAfterFree();
9358  return;
9359  }
9360  }
9361 
9362  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9363  {
9364  // Item from the middle of 2nd vector.
9365  VmaSuballocation refSuballoc;
9366  refSuballoc.offset = offset;
9367  // Rest of members stays uninitialized intentionally for better performance.
9368  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9369  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9370  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9371  if(it != suballocations2nd.end())
9372  {
9373  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9374  it->hAllocation = VK_NULL_HANDLE;
9375  ++m_2ndNullItemsCount;
9376  m_SumFreeSize += it->size;
9377  CleanupAfterFree();
9378  return;
9379  }
9380  }
9381 
9382  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9383 }
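/*
Lookup sketch: VmaVectorFindSorted() binary-searches suballocations ordered by
offset, so freeing from the middle costs O(log n) to find the item and O(1) to
null it out; actual shrinking of the vectors is deferred to CleanupAfterFree().
Note the ordering assumption: the 1st vector and the ring-buffer 2nd vector are
sorted by ascending offset, while the double-stack 2nd vector grows downward and
is sorted by descending offset, hence VmaSuballocationOffsetGreater.
*/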
9384 
9385 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9386 {
9387  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9388  const size_t suballocCount = AccessSuballocations1st().size();
9389  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9390 }
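/*
The compaction heuristic above, restated: compact when there are more than 32
items and nullItemCount >= 1.5 * nonNullItemCount. Worked example: with 40 items
of which 25 are null, 25 * 2 = 50 >= (40 - 25) * 3 = 45, so compaction runs.
*/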
9391 
9392 void VmaBlockMetadata_Linear::CleanupAfterFree()
9393 {
9394  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9395  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9396 
9397  if(IsEmpty())
9398  {
9399  suballocations1st.clear();
9400  suballocations2nd.clear();
9401  m_1stNullItemsBeginCount = 0;
9402  m_1stNullItemsMiddleCount = 0;
9403  m_2ndNullItemsCount = 0;
9404  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9405  }
9406  else
9407  {
9408  const size_t suballoc1stCount = suballocations1st.size();
9409  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9410  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9411 
9412  // Find more null items at the beginning of 1st vector.
9413  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9414  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9415  {
9416  ++m_1stNullItemsBeginCount;
9417  --m_1stNullItemsMiddleCount;
9418  }
9419 
9420  // Find more null items at the end of 1st vector.
9421  while(m_1stNullItemsMiddleCount > 0 &&
9422  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9423  {
9424  --m_1stNullItemsMiddleCount;
9425  suballocations1st.pop_back();
9426  }
9427 
9428  // Find more null items at the end of 2nd vector.
9429  while(m_2ndNullItemsCount > 0 &&
9430  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9431  {
9432  --m_2ndNullItemsCount;
9433  suballocations2nd.pop_back();
9434  }
9435 
9436  if(ShouldCompact1st())
9437  {
9438  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9439  size_t srcIndex = m_1stNullItemsBeginCount;
9440  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9441  {
9442  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9443  {
9444  ++srcIndex;
9445  }
9446  if(dstIndex != srcIndex)
9447  {
9448  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9449  }
9450  ++srcIndex;
9451  }
9452  suballocations1st.resize(nonNullItemCount);
9453  m_1stNullItemsBeginCount = 0;
9454  m_1stNullItemsMiddleCount = 0;
9455  }
9456 
9457  // 2nd vector became empty.
9458  if(suballocations2nd.empty())
9459  {
9460  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9461  }
9462 
9463  // 1st vector became empty.
9464  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9465  {
9466  suballocations1st.clear();
9467  m_1stNullItemsBeginCount = 0;
9468 
9469  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9470  {
9471  // Swap 1st with 2nd. Now 2nd is empty.
9472  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9473  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9474  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9475  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9476  {
9477  ++m_1stNullItemsBeginCount;
9478  --m_1stNullItemsMiddleCount;
9479  }
9480  m_2ndNullItemsCount = 0;
9481  m_1stVectorIndex ^= 1;
9482  }
9483  }
9484  }
9485 
9486  VMA_HEAVY_ASSERT(Validate());
9487 }
9488 
9489 
9490 ////////////////////////////////////////////////////////////////////////////////
9491 // class VmaBlockMetadata_Buddy
9492 
9493 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9494  VmaBlockMetadata(hAllocator),
9495  m_Root(VMA_NULL),
9496  m_AllocationCount(0),
9497  m_FreeCount(1),
9498  m_SumFreeSize(0)
9499 {
9500  memset(m_FreeList, 0, sizeof(m_FreeList));
9501 }
9502 
9503 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9504 {
9505  DeleteNode(m_Root);
9506 }
9507 
9508 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9509 {
9510  VmaBlockMetadata::Init(size);
9511 
9512  m_UsableSize = VmaPrevPow2(size);
9513  m_SumFreeSize = m_UsableSize;
9514 
9515  // Calculate m_LevelCount.
9516  m_LevelCount = 1;
9517  while(m_LevelCount < MAX_LEVELS &&
9518  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9519  {
9520  ++m_LevelCount;
9521  }
9522 
9523  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9524  rootNode->offset = 0;
9525  rootNode->type = Node::TYPE_FREE;
9526  rootNode->parent = VMA_NULL;
9527  rootNode->buddy = VMA_NULL;
9528 
9529  m_Root = rootNode;
9530  AddToFreeListFront(0, rootNode);
9531 }
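/*
Worked example, assuming VmaPrevPow2 rounds down to a power of two: Init(1000)
sets m_UsableSize = 512, leaving 488 bytes of "unusable" tail reported via
GetUnusableSize(). Level 0 then covers 512 bytes, level 1 covers 256, and so on,
stopping before LevelToNodeSize(level) would drop below MIN_NODE_SIZE.
*/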
9532 
9533 bool VmaBlockMetadata_Buddy::Validate() const
9534 {
9535  // Validate tree.
9536  ValidationContext ctx;
9537  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9538  {
9539  VMA_VALIDATE(false && "ValidateNode failed.");
9540  }
9541  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9542  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9543 
9544  // Validate free node lists.
9545  for(uint32_t level = 0; level < m_LevelCount; ++level)
9546  {
9547  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9548  m_FreeList[level].front->free.prev == VMA_NULL);
9549 
9550  for(Node* node = m_FreeList[level].front;
9551  node != VMA_NULL;
9552  node = node->free.next)
9553  {
9554  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9555 
9556  if(node->free.next == VMA_NULL)
9557  {
9558  VMA_VALIDATE(m_FreeList[level].back == node);
9559  }
9560  else
9561  {
9562  VMA_VALIDATE(node->free.next->free.prev == node);
9563  }
9564  }
9565  }
9566 
9567  // Validate that free lists at higher levels are empty.
9568  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9569  {
9570  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9571  }
9572 
9573  return true;
9574 }
9575 
9576 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9577 {
9578  for(uint32_t level = 0; level < m_LevelCount; ++level)
9579  {
9580  if(m_FreeList[level].front != VMA_NULL)
9581  {
9582  return LevelToNodeSize(level);
9583  }
9584  }
9585  return 0;
9586 }
9587 
9588 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9589 {
9590  const VkDeviceSize unusableSize = GetUnusableSize();
9591 
9592  outInfo.blockCount = 1;
9593 
9594  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9595  outInfo.usedBytes = outInfo.unusedBytes = 0;
9596 
9597  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9598  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9599  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9600 
9601  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9602 
9603  if(unusableSize > 0)
9604  {
9605  ++outInfo.unusedRangeCount;
9606  outInfo.unusedBytes += unusableSize;
9607  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9608  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9609  }
9610 }
9611 
9612 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9613 {
9614  const VkDeviceSize unusableSize = GetUnusableSize();
9615 
9616  inoutStats.size += GetSize();
9617  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9618  inoutStats.allocationCount += m_AllocationCount;
9619  inoutStats.unusedRangeCount += m_FreeCount;
9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9621 
9622  if(unusableSize > 0)
9623  {
9624  ++inoutStats.unusedRangeCount;
9625  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9626  }
9627 }
9628 
9629 #if VMA_STATS_STRING_ENABLED
9630 
9631 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9632 {
9633  // TODO optimize
9634  VmaStatInfo stat;
9635  CalcAllocationStatInfo(stat);
9636 
9637  PrintDetailedMap_Begin(
9638  json,
9639  stat.unusedBytes,
9640  stat.allocationCount,
9641  stat.unusedRangeCount);
9642 
9643  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9644 
9645  const VkDeviceSize unusableSize = GetUnusableSize();
9646  if(unusableSize > 0)
9647  {
9648  PrintDetailedMap_UnusedRange(json,
9649  m_UsableSize, // offset
9650  unusableSize); // size
9651  }
9652 
9653  PrintDetailedMap_End(json);
9654 }
9655 
9656 #endif // #if VMA_STATS_STRING_ENABLED
9657 
9658 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9659  uint32_t currentFrameIndex,
9660  uint32_t frameInUseCount,
9661  VkDeviceSize bufferImageGranularity,
9662  VkDeviceSize allocSize,
9663  VkDeviceSize allocAlignment,
9664  bool upperAddress,
9665  VmaSuballocationType allocType,
9666  bool canMakeOtherLost,
9667  uint32_t strategy,
9668  VmaAllocationRequest* pAllocationRequest)
9669 {
9670  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9671 
9672  // Simple way to respect bufferImageGranularity. May be optimized some day.
9673  // Whenever it might be an OPTIMAL image...
9674  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9675  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9676  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9677  {
9678  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9679  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9680  }
9681 
9682  if(allocSize > m_UsableSize)
9683  {
9684  return false;
9685  }
9686 
9687  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9688  for(uint32_t level = targetLevel + 1; level--; )
9689  {
9690  for(Node* freeNode = m_FreeList[level].front;
9691  freeNode != VMA_NULL;
9692  freeNode = freeNode->free.next)
9693  {
9694  if(freeNode->offset % allocAlignment == 0)
9695  {
9696  pAllocationRequest->offset = freeNode->offset;
9697  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9698  pAllocationRequest->sumItemSize = 0;
9699  pAllocationRequest->itemsToMakeLostCount = 0;
9700  pAllocationRequest->customData = (void*)(uintptr_t)level;
9701  return true;
9702  }
9703  }
9704  }
9705 
9706  return false;
9707 }
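/*
Search order sketch: AllocSizeToLevel() maps the request to the deepest level
whose node size still fits it, and the loop walks from that level back toward
level 0 (larger nodes), taking the first free node whose offset satisfies the
alignment. A node found above the target level is split later, in Alloc().
Illustrative numbers: in a 512-byte block, allocSize = 100 first tries the level
with 128-byte nodes, then 256, then 512.
*/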
9708 
9709 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9710  uint32_t currentFrameIndex,
9711  uint32_t frameInUseCount,
9712  VmaAllocationRequest* pAllocationRequest)
9713 {
9714  /*
9715  Lost allocations are not supported in buddy allocator at the moment.
9716  Support might be added in the future.
9717  */
9718  return pAllocationRequest->itemsToMakeLostCount == 0;
9719 }
9720 
9721 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9722 {
9723  /*
9724  Lost allocations are not supported in buddy allocator at the moment.
9725  Support might be added in the future.
9726  */
9727  return 0;
9728 }
9729 
9730 void VmaBlockMetadata_Buddy::Alloc(
9731  const VmaAllocationRequest& request,
9732  VmaSuballocationType type,
9733  VkDeviceSize allocSize,
9734  bool upperAddress,
9735  VmaAllocation hAllocation)
9736 {
9737  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9738  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9739 
9740  Node* currNode = m_FreeList[currLevel].front;
9741  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9742  while(currNode->offset != request.offset)
9743  {
9744  currNode = currNode->free.next;
9745  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9746  }
9747 
9748  // Go down, splitting free nodes.
9749  while(currLevel < targetLevel)
9750  {
9751  // currNode is already first free node at currLevel.
9752  // Remove it from list of free nodes at this currLevel.
9753  RemoveFromFreeList(currLevel, currNode);
9754 
9755  const uint32_t childrenLevel = currLevel + 1;
9756 
9757  // Create two free sub-nodes.
9758  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9759  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9760 
9761  leftChild->offset = currNode->offset;
9762  leftChild->type = Node::TYPE_FREE;
9763  leftChild->parent = currNode;
9764  leftChild->buddy = rightChild;
9765 
9766  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9767  rightChild->type = Node::TYPE_FREE;
9768  rightChild->parent = currNode;
9769  rightChild->buddy = leftChild;
9770 
9771  // Convert current currNode to split type.
9772  currNode->type = Node::TYPE_SPLIT;
9773  currNode->split.leftChild = leftChild;
9774 
9775  // Add child nodes to free list. Order is important!
9776  AddToFreeListFront(childrenLevel, rightChild);
9777  AddToFreeListFront(childrenLevel, leftChild);
9778 
9779  ++m_FreeCount;
9780  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9781  ++currLevel;
9782  currNode = m_FreeList[currLevel].front;
9783 
9784  /*
9785  We can be sure that currNode, as left child of node previously split,
9786  also fulfills the alignment requirement.
9787  */
9788  }
9789 
9790  // Remove from free list.
9791  VMA_ASSERT(currLevel == targetLevel &&
9792  currNode != VMA_NULL &&
9793  currNode->type == Node::TYPE_FREE);
9794  RemoveFromFreeList(currLevel, currNode);
9795 
9796  // Convert to allocation node.
9797  currNode->type = Node::TYPE_ALLOCATION;
9798  currNode->allocation.alloc = hAllocation;
9799 
9800  ++m_AllocationCount;
9801  --m_FreeCount;
9802  m_SumFreeSize -= allocSize;
9803 }
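/*
Worked example of the split cascade above, assuming a 64-unit usable size
(node sizes 64/32/16 at levels 0/1/2) and a 12-unit request whose target
level is 2:

  level 0: [0..64)            -> TYPE_SPLIT
  level 1: [0..32) [32..64)   -> left child split again, [32..64) stays free
  level 2: [0..16) [16..32)   -> [0..16) becomes TYPE_ALLOCATION

Each split removes one free node and adds two, hence ++m_FreeCount per
iteration; the final conversion does --m_FreeCount. m_SumFreeSize decreases
by allocSize (12) rather than the node size (16), so the internal padding
[12..16) still counts as free space and is reported as an unused range by
CalcAllocationStatInfoNode().
*/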
9804 
9805 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9806 {
9807  if(node->type == Node::TYPE_SPLIT)
9808  {
9809  DeleteNode(node->split.leftChild->buddy);
9810  DeleteNode(node->split.leftChild);
9811  }
9812 
9813  vma_delete(GetAllocationCallbacks(), node);
9814 }
9815 
9816 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9817 {
9818  VMA_VALIDATE(level < m_LevelCount);
9819  VMA_VALIDATE(curr->parent == parent);
9820  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9821  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9822  switch(curr->type)
9823  {
9824  case Node::TYPE_FREE:
9825  // curr->free.prev, next are validated separately.
9826  ctx.calculatedSumFreeSize += levelNodeSize;
9827  ++ctx.calculatedFreeCount;
9828  break;
9829  case Node::TYPE_ALLOCATION:
9830  ++ctx.calculatedAllocationCount;
9831  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9832  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9833  break;
9834  case Node::TYPE_SPLIT:
9835  {
9836  const uint32_t childrenLevel = level + 1;
9837  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9838  const Node* const leftChild = curr->split.leftChild;
9839  VMA_VALIDATE(leftChild != VMA_NULL);
9840  VMA_VALIDATE(leftChild->offset == curr->offset);
9841  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9842  {
9843  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9844  }
9845  const Node* const rightChild = leftChild->buddy;
9846  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9847  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9848  {
9849  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9850  }
9851  }
9852  break;
9853  default:
9854  return false;
9855  }
9856 
9857  return true;
9858 }
9859 
9860 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9861 {
9862  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
9863  uint32_t level = 0;
9864  VkDeviceSize currLevelNodeSize = m_UsableSize;
9865  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9866  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9867  {
9868  ++level;
9869  currLevelNodeSize = nextLevelNodeSize;
9870  nextLevelNodeSize = currLevelNodeSize >> 1;
9871  }
9872  return level;
9873 }
9874 
9875 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9876 {
9877  // Find node and level.
9878  Node* node = m_Root;
9879  VkDeviceSize nodeOffset = 0;
9880  uint32_t level = 0;
9881  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9882  while(node->type == Node::TYPE_SPLIT)
9883  {
9884  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9885  if(offset < nodeOffset + nextLevelSize)
9886  {
9887  node = node->split.leftChild;
9888  }
9889  else
9890  {
9891  node = node->split.leftChild->buddy;
9892  nodeOffset += nextLevelSize;
9893  }
9894  ++level;
9895  levelNodeSize = nextLevelSize;
9896  }
9897 
9898  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9899  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9900 
9901  ++m_FreeCount;
9902  --m_AllocationCount;
9903  m_SumFreeSize += alloc->GetSize();
9904 
9905  node->type = Node::TYPE_FREE;
9906 
9907  // Join free nodes if possible.
9908  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9909  {
9910  RemoveFromFreeList(level, node->buddy);
9911  Node* const parent = node->parent;
9912 
9913  vma_delete(GetAllocationCallbacks(), node->buddy);
9914  vma_delete(GetAllocationCallbacks(), node);
9915  parent->type = Node::TYPE_FREE;
9916 
9917  node = parent;
9918  --level;
9919  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9920  --m_FreeCount;
9921  }
9922 
9923  AddToFreeListFront(level, node);
9924 }
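/*
Side note on the descent above: because all node sizes are powers of 2 and
every node is aligned to its own size, the buddy of a node at `offset` with
size `nodeSize` always lives at `offset ^ nodeSize`. The tree walk is still
needed to find the node's level and parent, but the XOR identity is a cheap
way to sanity-check it. Minimal sketch (hypothetical helper name):
*/
static VkDeviceSize ToyBuddyOffset(VkDeviceSize offset, VkDeviceSize nodeSize)
{
    // Flipping the bit that distinguishes left from right child gives the buddy.
    // E.g. with nodeSize = 16: the buddy of offset 32 is 48 and vice versa.
    return offset ^ nodeSize;
}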
9925 
9926 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9927 {
9928  switch(node->type)
9929  {
9930  case Node::TYPE_FREE:
9931  ++outInfo.unusedRangeCount;
9932  outInfo.unusedBytes += levelNodeSize;
9933  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9934  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9935  break;
9936  case Node::TYPE_ALLOCATION:
9937  {
9938  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9939  ++outInfo.allocationCount;
9940  outInfo.usedBytes += allocSize;
9941  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9942  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9943 
9944  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9945  if(unusedRangeSize > 0)
9946  {
9947  ++outInfo.unusedRangeCount;
9948  outInfo.unusedBytes += unusedRangeSize;
9949  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9950  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9951  }
9952  }
9953  break;
9954  case Node::TYPE_SPLIT:
9955  {
9956  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9957  const Node* const leftChild = node->split.leftChild;
9958  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9959  const Node* const rightChild = leftChild->buddy;
9960  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9961  }
9962  break;
9963  default:
9964  VMA_ASSERT(0);
9965  }
9966 }
9967 
9968 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9969 {
9970  VMA_ASSERT(node->type == Node::TYPE_FREE);
9971 
9972  // List is empty.
9973  Node* const frontNode = m_FreeList[level].front;
9974  if(frontNode == VMA_NULL)
9975  {
9976  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9977  node->free.prev = node->free.next = VMA_NULL;
9978  m_FreeList[level].front = m_FreeList[level].back = node;
9979  }
9980  else
9981  {
9982  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9983  node->free.prev = VMA_NULL;
9984  node->free.next = frontNode;
9985  frontNode->free.prev = node;
9986  m_FreeList[level].front = node;
9987  }
9988 }
9989 
9990 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9991 {
9992  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9993 
9994  // It is at the front.
9995  if(node->free.prev == VMA_NULL)
9996  {
9997  VMA_ASSERT(m_FreeList[level].front == node);
9998  m_FreeList[level].front = node->free.next;
9999  }
10000  else
10001  {
10002  Node* const prevFreeNode = node->free.prev;
10003  VMA_ASSERT(prevFreeNode->free.next == node);
10004  prevFreeNode->free.next = node->free.next;
10005  }
10006 
10007  // It is at the back.
10008  if(node->free.next == VMA_NULL)
10009  {
10010  VMA_ASSERT(m_FreeList[level].back == node);
10011  m_FreeList[level].back = node->free.prev;
10012  }
10013  else
10014  {
10015  Node* const nextFreeNode = node->free.next;
10016  VMA_ASSERT(nextFreeNode->free.prev == node);
10017  nextFreeNode->free.prev = node->free.prev;
10018  }
10019 }
10020 
10021 #if VMA_STATS_STRING_ENABLED
10022 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10023 {
10024  switch(node->type)
10025  {
10026  case Node::TYPE_FREE:
10027  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10028  break;
10029  case Node::TYPE_ALLOCATION:
10030  {
10031  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10032  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10033  if(allocSize < levelNodeSize)
10034  {
10035  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10036  }
10037  }
10038  break;
10039  case Node::TYPE_SPLIT:
10040  {
10041  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10042  const Node* const leftChild = node->split.leftChild;
10043  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10044  const Node* const rightChild = leftChild->buddy;
10045  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10046  }
10047  break;
10048  default:
10049  VMA_ASSERT(0);
10050  }
10051 }
10052 #endif // #if VMA_STATS_STRING_ENABLED
10053 
10054 
10055 ////////////////////////////////////////////////////////////////////////////////
10056 // class VmaDeviceMemoryBlock
10057 
10058 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
10059  m_pMetadata(VMA_NULL),
10060  m_MemoryTypeIndex(UINT32_MAX),
10061  m_Id(0),
10062  m_hMemory(VK_NULL_HANDLE),
10063  m_MapCount(0),
10064  m_pMappedData(VMA_NULL)
10065 {
10066 }
10067 
10068 void VmaDeviceMemoryBlock::Init(
10069  VmaAllocator hAllocator,
10070  uint32_t newMemoryTypeIndex,
10071  VkDeviceMemory newMemory,
10072  VkDeviceSize newSize,
10073  uint32_t id,
10074  uint32_t algorithm)
10075 {
10076  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10077 
10078  m_MemoryTypeIndex = newMemoryTypeIndex;
10079  m_Id = id;
10080  m_hMemory = newMemory;
10081 
10082  switch(algorithm)
10083  {
10084  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
10085  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10086  break;
10087  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
10088  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10089  break;
10090  default:
10091  VMA_ASSERT(0);
10092  // Fall-through.
10093  case 0:
10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10095  }
10096  m_pMetadata->Init(newSize);
10097 }
10098 
10099 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
10100 {
10101  // This is the most important assert in the entire library.
10102  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
10103  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
10104 
10105  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
10106  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
10107  m_hMemory = VK_NULL_HANDLE;
10108 
10109  vma_delete(allocator, m_pMetadata);
10110  m_pMetadata = VMA_NULL;
10111 }
10112 
10113 bool VmaDeviceMemoryBlock::Validate() const
10114 {
10115  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10116  (m_pMetadata->GetSize() != 0));
10117 
10118  return m_pMetadata->Validate();
10119 }
10120 
10121 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10122 {
10123  void* pData = nullptr;
10124  VkResult res = Map(hAllocator, 1, &pData);
10125  if(res != VK_SUCCESS)
10126  {
10127  return res;
10128  }
10129 
10130  res = m_pMetadata->CheckCorruption(pData);
10131 
10132  Unmap(hAllocator, 1);
10133 
10134  return res;
10135 }
10136 
10137 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10138 {
10139  if(count == 0)
10140  {
10141  return VK_SUCCESS;
10142  }
10143 
10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10145  if(m_MapCount != 0)
10146  {
10147  m_MapCount += count;
10148  VMA_ASSERT(m_pMappedData != VMA_NULL);
10149  if(ppData != VMA_NULL)
10150  {
10151  *ppData = m_pMappedData;
10152  }
10153  return VK_SUCCESS;
10154  }
10155  else
10156  {
10157  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10158  hAllocator->m_hDevice,
10159  m_hMemory,
10160  0, // offset
10161  VK_WHOLE_SIZE,
10162  0, // flags
10163  &m_pMappedData);
10164  if(result == VK_SUCCESS)
10165  {
10166  if(ppData != VMA_NULL)
10167  {
10168  *ppData = m_pMappedData;
10169  }
10170  m_MapCount = count;
10171  }
10172  return result;
10173  }
10174 }
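/*
Usage sketch of the reference-counted mapping implemented above, through the
public API; `allocator` and `alloc` are assumed to be valid. vkMapMemory /
vkUnmapMemory are issued only on the 0 -> 1 and 1 -> 0 transitions of
m_MapCount, so nested mapping of allocations that share one VkDeviceMemory is
safe. Return values should be checked in real code.
*/
static void ToyMappingExample(VmaAllocator allocator, VmaAllocation alloc)
{
    void* pData1 = VMA_NULL;
    void* pData2 = VMA_NULL;
    vmaMapMemory(allocator, alloc, &pData1); // map count 0 -> 1: calls vkMapMemory.
    vmaMapMemory(allocator, alloc, &pData2); // 1 -> 2: returns the cached pointer.
    // pData1 == pData2 here.
    vmaUnmapMemory(allocator, alloc);        // 2 -> 1: no Vulkan call.
    vmaUnmapMemory(allocator, alloc);        // 1 -> 0: calls vkUnmapMemory.
}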
10175 
10176 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10177 {
10178  if(count == 0)
10179  {
10180  return;
10181  }
10182 
10183  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10184  if(m_MapCount >= count)
10185  {
10186  m_MapCount -= count;
10187  if(m_MapCount == 0)
10188  {
10189  m_pMappedData = VMA_NULL;
10190  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10191  }
10192  }
10193  else
10194  {
10195  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10196  }
10197 }
10198 
10199 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10200 {
10201  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10202  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10203 
10204  void* pData;
10205  VkResult res = Map(hAllocator, 1, &pData);
10206  if(res != VK_SUCCESS)
10207  {
10208  return res;
10209  }
10210 
10211  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10212  VmaWriteMagicValue(pData, allocOffset + allocSize);
10213 
10214  Unmap(hAllocator, 1);
10215 
10216  return VK_SUCCESS;
10217 }
10218 
10219 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10220 {
10221  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10222  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10223 
10224  void* pData;
10225  VkResult res = Map(hAllocator, 1, &pData);
10226  if(res != VK_SUCCESS)
10227  {
10228  return res;
10229  }
10230 
10231  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10232  {
10233  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10234  }
10235  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10236  {
10237  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10238  }
10239 
10240  Unmap(hAllocator, 1);
10241 
10242  return VK_SUCCESS;
10243 }
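/*
Layout enforced by the two functions above when VMA_DEBUG_MARGIN > 0 and
VMA_DEBUG_DETECT_CORRUPTION is enabled (offsets within the block's mapped
memory):

  [allocOffset - VMA_DEBUG_MARGIN, allocOffset)          margin filled with the 32-bit magic value
  [allocOffset, allocOffset + allocSize)                 user data
  [allocOffset + allocSize, ... + VMA_DEBUG_MARGIN)      margin filled with the magic value

Any stray write into either margin changes the pattern and trips the asserts
in ValidateMagicValueAroundAllocation() when the allocation is freed. The
margin must be a positive multiple of 4 because the pattern is written and
checked in uint32_t units.
*/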
10244 
10245 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10246  const VmaAllocator hAllocator,
10247  const VmaAllocation hAllocation,
10248  VkBuffer hBuffer)
10249 {
10250  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10251  hAllocation->GetBlock() == this);
10252  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10253  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10254  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10255  hAllocator->m_hDevice,
10256  hBuffer,
10257  m_hMemory,
10258  hAllocation->GetOffset());
10259 }
10260 
10261 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10262  const VmaAllocator hAllocator,
10263  const VmaAllocation hAllocation,
10264  VkImage hImage)
10265 {
10266  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10267  hAllocation->GetBlock() == this);
10268  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10270  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10271  hAllocator->m_hDevice,
10272  hImage,
10273  m_hMemory,
10274  hAllocation->GetOffset());
10275 }
10276 
10277 static void InitStatInfo(VmaStatInfo& outInfo)
10278 {
10279  memset(&outInfo, 0, sizeof(outInfo));
10280  outInfo.allocationSizeMin = UINT64_MAX;
10281  outInfo.unusedRangeSizeMin = UINT64_MAX;
10282 }
10283 
10284 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10285 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10286 {
10287  inoutInfo.blockCount += srcInfo.blockCount;
10288  inoutInfo.allocationCount += srcInfo.allocationCount;
10289  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10290  inoutInfo.usedBytes += srcInfo.usedBytes;
10291  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10292  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10293  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10294  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10295  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10296 }
10297 
10298 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10299 {
10300  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10301  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10302  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10304 }
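/*
VmaRoundDiv used above is integer division rounded to the nearest value. A
minimal equivalent sketch (assuming unsigned arguments and y > 0; the name
ToyRoundDiv is hypothetical):
*/
template<typename T>
static T ToyRoundDiv(T x, T y)
{
    // Adding half the divisor before dividing rounds half up:
    // ToyRoundDiv(7, 2) == 4, ToyRoundDiv(5, 2) == 3, ToyRoundDiv(4, 2) == 2.
    return (x + y / (T)2) / y;
}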
10305 
10306 VmaPool_T::VmaPool_T(
10307  VmaAllocator hAllocator,
10308  const VmaPoolCreateInfo& createInfo,
10309  VkDeviceSize preferredBlockSize) :
10310  m_BlockVector(
10311  hAllocator,
10312  createInfo.memoryTypeIndex,
10313  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10314  createInfo.minBlockCount,
10315  createInfo.maxBlockCount,
10316  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10317  createInfo.frameInUseCount,
10318  true, // isCustomPool
10319  createInfo.blockSize != 0, // explicitBlockSize
10320  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10321  m_Id(0)
10322 {
10323 }
10324 
10325 VmaPool_T::~VmaPool_T()
10326 {
10327 }
10328 
10329 #if VMA_STATS_STRING_ENABLED
10330 
10331 #endif // #if VMA_STATS_STRING_ENABLED
10332 
10333 VmaBlockVector::VmaBlockVector(
10334  VmaAllocator hAllocator,
10335  uint32_t memoryTypeIndex,
10336  VkDeviceSize preferredBlockSize,
10337  size_t minBlockCount,
10338  size_t maxBlockCount,
10339  VkDeviceSize bufferImageGranularity,
10340  uint32_t frameInUseCount,
10341  bool isCustomPool,
10342  bool explicitBlockSize,
10343  uint32_t algorithm) :
10344  m_hAllocator(hAllocator),
10345  m_MemoryTypeIndex(memoryTypeIndex),
10346  m_PreferredBlockSize(preferredBlockSize),
10347  m_MinBlockCount(minBlockCount),
10348  m_MaxBlockCount(maxBlockCount),
10349  m_BufferImageGranularity(bufferImageGranularity),
10350  m_FrameInUseCount(frameInUseCount),
10351  m_IsCustomPool(isCustomPool),
10352  m_ExplicitBlockSize(explicitBlockSize),
10353  m_Algorithm(algorithm),
10354  m_HasEmptyBlock(false),
10355  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10356  m_pDefragmentator(VMA_NULL),
10357  m_NextBlockId(0)
10358 {
10359 }
10360 
10361 VmaBlockVector::~VmaBlockVector()
10362 {
10363  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10364 
10365  for(size_t i = m_Blocks.size(); i--; )
10366  {
10367  m_Blocks[i]->Destroy(m_hAllocator);
10368  vma_delete(m_hAllocator, m_Blocks[i]);
10369  }
10370 }
10371 
10372 VkResult VmaBlockVector::CreateMinBlocks()
10373 {
10374  for(size_t i = 0; i < m_MinBlockCount; ++i)
10375  {
10376  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10377  if(res != VK_SUCCESS)
10378  {
10379  return res;
10380  }
10381  }
10382  return VK_SUCCESS;
10383 }
10384 
10385 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10386 {
10387  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10388 
10389  const size_t blockCount = m_Blocks.size();
10390 
10391  pStats->size = 0;
10392  pStats->unusedSize = 0;
10393  pStats->allocationCount = 0;
10394  pStats->unusedRangeCount = 0;
10395  pStats->unusedRangeSizeMax = 0;
10396  pStats->blockCount = blockCount;
10397 
10398  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10399  {
10400  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10401  VMA_ASSERT(pBlock);
10402  VMA_HEAVY_ASSERT(pBlock->Validate());
10403  pBlock->m_pMetadata->AddPoolStats(*pStats);
10404  }
10405 }
10406 
10407 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10408 {
10409  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10410  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10411  (VMA_DEBUG_MARGIN > 0) &&
10412  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10413 }
10414 
10415 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10416 
10417 VkResult VmaBlockVector::Allocate(
10418  VmaPool hCurrentPool,
10419  uint32_t currentFrameIndex,
10420  VkDeviceSize size,
10421  VkDeviceSize alignment,
10422  const VmaAllocationCreateInfo& createInfo,
10423  VmaSuballocationType suballocType,
10424  VmaAllocation* pAllocation)
10425 {
10426  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10427  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10428  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10429  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10430  const bool canCreateNewBlock =
10431  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10432  (m_Blocks.size() < m_MaxBlockCount);
10433  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10434 
10435  // If the linear algorithm is used, canMakeOtherLost is available only when the block is used as a ring buffer,
10436  // which in turn is possible only when maxBlockCount = 1.
10437  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10438  {
10439  canMakeOtherLost = false;
10440  }
10441 
10442  // Upper address can only be used with linear allocator and within single memory block.
10443  if(isUpperAddress &&
10444  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10445  {
10446  return VK_ERROR_FEATURE_NOT_PRESENT;
10447  }
10448 
10449  // Validate strategy.
10450  switch(strategy)
10451  {
10452  case 0:
10453  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10454  break;
10455  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10456  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10457  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10458  break;
10459  default:
10460  return VK_ERROR_FEATURE_NOT_PRESENT;
10461  }
10462 
10463  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10464  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10465  {
10466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10467  }
10468 
10469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10470 
10471  /*
10472  Under certain conditions, this whole section can be skipped as an optimization, so
10473  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10474  e.g. for custom pools with the linear algorithm.
10475  */
10476  if(!canMakeOtherLost || canCreateNewBlock)
10477  {
10478  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10479  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10480  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10481 
10482  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10483  {
10484  // Use only last block.
10485  if(!m_Blocks.empty())
10486  {
10487  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10488  VMA_ASSERT(pCurrBlock);
10489  VkResult res = AllocateFromBlock(
10490  pCurrBlock,
10491  hCurrentPool,
10492  currentFrameIndex,
10493  size,
10494  alignment,
10495  allocFlagsCopy,
10496  createInfo.pUserData,
10497  suballocType,
10498  strategy,
10499  pAllocation);
10500  if(res == VK_SUCCESS)
10501  {
10502  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10503  return VK_SUCCESS;
10504  }
10505  }
10506  }
10507  else
10508  {
10509  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10510  {
10511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10513  {
10514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10515  VMA_ASSERT(pCurrBlock);
10516  VkResult res = AllocateFromBlock(
10517  pCurrBlock,
10518  hCurrentPool,
10519  currentFrameIndex,
10520  size,
10521  alignment,
10522  allocFlagsCopy,
10523  createInfo.pUserData,
10524  suballocType,
10525  strategy,
10526  pAllocation);
10527  if(res == VK_SUCCESS)
10528  {
10529  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10530  return VK_SUCCESS;
10531  }
10532  }
10533  }
10534  else // WORST_FIT, FIRST_FIT
10535  {
10536  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10537  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10538  {
10539  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10540  VMA_ASSERT(pCurrBlock);
10541  VkResult res = AllocateFromBlock(
10542  pCurrBlock,
10543  hCurrentPool,
10544  currentFrameIndex,
10545  size,
10546  alignment,
10547  allocFlagsCopy,
10548  createInfo.pUserData,
10549  suballocType,
10550  strategy,
10551  pAllocation);
10552  if(res == VK_SUCCESS)
10553  {
10554  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10555  return VK_SUCCESS;
10556  }
10557  }
10558  }
10559  }
10560 
10561  // 2. Try to create new block.
10562  if(canCreateNewBlock)
10563  {
10564  // Calculate optimal size for new block.
10565  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10566  uint32_t newBlockSizeShift = 0;
10567  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10568 
10569  if(!m_ExplicitBlockSize)
10570  {
10571  // Allocate 1/8, 1/4, 1/2 as first blocks.
10572  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10573  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10574  {
10575  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10576  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10577  {
10578  newBlockSize = smallerNewBlockSize;
10579  ++newBlockSizeShift;
10580  }
10581  else
10582  {
10583  break;
10584  }
10585  }
10586  }
10587 
10588  size_t newBlockIndex = 0;
10589  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10590  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10591  if(!m_ExplicitBlockSize)
10592  {
10593  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10594  {
10595  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10596  if(smallerNewBlockSize >= size)
10597  {
10598  newBlockSize = smallerNewBlockSize;
10599  ++newBlockSizeShift;
10600  res = CreateBlock(newBlockSize, &newBlockIndex);
10601  }
10602  else
10603  {
10604  break;
10605  }
10606  }
10607  }
10608 
10609  if(res == VK_SUCCESS)
10610  {
10611  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10612  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10613 
10614  res = AllocateFromBlock(
10615  pBlock,
10616  hCurrentPool,
10617  currentFrameIndex,
10618  size,
10619  alignment,
10620  allocFlagsCopy,
10621  createInfo.pUserData,
10622  suballocType,
10623  strategy,
10624  pAllocation);
10625  if(res == VK_SUCCESS)
10626  {
10627  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10628  return VK_SUCCESS;
10629  }
10630  else
10631  {
10632  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10633  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10634  }
10635  }
10636  }
10637  }
10638 
10639  // 3. Try to allocate from existing blocks with making other allocations lost.
10640  if(canMakeOtherLost)
10641  {
10642  uint32_t tryIndex = 0;
10643  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10644  {
10645  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10646  VmaAllocationRequest bestRequest = {};
10647  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10648 
10649  // 1. Search existing allocations.
10650  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10651  {
10652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10654  {
10655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10656  VMA_ASSERT(pCurrBlock);
10657  VmaAllocationRequest currRequest = {};
10658  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10659  currentFrameIndex,
10660  m_FrameInUseCount,
10661  m_BufferImageGranularity,
10662  size,
10663  alignment,
10664  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10665  suballocType,
10666  canMakeOtherLost,
10667  strategy,
10668  &currRequest))
10669  {
10670  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10671  if(pBestRequestBlock == VMA_NULL ||
10672  currRequestCost < bestRequestCost)
10673  {
10674  pBestRequestBlock = pCurrBlock;
10675  bestRequest = currRequest;
10676  bestRequestCost = currRequestCost;
10677 
10678  if(bestRequestCost == 0)
10679  {
10680  break;
10681  }
10682  }
10683  }
10684  }
10685  }
10686  else // WORST_FIT, FIRST_FIT
10687  {
10688  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10689  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10690  {
10691  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10692  VMA_ASSERT(pCurrBlock);
10693  VmaAllocationRequest currRequest = {};
10694  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10695  currentFrameIndex,
10696  m_FrameInUseCount,
10697  m_BufferImageGranularity,
10698  size,
10699  alignment,
10700  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10701  suballocType,
10702  canMakeOtherLost,
10703  strategy,
10704  &currRequest))
10705  {
10706  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10707  if(pBestRequestBlock == VMA_NULL ||
10708  currRequestCost < bestRequestCost ||
10709  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10710  {
10711  pBestRequestBlock = pCurrBlock;
10712  bestRequest = currRequest;
10713  bestRequestCost = currRequestCost;
10714 
10715  if(bestRequestCost == 0 ||
10716  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10717  {
10718  break;
10719  }
10720  }
10721  }
10722  }
10723  }
10724 
10725  if(pBestRequestBlock != VMA_NULL)
10726  {
10727  if(mapped)
10728  {
10729  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10730  if(res != VK_SUCCESS)
10731  {
10732  return res;
10733  }
10734  }
10735 
10736  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10737  currentFrameIndex,
10738  m_FrameInUseCount,
10739  &bestRequest))
10740  {
10741  // We no longer have an empty block.
10742  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10743  {
10744  m_HasEmptyBlock = false;
10745  }
10746  // Allocate from this pBlock.
10747  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10748  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10749  (*pAllocation)->InitBlockAllocation(
10750  hCurrentPool,
10751  pBestRequestBlock,
10752  bestRequest.offset,
10753  alignment,
10754  size,
10755  suballocType,
10756  mapped,
10757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10758  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10759  VMA_DEBUG_LOG("    Returned from existing block");
10760  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10761  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10762  {
10763  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10764  }
10765  if(IsCorruptionDetectionEnabled())
10766  {
10767  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10768  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10769  }
10770  return VK_SUCCESS;
10771  }
10772  // else: Some allocations must have been touched while we are here. Next try.
10773  }
10774  else
10775  {
10776  // Could not find place in any of the blocks - break outer loop.
10777  break;
10778  }
10779  }
10780  /* Maximum number of tries exceeded - a very unlikely event: many other
10781  threads are simultaneously touching allocations, making it impossible to mark
10782  them as lost at the same time as we try to allocate. */
10783  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10784  {
10785  return VK_ERROR_TOO_MANY_OBJECTS;
10786  }
10787  }
10788 
10789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10790 }
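/*
Worked example of the new-block sizing heuristic above, assuming
m_PreferredBlockSize = 256 MiB, no existing blocks, a 10 MiB request and
!m_ExplicitBlockSize: the pre-shrink loop halves the candidate while it still
exceeds both the largest existing block and 2 * size, giving
256 -> 128 -> 64 -> 32 MiB (NEW_BLOCK_SIZE_SHIFT_MAX = 3 halvings), so the
first block is only 32 MiB. The shift budget is shared with the failure path:
here all 3 halvings are spent up front, so a failed vkAllocateMemory cannot
retry smaller; conversely, when large blocks already exist the pre-shrink
does nothing and a failure may retry at 128, 64 and 32 MiB. Small
applications thus avoid paying for a full-size block, while heavy usage grows
blocks back toward the preferred size.
*/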
10791 
10792 void VmaBlockVector::Free(
10793  VmaAllocation hAllocation)
10794 {
10795  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10796 
10797  // Scope for lock.
10798  {
10799  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10800 
10801  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10802 
10803  if(IsCorruptionDetectionEnabled())
10804  {
10805  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10806  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10807  }
10808 
10809  if(hAllocation->IsPersistentMap())
10810  {
10811  pBlock->Unmap(m_hAllocator, 1);
10812  }
10813 
10814  pBlock->m_pMetadata->Free(hAllocation);
10815  VMA_HEAVY_ASSERT(pBlock->Validate());
10816 
10817  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10818 
10819  // pBlock became empty after this deallocation.
10820  if(pBlock->m_pMetadata->IsEmpty())
10821  {
10822  // We already have an empty block - we don't want two, so delete this one.
10823  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10824  {
10825  pBlockToDelete = pBlock;
10826  Remove(pBlock);
10827  }
10828  // We now have first empty block.
10829  else
10830  {
10831  m_HasEmptyBlock = true;
10832  }
10833  }
10834  // pBlock didn't become empty, but we have another empty block - find and free that one.
10835  // (This is optional, heuristics.)
10836  else if(m_HasEmptyBlock)
10837  {
10838  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10839  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10840  {
10841  pBlockToDelete = pLastBlock;
10842  m_Blocks.pop_back();
10843  m_HasEmptyBlock = false;
10844  }
10845  }
10846 
10847  IncrementallySortBlocks();
10848  }
10849 
10850  // Destruction of a free block. Deferred until this point, outside of the mutex
10851  // lock, for performance reasons.
10852  if(pBlockToDelete != VMA_NULL)
10853  {
10854  VMA_DEBUG_LOG("    Deleted empty block");
10855  pBlockToDelete->Destroy(m_hAllocator);
10856  vma_delete(m_hAllocator, pBlockToDelete);
10857  }
10858 }
10859 
10860 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10861 {
10862  VkDeviceSize result = 0;
10863  for(size_t i = m_Blocks.size(); i--; )
10864  {
10865  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10866  if(result >= m_PreferredBlockSize)
10867  {
10868  break;
10869  }
10870  }
10871  return result;
10872 }
10873 
10874 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10875 {
10876  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10877  {
10878  if(m_Blocks[blockIndex] == pBlock)
10879  {
10880  VmaVectorRemove(m_Blocks, blockIndex);
10881  return;
10882  }
10883  }
10884  VMA_ASSERT(0);
10885 }
10886 
10887 void VmaBlockVector::IncrementallySortBlocks()
10888 {
10889  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10890  {
10891  // Bubble sort only until first swap.
10892  for(size_t i = 1; i < m_Blocks.size(); ++i)
10893  {
10894  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10895  {
10896  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10897  return;
10898  }
10899  }
10900  }
10901 }
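/*
Note on the "until first swap" trick above: block free sizes change a little
at a time, so m_Blocks stays nearly sorted by ascending free size (allocation
scans therefore try the fullest blocks first, packing memory tighter). One
adjacent swap per call is enough to converge over repeated calls, at O(n)
worst case and typically O(1) work, instead of a full O(n log n) sort after
every free.
*/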
10902 
10903 VkResult VmaBlockVector::AllocateFromBlock(
10904  VmaDeviceMemoryBlock* pBlock,
10905  VmaPool hCurrentPool,
10906  uint32_t currentFrameIndex,
10907  VkDeviceSize size,
10908  VkDeviceSize alignment,
10909  VmaAllocationCreateFlags allocFlags,
10910  void* pUserData,
10911  VmaSuballocationType suballocType,
10912  uint32_t strategy,
10913  VmaAllocation* pAllocation)
10914 {
10915  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10916  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10917  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10918  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10919 
10920  VmaAllocationRequest currRequest = {};
10921  if(pBlock->m_pMetadata->CreateAllocationRequest(
10922  currentFrameIndex,
10923  m_FrameInUseCount,
10924  m_BufferImageGranularity,
10925  size,
10926  alignment,
10927  isUpperAddress,
10928  suballocType,
10929  false, // canMakeOtherLost
10930  strategy,
10931  &currRequest))
10932  {
10933  // Allocate from pCurrBlock.
10934  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10935 
10936  if(mapped)
10937  {
10938  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10939  if(res != VK_SUCCESS)
10940  {
10941  return res;
10942  }
10943  }
10944 
10945  // We no longer have an empty block.
10946  if(pBlock->m_pMetadata->IsEmpty())
10947  {
10948  m_HasEmptyBlock = false;
10949  }
10950 
10951  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10952  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10953  (*pAllocation)->InitBlockAllocation(
10954  hCurrentPool,
10955  pBlock,
10956  currRequest.offset,
10957  alignment,
10958  size,
10959  suballocType,
10960  mapped,
10961  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10962  VMA_HEAVY_ASSERT(pBlock->Validate());
10963  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10964  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10965  {
10966  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10967  }
10968  if(IsCorruptionDetectionEnabled())
10969  {
10970  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10971  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10972  }
10973  return VK_SUCCESS;
10974  }
10975  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10976 }
10977 
10978 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10979 {
10980  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10981  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10982  allocInfo.allocationSize = blockSize;
10983  VkDeviceMemory mem = VK_NULL_HANDLE;
10984  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10985  if(res < 0)
10986  {
10987  return res;
10988  }
10989 
10990  // New VkDeviceMemory successfully created.
10991 
10992  // Create a new block object for it.
10993  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10994  pBlock->Init(
10995  m_hAllocator,
10996  m_MemoryTypeIndex,
10997  mem,
10998  allocInfo.allocationSize,
10999  m_NextBlockId++,
11000  m_Algorithm);
11001 
11002  m_Blocks.push_back(pBlock);
11003  if(pNewBlockIndex != VMA_NULL)
11004  {
11005  *pNewBlockIndex = m_Blocks.size() - 1;
11006  }
11007 
11008  return VK_SUCCESS;
11009 }
11010 
11011 #if VMA_STATS_STRING_ENABLED
11012 
11013 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
11014 {
11015  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11016 
11017  json.BeginObject();
11018 
11019  if(m_IsCustomPool)
11020  {
11021  json.WriteString("MemoryTypeIndex");
11022  json.WriteNumber(m_MemoryTypeIndex);
11023 
11024  json.WriteString("BlockSize");
11025  json.WriteNumber(m_PreferredBlockSize);
11026 
11027  json.WriteString("BlockCount");
11028  json.BeginObject(true);
11029  if(m_MinBlockCount > 0)
11030  {
11031  json.WriteString("Min");
11032  json.WriteNumber((uint64_t)m_MinBlockCount);
11033  }
11034  if(m_MaxBlockCount < SIZE_MAX)
11035  {
11036  json.WriteString("Max");
11037  json.WriteNumber((uint64_t)m_MaxBlockCount);
11038  }
11039  json.WriteString("Cur");
11040  json.WriteNumber((uint64_t)m_Blocks.size());
11041  json.EndObject();
11042 
11043  if(m_FrameInUseCount > 0)
11044  {
11045  json.WriteString("FrameInUseCount");
11046  json.WriteNumber(m_FrameInUseCount);
11047  }
11048 
11049  if(m_Algorithm != 0)
11050  {
11051  json.WriteString("Algorithm");
11052  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
11053  }
11054  }
11055  else
11056  {
11057  json.WriteString("PreferredBlockSize");
11058  json.WriteNumber(m_PreferredBlockSize);
11059  }
11060 
11061  json.WriteString("Blocks");
11062  json.BeginObject();
11063  for(size_t i = 0; i < m_Blocks.size(); ++i)
11064  {
11065  json.BeginString();
11066  json.ContinueString(m_Blocks[i]->GetId());
11067  json.EndString();
11068 
11069  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
11070  }
11071  json.EndObject();
11072 
11073  json.EndObject();
11074 }
11075 
11076 #endif // #if VMA_STATS_STRING_ENABLED
11077 
11078 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
11079  VmaAllocator hAllocator,
11080  uint32_t currentFrameIndex)
11081 {
11082  if(m_pDefragmentator == VMA_NULL)
11083  {
11084  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11085  hAllocator,
11086  this,
11087  currentFrameIndex);
11088  }
11089 
11090  return m_pDefragmentator;
11091 }
11092 
11093 VkResult VmaBlockVector::Defragment(
11094  VmaDefragmentationStats* pDefragmentationStats,
11095  VkDeviceSize& maxBytesToMove,
11096  uint32_t& maxAllocationsToMove)
11097 {
11098  if(m_pDefragmentator == VMA_NULL)
11099  {
11100  return VK_SUCCESS;
11101  }
11102 
11103  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11104 
11105  // Defragment.
11106  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11107 
11108  // Accumulate statistics.
11109  if(pDefragmentationStats != VMA_NULL)
11110  {
11111  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11112  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11113  pDefragmentationStats->bytesMoved += bytesMoved;
11114  pDefragmentationStats->allocationsMoved += allocationsMoved;
11115  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11116  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11117  maxBytesToMove -= bytesMoved;
11118  maxAllocationsToMove -= allocationsMoved;
11119  }
11120 
11121  // Free empty blocks.
11122  m_HasEmptyBlock = false;
11123  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11124  {
11125  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11126  if(pBlock->m_pMetadata->IsEmpty())
11127  {
11128  if(m_Blocks.size() > m_MinBlockCount)
11129  {
11130  if(pDefragmentationStats != VMA_NULL)
11131  {
11132  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11133  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11134  }
11135 
11136  VmaVectorRemove(m_Blocks, blockIndex);
11137  pBlock->Destroy(m_hAllocator);
11138  vma_delete(m_hAllocator, pBlock);
11139  }
11140  else
11141  {
11142  m_HasEmptyBlock = true;
11143  }
11144  }
11145  }
11146 
11147  return result;
11148 }
11149 
11150 void VmaBlockVector::DestroyDefragmentator()
11151 {
11152  if(m_pDefragmentator != VMA_NULL)
11153  {
11154  vma_delete(m_hAllocator, m_pDefragmentator);
11155  m_pDefragmentator = VMA_NULL;
11156  }
11157 }
11158 
11159 void VmaBlockVector::MakePoolAllocationsLost(
11160  uint32_t currentFrameIndex,
11161  size_t* pLostAllocationCount)
11162 {
11163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11164  size_t lostAllocationCount = 0;
11165  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11166  {
11167  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11168  VMA_ASSERT(pBlock);
11169  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11170  }
11171  if(pLostAllocationCount != VMA_NULL)
11172  {
11173  *pLostAllocationCount = lostAllocationCount;
11174  }
11175 }
11176 
11177 VkResult VmaBlockVector::CheckCorruption()
11178 {
11179  if(!IsCorruptionDetectionEnabled())
11180  {
11181  return VK_ERROR_FEATURE_NOT_PRESENT;
11182  }
11183 
11184  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11185  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11186  {
11187  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11188  VMA_ASSERT(pBlock);
11189  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11190  if(res != VK_SUCCESS)
11191  {
11192  return res;
11193  }
11194  }
11195  return VK_SUCCESS;
11196 }
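/*
Usage sketch for the check above via the public entry point; requires the
library to be compiled with VMA_DEBUG_DETECT_CORRUPTION and a nonzero
VMA_DEBUG_MARGIN, and only covers memory types that are HOST_VISIBLE and
HOST_COHERENT (otherwise VK_ERROR_FEATURE_NOT_PRESENT is returned).
*/
static void ToyCorruptionCheckExample(VmaAllocator allocator)
{
    // UINT32_MAX = check every memory type; pass a narrower mask to restrict it.
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    // VK_SUCCESS: margins intact; another VK_ERROR_* code (plus an assert in
    // debug builds) indicates detected corruption or an unavailable check.
    (void)res;
}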
11197 
11198 void VmaBlockVector::AddStats(VmaStats* pStats)
11199 {
11200  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11201  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11202 
11203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11204 
11205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11206  {
11207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11208  VMA_ASSERT(pBlock);
11209  VMA_HEAVY_ASSERT(pBlock->Validate());
11210  VmaStatInfo allocationStatInfo;
11211  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11212  VmaAddStatInfo(pStats->total, allocationStatInfo);
11213  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11214  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11215  }
11216 }
11217 
11218 ////////////////////////////////////////////////////////////////////////////////
11219 // VmaDefragmentator members definition
11220 
11221 VmaDefragmentator::VmaDefragmentator(
11222  VmaAllocator hAllocator,
11223  VmaBlockVector* pBlockVector,
11224  uint32_t currentFrameIndex) :
11225  m_hAllocator(hAllocator),
11226  m_pBlockVector(pBlockVector),
11227  m_CurrentFrameIndex(currentFrameIndex),
11228  m_BytesMoved(0),
11229  m_AllocationsMoved(0),
11230  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11231  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11232 {
11233  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11234 }
11235 
11236 VmaDefragmentator::~VmaDefragmentator()
11237 {
11238  for(size_t i = m_Blocks.size(); i--; )
11239  {
11240  vma_delete(m_hAllocator, m_Blocks[i]);
11241  }
11242 }
11243 
11244 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11245 {
11246  AllocationInfo allocInfo;
11247  allocInfo.m_hAllocation = hAlloc;
11248  allocInfo.m_pChanged = pChanged;
11249  m_Allocations.push_back(allocInfo);
11250 }
11251 
11252 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11253 {
11254  // It has already been mapped for defragmentation.
11255  if(m_pMappedDataForDefragmentation)
11256  {
11257  *ppMappedData = m_pMappedDataForDefragmentation;
11258  return VK_SUCCESS;
11259  }
11260 
11261  // The block is already mapped (e.g. persistently by the user).
11262  if(m_pBlock->GetMappedData())
11263  {
11264  *ppMappedData = m_pBlock->GetMappedData();
11265  return VK_SUCCESS;
11266  }
11267 
11268  // Map on first usage.
11269  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11270  *ppMappedData = m_pMappedDataForDefragmentation;
11271  return res;
11272 }
11273 
11274 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11275 {
11276  if(m_pMappedDataForDefragmentation != VMA_NULL)
11277  {
11278  m_pBlock->Unmap(hAllocator, 1);
11279  }
11280 }
11281 
11282 VkResult VmaDefragmentator::DefragmentRound(
11283  VkDeviceSize maxBytesToMove,
11284  uint32_t maxAllocationsToMove)
11285 {
11286  if(m_Blocks.empty())
11287  {
11288  return VK_SUCCESS;
11289  }
11290 
11291  size_t srcBlockIndex = m_Blocks.size() - 1;
11292  size_t srcAllocIndex = SIZE_MAX;
11293  for(;;)
11294  {
11295  // 1. Find next allocation to move.
11296  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11297  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11298  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11299  {
11300  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11301  {
11302  // Finished: no more allocations to process.
11303  if(srcBlockIndex == 0)
11304  {
11305  return VK_SUCCESS;
11306  }
11307  else
11308  {
11309  --srcBlockIndex;
11310  srcAllocIndex = SIZE_MAX;
11311  }
11312  }
11313  else
11314  {
11315  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11316  }
11317  }
11318 
11319  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11320  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11321 
11322  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11323  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11324  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11325  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11326 
11327  // 2. Try to find new place for this allocation in preceding or current block.
11328  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11329  {
11330  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11331  VmaAllocationRequest dstAllocRequest;
11332  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11333  m_CurrentFrameIndex,
11334  m_pBlockVector->GetFrameInUseCount(),
11335  m_pBlockVector->GetBufferImageGranularity(),
11336  size,
11337  alignment,
11338  false, // upperAddress
11339  suballocType,
11340  false, // canMakeOtherLost
11341  VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET, // strategy
11342  &dstAllocRequest) &&
11343  MoveMakesSense(
11344  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11345  {
11346  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11347 
11348  // Reached limit on number of allocations or bytes to move.
11349  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11350  (m_BytesMoved + size > maxBytesToMove))
11351  {
11352  return VK_INCOMPLETE;
11353  }
11354 
11355  void* pDstMappedData = VMA_NULL;
11356  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11357  if(res != VK_SUCCESS)
11358  {
11359  return res;
11360  }
11361 
11362  void* pSrcMappedData = VMA_NULL;
11363  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11364  if(res != VK_SUCCESS)
11365  {
11366  return res;
11367  }
11368 
11369  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11370  memcpy(
11371  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11372  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11373  static_cast<size_t>(size));
11374 
11375  if(VMA_DEBUG_MARGIN > 0)
11376  {
11377  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11378  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11379  }
11380 
11381  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11382  dstAllocRequest,
11383  suballocType,
11384  size,
11385  false, // upperAddress
11386  allocInfo.m_hAllocation);
11387  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11388 
11389  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11390 
11391  if(allocInfo.m_pChanged != VMA_NULL)
11392  {
11393  *allocInfo.m_pChanged = VK_TRUE;
11394  }
11395 
11396  ++m_AllocationsMoved;
11397  m_BytesMoved += size;
11398 
11399  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11400 
11401  break;
11402  }
11403  }
11404 
11405  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11406 
11407  if(srcAllocIndex > 0)
11408  {
11409  --srcAllocIndex;
11410  }
11411  else
11412  {
11413  if(srcBlockIndex > 0)
11414  {
11415  --srcBlockIndex;
11416  srcAllocIndex = SIZE_MAX;
11417  }
11418  else
11419  {
11420  return VK_SUCCESS;
11421  }
11422  }
11423  }
11424 }
11425 
11426 VkResult VmaDefragmentator::Defragment(
11427  VkDeviceSize maxBytesToMove,
11428  uint32_t maxAllocationsToMove)
11429 {
11430  if(m_Allocations.empty())
11431  {
11432  return VK_SUCCESS;
11433  }
11434 
11435  // Create block info for each block.
11436  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11437  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11438  {
11439  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11440  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11441  m_Blocks.push_back(pBlockInfo);
11442  }
11443 
11444  // Sort them by m_pBlock pointer value.
11445  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11446 
11447  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i].m_Allocations.
11448  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
11449  {
11450  AllocationInfo& allocInfo = m_Allocations[blockIndex];
11451  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
11452  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11453  {
11454  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11455  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11456  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11457  {
11458  (*it)->m_Allocations.push_back(allocInfo);
11459  }
11460  else
11461  {
11462  VMA_ASSERT(0);
11463  }
11464  }
11465  }
11466  m_Allocations.clear();
11467 
11468  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11469  {
11470  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11471  pBlockInfo->CalcHasNonMovableAllocations();
11472  pBlockInfo->SortAllocationsBySizeDescecnding();
11473  }
11474 
11475  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11476  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11477 
11478  // Execute defragmentation rounds (the main part).
11479  VkResult result = VK_SUCCESS;
11480  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11481  {
11482  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11483  }
11484 
11485  // Unmap blocks that were mapped for defragmentation.
11486  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11487  {
11488  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11489  }
11490 
11491  return result;
11492 }
11493 
11494 bool VmaDefragmentator::MoveMakesSense(
11495  size_t dstBlockIndex, VkDeviceSize dstOffset,
11496  size_t srcBlockIndex, VkDeviceSize srcOffset)
11497 {
11498  if(dstBlockIndex < srcBlockIndex)
11499  {
11500  return true;
11501  }
11502  if(dstBlockIndex > srcBlockIndex)
11503  {
11504  return false;
11505  }
11506  if(dstOffset < srcOffset)
11507  {
11508  return true;
11509  }
11510  return false;
11511 }
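
// MoveMakesSense orders candidate moves lexicographically by (blockIndex, offset):
// a move is accepted only if it lands strictly earlier in that order, so data can
// only migrate toward the front of the block vector. Illustrative calls
// (hypothetical values, not taken from the library):
//
//   MoveMakesSense(/*dst*/ 0, 256, /*src*/ 1,   0); // true:  earlier block
//   MoveMakesSense(/*dst*/ 1,   0, /*src*/ 1, 256); // true:  same block, lower offset
//   MoveMakesSense(/*dst*/ 1, 256, /*src*/ 1, 256); // false: not strictly earlier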
11512 
11513 ////////////////////////////////////////////////////////////////////////////////
11514 // VmaRecorder
11515 
11516 #if VMA_RECORDING_ENABLED
11517 
11518 VmaRecorder::VmaRecorder() :
11519  m_UseMutex(true),
11520  m_Flags(0),
11521  m_File(VMA_NULL),
11522  m_Freq(INT64_MAX),
11523  m_StartCounter(INT64_MAX)
11524 {
11525 }
11526 
11527 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11528 {
11529  m_UseMutex = useMutex;
11530  m_Flags = settings.flags;
11531 
11532  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11533  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11534 
11535  // Open file for writing.
11536  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11537  if(err != 0)
11538  {
11539  return VK_ERROR_INITIALIZATION_FAILED;
11540  }
11541 
11542  // Write header.
11543  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11544  fprintf(m_File, "%s\n", "1,4");
11545 
11546  return VK_SUCCESS;
11547 }
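
// The recording is a plain CSV text file: the two header lines written above (format
// name and version "1,4") followed by one line per recorded call, as produced by the
// Record* methods below. A capture might begin like this (thread id and timestamp are
// illustrative):
//
//   Vulkan Memory Allocator,Calls recording
//   1,4
//   12552,0.002,0,vmaCreateAllocator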
11548 
11549 VmaRecorder::~VmaRecorder()
11550 {
11551  if(m_File != VMA_NULL)
11552  {
11553  fclose(m_File);
11554  }
11555 }
11556 
11557 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11558 {
11559  CallParams callParams;
11560  GetBasicParams(callParams);
11561 
11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11563  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11564  Flush();
11565 }
11566 
11567 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11568 {
11569  CallParams callParams;
11570  GetBasicParams(callParams);
11571 
11572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11573  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11574  Flush();
11575 }
11576 
11577 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11578 {
11579  CallParams callParams;
11580  GetBasicParams(callParams);
11581 
11582  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11583  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11584  createInfo.memoryTypeIndex,
11585  createInfo.flags,
11586  createInfo.blockSize,
11587  (uint64_t)createInfo.minBlockCount,
11588  (uint64_t)createInfo.maxBlockCount,
11589  createInfo.frameInUseCount,
11590  pool);
11591  Flush();
11592 }
11593 
11594 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11595 {
11596  CallParams callParams;
11597  GetBasicParams(callParams);
11598 
11599  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11600  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11601  pool);
11602  Flush();
11603 }
11604 
11605 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11606  const VkMemoryRequirements& vkMemReq,
11607  const VmaAllocationCreateInfo& createInfo,
11608  VmaAllocation allocation)
11609 {
11610  CallParams callParams;
11611  GetBasicParams(callParams);
11612 
11613  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11614  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11615  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11616  vkMemReq.size,
11617  vkMemReq.alignment,
11618  vkMemReq.memoryTypeBits,
11619  createInfo.flags,
11620  createInfo.usage,
11621  createInfo.requiredFlags,
11622  createInfo.preferredFlags,
11623  createInfo.memoryTypeBits,
11624  createInfo.pool,
11625  allocation,
11626  userDataStr.GetString());
11627  Flush();
11628 }
11629 
11630 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11631  const VkMemoryRequirements& vkMemReq,
11632  bool requiresDedicatedAllocation,
11633  bool prefersDedicatedAllocation,
11634  const VmaAllocationCreateInfo& createInfo,
11635  VmaAllocation allocation)
11636 {
11637  CallParams callParams;
11638  GetBasicParams(callParams);
11639 
11640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11641  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11642  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11643  vkMemReq.size,
11644  vkMemReq.alignment,
11645  vkMemReq.memoryTypeBits,
11646  requiresDedicatedAllocation ? 1 : 0,
11647  prefersDedicatedAllocation ? 1 : 0,
11648  createInfo.flags,
11649  createInfo.usage,
11650  createInfo.requiredFlags,
11651  createInfo.preferredFlags,
11652  createInfo.memoryTypeBits,
11653  createInfo.pool,
11654  allocation,
11655  userDataStr.GetString());
11656  Flush();
11657 }
11658 
11659 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11660  const VkMemoryRequirements& vkMemReq,
11661  bool requiresDedicatedAllocation,
11662  bool prefersDedicatedAllocation,
11663  const VmaAllocationCreateInfo& createInfo,
11664  VmaAllocation allocation)
11665 {
11666  CallParams callParams;
11667  GetBasicParams(callParams);
11668 
11669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11672  vkMemReq.size,
11673  vkMemReq.alignment,
11674  vkMemReq.memoryTypeBits,
11675  requiresDedicatedAllocation ? 1 : 0,
11676  prefersDedicatedAllocation ? 1 : 0,
11677  createInfo.flags,
11678  createInfo.usage,
11679  createInfo.requiredFlags,
11680  createInfo.preferredFlags,
11681  createInfo.memoryTypeBits,
11682  createInfo.pool,
11683  allocation,
11684  userDataStr.GetString());
11685  Flush();
11686 }
11687 
11688 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11689  VmaAllocation allocation)
11690 {
11691  CallParams callParams;
11692  GetBasicParams(callParams);
11693 
11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11695  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11696  allocation);
11697  Flush();
11698 }
11699 
11700 void VmaRecorder::RecordResizeAllocation(
11701  uint32_t frameIndex,
11702  VmaAllocation allocation,
11703  VkDeviceSize newSize)
11704 {
11705  CallParams callParams;
11706  GetBasicParams(callParams);
11707 
11708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11709  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
11710  allocation, newSize);
11711  Flush();
11712 }
11713 
11714 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11715  VmaAllocation allocation,
11716  const void* pUserData)
11717 {
11718  CallParams callParams;
11719  GetBasicParams(callParams);
11720 
11721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11722  UserDataString userDataStr(
11723  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11724  pUserData);
11725  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11726  allocation,
11727  userDataStr.GetString());
11728  Flush();
11729 }
11730 
11731 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11732  VmaAllocation allocation)
11733 {
11734  CallParams callParams;
11735  GetBasicParams(callParams);
11736 
11737  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11738  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11739  allocation);
11740  Flush();
11741 }
11742 
11743 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11744  VmaAllocation allocation)
11745 {
11746  CallParams callParams;
11747  GetBasicParams(callParams);
11748 
11749  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11750  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11751  allocation);
11752  Flush();
11753 }
11754 
11755 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11756  VmaAllocation allocation)
11757 {
11758  CallParams callParams;
11759  GetBasicParams(callParams);
11760 
11761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11762  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11763  allocation);
11764  Flush();
11765 }
11766 
11767 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11768  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11769 {
11770  CallParams callParams;
11771  GetBasicParams(callParams);
11772 
11773  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11774  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11775  allocation,
11776  offset,
11777  size);
11778  Flush();
11779 }
11780 
11781 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11782  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11783 {
11784  CallParams callParams;
11785  GetBasicParams(callParams);
11786 
11787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11788  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11789  allocation,
11790  offset,
11791  size);
11792  Flush();
11793 }
11794 
11795 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11796  const VkBufferCreateInfo& bufCreateInfo,
11797  const VmaAllocationCreateInfo& allocCreateInfo,
11798  VmaAllocation allocation)
11799 {
11800  CallParams callParams;
11801  GetBasicParams(callParams);
11802 
11803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11804  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11805  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11806  bufCreateInfo.flags,
11807  bufCreateInfo.size,
11808  bufCreateInfo.usage,
11809  bufCreateInfo.sharingMode,
11810  allocCreateInfo.flags,
11811  allocCreateInfo.usage,
11812  allocCreateInfo.requiredFlags,
11813  allocCreateInfo.preferredFlags,
11814  allocCreateInfo.memoryTypeBits,
11815  allocCreateInfo.pool,
11816  allocation,
11817  userDataStr.GetString());
11818  Flush();
11819 }
11820 
11821 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11822  const VkImageCreateInfo& imageCreateInfo,
11823  const VmaAllocationCreateInfo& allocCreateInfo,
11824  VmaAllocation allocation)
11825 {
11826  CallParams callParams;
11827  GetBasicParams(callParams);
11828 
11829  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11830  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11831  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11832  imageCreateInfo.flags,
11833  imageCreateInfo.imageType,
11834  imageCreateInfo.format,
11835  imageCreateInfo.extent.width,
11836  imageCreateInfo.extent.height,
11837  imageCreateInfo.extent.depth,
11838  imageCreateInfo.mipLevels,
11839  imageCreateInfo.arrayLayers,
11840  imageCreateInfo.samples,
11841  imageCreateInfo.tiling,
11842  imageCreateInfo.usage,
11843  imageCreateInfo.sharingMode,
11844  imageCreateInfo.initialLayout,
11845  allocCreateInfo.flags,
11846  allocCreateInfo.usage,
11847  allocCreateInfo.requiredFlags,
11848  allocCreateInfo.preferredFlags,
11849  allocCreateInfo.memoryTypeBits,
11850  allocCreateInfo.pool,
11851  allocation,
11852  userDataStr.GetString());
11853  Flush();
11854 }
11855 
11856 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11857  VmaAllocation allocation)
11858 {
11859  CallParams callParams;
11860  GetBasicParams(callParams);
11861 
11862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11863  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11864  allocation);
11865  Flush();
11866 }
11867 
11868 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11869  VmaAllocation allocation)
11870 {
11871  CallParams callParams;
11872  GetBasicParams(callParams);
11873 
11874  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11875  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11876  allocation);
11877  Flush();
11878 }
11879 
11880 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11881  VmaAllocation allocation)
11882 {
11883  CallParams callParams;
11884  GetBasicParams(callParams);
11885 
11886  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11887  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11888  allocation);
11889  Flush();
11890 }
11891 
11892 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11893  VmaAllocation allocation)
11894 {
11895  CallParams callParams;
11896  GetBasicParams(callParams);
11897 
11898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11899  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11900  allocation);
11901  Flush();
11902 }
11903 
11904 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11905  VmaPool pool)
11906 {
11907  CallParams callParams;
11908  GetBasicParams(callParams);
11909 
11910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11911  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11912  pool);
11913  Flush();
11914 }
11915 
11916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11917 {
11918  if(pUserData != VMA_NULL)
11919  {
11920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11921  {
11922  m_Str = (const char*)pUserData;
11923  }
11924  else
11925  {
11926  sprintf_s(m_PtrStr, "%p", pUserData);
11927  m_Str = m_PtrStr;
11928  }
11929  }
11930  else
11931  {
11932  m_Str = "";
11933  }
11934 }
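
// UserDataString therefore serializes pUserData in one of two ways: as the string
// itself when VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set, or as a
// "%p"-formatted pointer otherwise. A sketch with hypothetical values:
//
//   UserDataString s(VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, "MyBuffer");
//   // s.GetString() yields "MyBuffer"
//   UserDataString p(0, someOpaquePointer);
//   // p.GetString() yields the pointer's address
//   // (the exact "%p" formatting is implementation-defined)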
11935 
11936 void VmaRecorder::WriteConfiguration(
11937  const VkPhysicalDeviceProperties& devProps,
11938  const VkPhysicalDeviceMemoryProperties& memProps,
11939  bool dedicatedAllocationExtensionEnabled)
11940 {
11941  fprintf(m_File, "Config,Begin\n");
11942 
11943  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11944  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11945  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11946  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11947  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11948  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11949 
11950  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11951  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11952  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11953 
11954  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11955  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11956  {
11957  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11958  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11959  }
11960  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11961  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11962  {
11963  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11964  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11965  }
11966 
11967  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11968 
11969  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11970  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11971  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11972  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11973  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11974  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11975  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11976  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11977  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11978 
11979  fprintf(m_File, "Config,End\n");
11980 }
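
// In the file, this produces a self-contained "Config" section describing the
// environment of the capture, e.g. (values illustrative):
//
//   Config,Begin
//   PhysicalDevice,apiVersion,4198477
//   PhysicalDeviceMemory,HeapCount,2
//   Macro,VMA_DEBUG_MARGIN,0
//   Config,End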
11981 
11982 void VmaRecorder::GetBasicParams(CallParams& outParams)
11983 {
11984  outParams.threadId = GetCurrentThreadId();
11985 
11986  LARGE_INTEGER counter;
11987  QueryPerformanceCounter(&counter);
11988  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11989 }
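
// Timestamps are seconds elapsed since VmaRecorder::Init, computed from
// QueryPerformanceCounter. For example, with m_Freq = 10,000,000 ticks/s and a
// counter delta of 30,000 ticks, outParams.time = 30,000 / 10,000,000 = 0.003 s,
// which the "%.3f" format prints as "0.003".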
11990 
11991 void VmaRecorder::Flush()
11992 {
11993  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11994  {
11995  fflush(m_File);
11996  }
11997 }
11998 
11999 #endif // #if VMA_RECORDING_ENABLED
12000 
12001 ////////////////////////////////////////////////////////////////////////////////
12002 // VmaAllocator_T
12003 
12004 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
12005  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
12006  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
12007  m_hDevice(pCreateInfo->device),
12008  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
12009  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
12010  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
12011  m_PreferredLargeHeapBlockSize(0),
12012  m_PhysicalDevice(pCreateInfo->physicalDevice),
12013  m_CurrentFrameIndex(0),
12014  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
12015  m_NextPoolId(0)
12016 #if VMA_RECORDING_ENABLED
12017  ,m_pRecorder(VMA_NULL)
12018 #endif
12019 {
12020  if(VMA_DEBUG_DETECT_CORRUPTION)
12021  {
12022  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
12023  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
12024  }
12025 
12026  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
12027 
12028 #if !(VMA_DEDICATED_ALLOCATION)
12029  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
12030  {
12031  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
12032  }
12033 #endif
12034 
12035  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
12036  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
12037  memset(&m_MemProps, 0, sizeof(m_MemProps));
12038 
12039  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
12040  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
12041 
12042  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12043  {
12044  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
12045  }
12046 
12047  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
12048  {
12049  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
12050  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
12051  }
12052 
12053  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
12054 
12055  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
12056  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
12057 
12058  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
12059  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
12060  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
12061  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
12062 
12063  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
12064  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
12065 
12066  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
12067  {
12068  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
12069  {
12070  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
12071  if(limit != VK_WHOLE_SIZE)
12072  {
12073  m_HeapSizeLimit[heapIndex] = limit;
12074  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
12075  {
12076  m_MemProps.memoryHeaps[heapIndex].size = limit;
12077  }
12078  }
12079  }
12080  }
12081 
12082  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12083  {
12084  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
12085 
12086  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
12087  this,
12088  memTypeIndex,
12089  preferredBlockSize,
12090  0,
12091  SIZE_MAX,
12092  GetBufferImageGranularity(),
12093  pCreateInfo->frameInUseCount,
12094  false, // isCustomPool
12095  false, // explicitBlockSize
12096  false); // linearAlgorithm
12097  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12098  // because minBlockCount is 0.
12099  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12100 
12101  }
12102 }
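
// A minimal creation sketch showing how pHeapSizeLimit feeds the loop above
// (assumes `physicalDevice` and `device` were created elsewhere):
//
//   VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
//   for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//       heapLimits[i] = VK_WHOLE_SIZE;       // VK_WHOLE_SIZE means "no limit"
//   heapLimits[0] = 512ull * 1024 * 1024;    // cap heap 0 at 512 MiB
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pHeapSizeLimit = heapLimits;
//
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);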
12103 
12104 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12105 {
12106  VkResult res = VK_SUCCESS;
12107 
12108  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12109  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12110  {
12111 #if VMA_RECORDING_ENABLED
12112  m_pRecorder = vma_new(this, VmaRecorder)();
12113  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12114  if(res != VK_SUCCESS)
12115  {
12116  return res;
12117  }
12118  m_pRecorder->WriteConfiguration(
12119  m_PhysicalDeviceProperties,
12120  m_MemProps,
12121  m_UseKhrDedicatedAllocation);
12122  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12123 #else
12124  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12125  return VK_ERROR_FEATURE_NOT_PRESENT;
12126 #endif
12127  }
12128 
12129  return res;
12130 }
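
// To exercise the recording path, fill VmaAllocatorCreateInfo::pRecordSettings before
// calling vmaCreateAllocator. A sketch (requires VMA_RECORDING_ENABLED == 1, which by
// default is only the case on Windows; the file path is illustrative):
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // optional, see Flush()
//   recordSettings.pFilePath = "vma_capture.csv";
//   allocatorInfo.pRecordSettings = &recordSettings;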
12131 
12132 VmaAllocator_T::~VmaAllocator_T()
12133 {
12134 #if VMA_RECORDING_ENABLED
12135  if(m_pRecorder != VMA_NULL)
12136  {
12137  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12138  vma_delete(this, m_pRecorder);
12139  }
12140 #endif
12141 
12142  VMA_ASSERT(m_Pools.empty());
12143 
12144  for(size_t i = GetMemoryTypeCount(); i--; )
12145  {
12146  vma_delete(this, m_pDedicatedAllocations[i]);
12147  vma_delete(this, m_pBlockVectors[i]);
12148  }
12149 }
12150 
12151 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12152 {
12153 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12154  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12155  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12156  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12157  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12158  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12159  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12160  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12161  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12162  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12163  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12164  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12165  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12166  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12167  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12168  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12169  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12170 #if VMA_DEDICATED_ALLOCATION
12171  if(m_UseKhrDedicatedAllocation)
12172  {
12173  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12174  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12175  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12176  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12177  }
12178 #endif // #if VMA_DEDICATED_ALLOCATION
12179 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12180 
12181 #define VMA_COPY_IF_NOT_NULL(funcName) \
12182  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12183 
12184  if(pVulkanFunctions != VMA_NULL)
12185  {
12186  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12187  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12188  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12189  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12190  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12191  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12192  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12193  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12194  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12195  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12196  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12197  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12198  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12199  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12200  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12201  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12202 #if VMA_DEDICATED_ALLOCATION
12203  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12204  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12205 #endif
12206  }
12207 
12208 #undef VMA_COPY_IF_NOT_NULL
12209 
12210  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12211  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12212  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12213  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12214  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12215  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12216  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12217  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12218  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12219  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12220  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12221  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12222  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12223  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12224  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12225  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12226  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12227  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12228 #if VMA_DEDICATED_ALLOCATION
12229  if(m_UseKhrDedicatedAllocation)
12230  {
12231  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12232  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12233  }
12234 #endif
12235 }
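
// When VMA_STATIC_VULKAN_FUNCTIONS is not 1 (e.g. the application loads Vulkan
// dynamically), the pointers must be provided by the user. A partial sketch, where the
// `fetched*` pointers are assumed to come from vkGetInstanceProcAddr/vkGetDeviceProcAddr:
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkGetPhysicalDeviceProperties = fetchedGetPhysicalDeviceProperties;
//   vulkanFunctions.vkAllocateMemory = fetchedAllocateMemory;
//   // ...assign every remaining member that the asserts above verify...
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;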
12236 
12237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12238 {
12239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12243 }
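
// Worked example with the default macros (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// - a 256 MiB heap counts as "small", so the preferred block size is 256 MiB / 8 = 32 MiB;
// - an 8 GiB heap counts as "large", so the preferred block size is 256 MiB, or
//   VmaAllocatorCreateInfo::preferredLargeHeapBlockSize if the user overrode it.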
12244 
12245 VkResult VmaAllocator_T::AllocateMemoryOfType(
12246  VkDeviceSize size,
12247  VkDeviceSize alignment,
12248  bool dedicatedAllocation,
12249  VkBuffer dedicatedBuffer,
12250  VkImage dedicatedImage,
12251  const VmaAllocationCreateInfo& createInfo,
12252  uint32_t memTypeIndex,
12253  VmaSuballocationType suballocType,
12254  VmaAllocation* pAllocation)
12255 {
12256  VMA_ASSERT(pAllocation != VMA_NULL);
12257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12258 
12259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12260 
12261  // If memory type is not HOST_VISIBLE, disable MAPPED.
12262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12264  {
12265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12266  }
12267 
12268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12269  VMA_ASSERT(blockVector);
12270 
12271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12272  bool preferDedicatedMemory =
12273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12274  dedicatedAllocation ||
12275  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
12276  size > preferredBlockSize / 2;
12277 
12278  if(preferDedicatedMemory &&
12279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12280  finalCreateInfo.pool == VK_NULL_HANDLE)
12281  {
12282  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12283  }
12284 
12285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12286  {
12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12288  {
12289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12290  }
12291  else
12292  {
12293  return AllocateDedicatedMemory(
12294  size,
12295  suballocType,
12296  memTypeIndex,
12297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12299  finalCreateInfo.pUserData,
12300  dedicatedBuffer,
12301  dedicatedImage,
12302  pAllocation);
12303  }
12304  }
12305  else
12306  {
12307  VkResult res = blockVector->Allocate(
12308  VK_NULL_HANDLE, // hCurrentPool
12309  m_CurrentFrameIndex.load(),
12310  size,
12311  alignment,
12312  finalCreateInfo,
12313  suballocType,
12314  pAllocation);
12315  if(res == VK_SUCCESS)
12316  {
12317  return res;
12318  }
12319 
12320  // Block allocation failed - try dedicated memory.
12321  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12322  {
12323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12324  }
12325  else
12326  {
12327  res = AllocateDedicatedMemory(
12328  size,
12329  suballocType,
12330  memTypeIndex,
12331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12332  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12333  finalCreateInfo.pUserData,
12334  dedicatedBuffer,
12335  dedicatedImage,
12336  pAllocation);
12337  if(res == VK_SUCCESS)
12338  {
12339  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
12340  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12341  return VK_SUCCESS;
12342  }
12343  else
12344  {
12345  // Everything failed: Return error code.
12346  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12347  return res;
12348  }
12349  }
12350  }
12351 }
12352 
12353 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12354  VkDeviceSize size,
12355  VmaSuballocationType suballocType,
12356  uint32_t memTypeIndex,
12357  bool map,
12358  bool isUserDataString,
12359  void* pUserData,
12360  VkBuffer dedicatedBuffer,
12361  VkImage dedicatedImage,
12362  VmaAllocation* pAllocation)
12363 {
12364  VMA_ASSERT(pAllocation);
12365 
12366  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12367  allocInfo.memoryTypeIndex = memTypeIndex;
12368  allocInfo.allocationSize = size;
12369 
12370 #if VMA_DEDICATED_ALLOCATION
12371  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12372  if(m_UseKhrDedicatedAllocation)
12373  {
12374  if(dedicatedBuffer != VK_NULL_HANDLE)
12375  {
12376  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12377  dedicatedAllocInfo.buffer = dedicatedBuffer;
12378  allocInfo.pNext = &dedicatedAllocInfo;
12379  }
12380  else if(dedicatedImage != VK_NULL_HANDLE)
12381  {
12382  dedicatedAllocInfo.image = dedicatedImage;
12383  allocInfo.pNext = &dedicatedAllocInfo;
12384  }
12385  }
12386 #endif // #if VMA_DEDICATED_ALLOCATION
12387 
12388  // Allocate VkDeviceMemory.
12389  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12390  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12391  if(res < 0)
12392  {
12393  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12394  return res;
12395  }
12396 
12397  void* pMappedData = VMA_NULL;
12398  if(map)
12399  {
12400  res = (*m_VulkanFunctions.vkMapMemory)(
12401  m_hDevice,
12402  hMemory,
12403  0,
12404  VK_WHOLE_SIZE,
12405  0,
12406  &pMappedData);
12407  if(res < 0)
12408  {
12409  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12410  FreeVulkanMemory(memTypeIndex, size, hMemory);
12411  return res;
12412  }
12413  }
12414 
12415  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12416  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12417  (*pAllocation)->SetUserData(this, pUserData);
12418  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12419  {
12420  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12421  }
12422 
12423  // Register it in m_pDedicatedAllocations.
12424  {
12425  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12426  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12427  VMA_ASSERT(pDedicatedAllocations);
12428  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12429  }
12430 
12431  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12432 
12433  return VK_SUCCESS;
12434 }
12435 
12436 void VmaAllocator_T::GetBufferMemoryRequirements(
12437  VkBuffer hBuffer,
12438  VkMemoryRequirements& memReq,
12439  bool& requiresDedicatedAllocation,
12440  bool& prefersDedicatedAllocation) const
12441 {
12442 #if VMA_DEDICATED_ALLOCATION
12443  if(m_UseKhrDedicatedAllocation)
12444  {
12445  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12446  memReqInfo.buffer = hBuffer;
12447 
12448  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12449 
12450  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12451  memReq2.pNext = &memDedicatedReq;
12452 
12453  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12454 
12455  memReq = memReq2.memoryRequirements;
12456  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12457  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12458  }
12459  else
12460 #endif // #if VMA_DEDICATED_ALLOCATION
12461  {
12462  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12463  requiresDedicatedAllocation = false;
12464  prefersDedicatedAllocation = false;
12465  }
12466 }
12467 
12468 void VmaAllocator_T::GetImageMemoryRequirements(
12469  VkImage hImage,
12470  VkMemoryRequirements& memReq,
12471  bool& requiresDedicatedAllocation,
12472  bool& prefersDedicatedAllocation) const
12473 {
12474 #if VMA_DEDICATED_ALLOCATION
12475  if(m_UseKhrDedicatedAllocation)
12476  {
12477  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12478  memReqInfo.image = hImage;
12479 
12480  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12481 
12482  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12483  memReq2.pNext = &memDedicatedReq;
12484 
12485  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12486 
12487  memReq = memReq2.memoryRequirements;
12488  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12489  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12490  }
12491  else
12492 #endif // #if VMA_DEDICATED_ALLOCATION
12493  {
12494  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12495  requiresDedicatedAllocation = false;
12496  prefersDedicatedAllocation = false;
12497  }
12498 }
12499 
12500 VkResult VmaAllocator_T::AllocateMemory(
12501  const VkMemoryRequirements& vkMemReq,
12502  bool requiresDedicatedAllocation,
12503  bool prefersDedicatedAllocation,
12504  VkBuffer dedicatedBuffer,
12505  VkImage dedicatedImage,
12506  const VmaAllocationCreateInfo& createInfo,
12507  VmaSuballocationType suballocType,
12508  VmaAllocation* pAllocation)
12509 {
12510  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12511 
12512  if(vkMemReq.size == 0)
12513  {
12514  return VK_ERROR_VALIDATION_FAILED_EXT;
12515  }
12516  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12517  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12518  {
12519  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12521  }
12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12523  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12524  {
12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12527  }
12528  if(requiresDedicatedAllocation)
12529  {
12530  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12531  {
12532  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12534  }
12535  if(createInfo.pool != VK_NULL_HANDLE)
12536  {
12537  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12539  }
12540  }
12541  if((createInfo.pool != VK_NULL_HANDLE) &&
12542  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12543  {
12544  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12546  }
12547 
12548  if(createInfo.pool != VK_NULL_HANDLE)
12549  {
12550  const VkDeviceSize alignmentForPool = VMA_MAX(
12551  vkMemReq.alignment,
12552  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12553  return createInfo.pool->m_BlockVector.Allocate(
12554  createInfo.pool,
12555  m_CurrentFrameIndex.load(),
12556  vkMemReq.size,
12557  alignmentForPool,
12558  createInfo,
12559  suballocType,
12560  pAllocation);
12561  }
12562  else
12563  {
12564  // Bit mask of Vulkan memory types acceptable for this allocation.
12565  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12566  uint32_t memTypeIndex = UINT32_MAX;
12567  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12568  if(res == VK_SUCCESS)
12569  {
12570  VkDeviceSize alignmentForMemType = VMA_MAX(
12571  vkMemReq.alignment,
12572  GetMemoryTypeMinAlignment(memTypeIndex));
12573 
12574  res = AllocateMemoryOfType(
12575  vkMemReq.size,
12576  alignmentForMemType,
12577  requiresDedicatedAllocation || prefersDedicatedAllocation,
12578  dedicatedBuffer,
12579  dedicatedImage,
12580  createInfo,
12581  memTypeIndex,
12582  suballocType,
12583  pAllocation);
12584  // Succeeded on first try.
12585  if(res == VK_SUCCESS)
12586  {
12587  return res;
12588  }
12589  // Allocation from this memory type failed. Try other compatible memory types.
12590  else
12591  {
12592  for(;;)
12593  {
12594  // Remove old memTypeIndex from list of possibilities.
12595  memoryTypeBits &= ~(1u << memTypeIndex);
12596  // Find alternative memTypeIndex.
12597  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12598  if(res == VK_SUCCESS)
12599  {
12600  alignmentForMemType = VMA_MAX(
12601  vkMemReq.alignment,
12602  GetMemoryTypeMinAlignment(memTypeIndex));
12603 
12604  res = AllocateMemoryOfType(
12605  vkMemReq.size,
12606  alignmentForMemType,
12607  requiresDedicatedAllocation || prefersDedicatedAllocation,
12608  dedicatedBuffer,
12609  dedicatedImage,
12610  createInfo,
12611  memTypeIndex,
12612  suballocType,
12613  pAllocation);
12614  // Allocation from this alternative memory type succeeded.
12615  if(res == VK_SUCCESS)
12616  {
12617  return res;
12618  }
12619  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12620  }
12621  // No other matching memory type index could be found.
12622  else
12623  {
12624  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12625  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12626  }
12627  }
12628  }
12629  }
12630  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12631  else
12632  return res;
12633  }
12634 }
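
// Worked example of the fallback loop above: suppose vkMemReq.memoryTypeBits is 0b1101
// and vmaFindMemoryTypeIndex first selects type 0. If allocation from type 0 fails,
// bit 0 is cleared (the mask becomes 0b1100) and the search repeats over types 2 and 3,
// until some type succeeds or no compatible type remains, in which case
// VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.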
12635 
12636 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12637 {
12638  VMA_ASSERT(allocation);
12639 
12640  if(TouchAllocation(allocation))
12641  {
12642  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12643  {
12644  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12645  }
12646 
12647  switch(allocation->GetType())
12648  {
12649  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12650  {
12651  VmaBlockVector* pBlockVector = VMA_NULL;
12652  VmaPool hPool = allocation->GetPool();
12653  if(hPool != VK_NULL_HANDLE)
12654  {
12655  pBlockVector = &hPool->m_BlockVector;
12656  }
12657  else
12658  {
12659  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12660  pBlockVector = m_pBlockVectors[memTypeIndex];
12661  }
12662  pBlockVector->Free(allocation);
12663  }
12664  break;
12665  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12666  FreeDedicatedMemory(allocation);
12667  break;
12668  default:
12669  VMA_ASSERT(0);
12670  }
12671  }
12672 
12673  allocation->SetUserData(this, VMA_NULL);
12674  vma_delete(this, allocation);
12675 }
12676 
12677 VkResult VmaAllocator_T::ResizeAllocation(
12678  const VmaAllocation alloc,
12679  VkDeviceSize newSize)
12680 {
12681  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
12682  {
12683  return VK_ERROR_VALIDATION_FAILED_EXT;
12684  }
12685  if(newSize == alloc->GetSize())
12686  {
12687  return VK_SUCCESS;
12688  }
12689 
12690  switch(alloc->GetType())
12691  {
12692  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12693  return VK_ERROR_FEATURE_NOT_PRESENT;
12694  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12695  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
12696  {
12697  alloc->ChangeSize(newSize);
12698  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
12699  return VK_SUCCESS;
12700  }
12701  else
12702  {
12703  return VK_ERROR_OUT_OF_POOL_MEMORY;
12704  }
12705  default:
12706  VMA_ASSERT(0);
12707  return VK_ERROR_VALIDATION_FAILED_EXT;
12708  }
12709 }
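
// Sketch of the corresponding public entry point (assumes `allocator` and a
// block-based `alloc` created earlier; dedicated allocations return
// VK_ERROR_FEATURE_NOT_PRESENT per the switch above):
//
//   VkResult res = vmaResizeAllocation(allocator, alloc, newSizeInBytes);
//   if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
//   {
//       // The block metadata could not resize the allocation in place.
//   }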
12710 
12711 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12712 {
12713  // Initialize.
12714  InitStatInfo(pStats->total);
12715  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12716  InitStatInfo(pStats->memoryType[i]);
12717  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12718  InitStatInfo(pStats->memoryHeap[i]);
12719 
12720  // Process default pools.
12721  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12722  {
12723  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12724  VMA_ASSERT(pBlockVector);
12725  pBlockVector->AddStats(pStats);
12726  }
12727 
12728  // Process custom pools.
12729  {
12730  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12731  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12732  {
12733  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12734  }
12735  }
12736 
12737  // Process dedicated allocations.
12738  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12739  {
12740  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12741  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12742  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12743  VMA_ASSERT(pDedicatedAllocVector);
12744  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12745  {
12746  VmaStatInfo allocationStatInfo;
12747  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12748  VmaAddStatInfo(pStats->total, allocationStatInfo);
12749  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12750  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12751  }
12752  }
12753 
12754  // Postprocess.
12755  VmaPostprocessCalcStatInfo(pStats->total);
12756  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12757  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12758  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12759  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12760 }
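
// Sketch of the public wrapper (assumes an existing `allocator`):
//
//   VmaStats stats;
//   vmaCalculateStats(allocator, &stats);
//   // stats.total, stats.memoryType[i], and stats.memoryHeap[h] now hold the
//   // aggregated VmaStatInfo values computed above.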
12761 
12762 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12763 
12764 VkResult VmaAllocator_T::Defragment(
12765  VmaAllocation* pAllocations,
12766  size_t allocationCount,
12767  VkBool32* pAllocationsChanged,
12768  const VmaDefragmentationInfo* pDefragmentationInfo,
12769  VmaDefragmentationStats* pDefragmentationStats)
12770 {
12771  if(pAllocationsChanged != VMA_NULL)
12772  {
12773  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12774  }
12775  if(pDefragmentationStats != VMA_NULL)
12776  {
12777  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12778  }
12779 
12780  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12781 
12782  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12783 
12784  const size_t poolCount = m_Pools.size();
12785 
12786  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12787  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12788  {
12789  VmaAllocation hAlloc = pAllocations[allocIndex];
12790  VMA_ASSERT(hAlloc);
12791  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12792  // DedicatedAlloc cannot be defragmented.
12793  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12794  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12795  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12796  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12797  // Lost allocation cannot be defragmented.
12798  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12799  {
12800  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12801 
12802  const VmaPool hAllocPool = hAlloc->GetPool();
12803  // This allocation belongs to custom pool.
12804  if(hAllocPool != VK_NULL_HANDLE)
12805  {
12806  // Pools with linear or buddy algorithm are not defragmented.
12807  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12808  {
12809  pAllocBlockVector = &hAllocPool->m_BlockVector;
12810  }
12811  }
12812  // This allocation belongs to general pool.
12813  else
12814  {
12815  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12816  }
12817 
12818  if(pAllocBlockVector != VMA_NULL)
12819  {
12820  VmaDefragmentator* const pDefragmentator =
12821  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12822  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12823  &pAllocationsChanged[allocIndex] : VMA_NULL;
12824  pDefragmentator->AddAllocation(hAlloc, pChanged);
12825  }
12826  }
12827  }
12828 
12829  VkResult result = VK_SUCCESS;
12830 
12831  // ======== Main processing.
12832 
12833  VkDeviceSize maxBytesToMove = SIZE_MAX;
12834  uint32_t maxAllocationsToMove = UINT32_MAX;
12835  if(pDefragmentationInfo != VMA_NULL)
12836  {
12837  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12838  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12839  }
12840 
12841  // Process standard memory.
12842  for(uint32_t memTypeIndex = 0;
12843  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12844  ++memTypeIndex)
12845  {
12846  // Only HOST_VISIBLE memory types can be defragmented.
12847  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12848  {
12849  result = m_pBlockVectors[memTypeIndex]->Defragment(
12850  pDefragmentationStats,
12851  maxBytesToMove,
12852  maxAllocationsToMove);
12853  }
12854  }
12855 
12856  // Process custom pools.
12857  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12858  {
12859  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12860  pDefragmentationStats,
12861  maxBytesToMove,
12862  maxAllocationsToMove);
12863  }
12864 
12865  // ======== Destroy defragmentators.
12866 
12867  // Process custom pools.
12868  for(size_t poolIndex = poolCount; poolIndex--; )
12869  {
12870  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12871  }
12872 
12873  // Process standard memory.
12874  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12875  {
12876  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12877  {
12878  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12879  }
12880  }
12881 
12882  return result;
12883 }
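
// Typical call through the public API (assumes `allocator` and a std::vector
// `allocations` of block allocations in HOST_VISIBLE + HOST_COHERENT memory;
// the limits are illustrative):
//
//   VmaDefragmentationInfo defragInfo = {};
//   defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
//   defragInfo.maxAllocationsToMove = 128;
//
//   std::vector<VkBool32> changed(allocations.size());
//   VmaDefragmentationStats defragStats = {};
//   vmaDefragment(allocator, allocations.data(), allocations.size(),
//       changed.data(), &defragInfo, &defragStats);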
12884 
12885 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12886 {
12887  if(hAllocation->CanBecomeLost())
12888  {
12889  /*
12890  Warning: This is a carefully designed algorithm.
12891  Do not modify unless you really know what you're doing :)
12892  */
12893  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12894  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12895  for(;;)
12896  {
12897  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12898  {
12899  pAllocationInfo->memoryType = UINT32_MAX;
12900  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12901  pAllocationInfo->offset = 0;
12902  pAllocationInfo->size = hAllocation->GetSize();
12903  pAllocationInfo->pMappedData = VMA_NULL;
12904  pAllocationInfo->pUserData = hAllocation->GetUserData();
12905  return;
12906  }
12907  else if(localLastUseFrameIndex == localCurrFrameIndex)
12908  {
12909  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12910  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12911  pAllocationInfo->offset = hAllocation->GetOffset();
12912  pAllocationInfo->size = hAllocation->GetSize();
12913  pAllocationInfo->pMappedData = VMA_NULL;
12914  pAllocationInfo->pUserData = hAllocation->GetUserData();
12915  return;
12916  }
12917  else // Last use time earlier than current time.
12918  {
12919  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12920  {
12921  localLastUseFrameIndex = localCurrFrameIndex;
12922  }
12923  }
12924  }
12925  }
12926  else
12927  {
12928 #if VMA_STATS_STRING_ENABLED
12929  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12930  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12931  for(;;)
12932  {
12933  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12934  if(localLastUseFrameIndex == localCurrFrameIndex)
12935  {
12936  break;
12937  }
12938  else // Last use time earlier than current time.
12939  {
12940  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12941  {
12942  localLastUseFrameIndex = localCurrFrameIndex;
12943  }
12944  }
12945  }
12946 #endif
12947 
12948  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12949  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12950  pAllocationInfo->offset = hAllocation->GetOffset();
12951  pAllocationInfo->size = hAllocation->GetSize();
12952  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12953  pAllocationInfo->pUserData = hAllocation->GetUserData();
12954  }
12955 }
12956 
12957 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12958 {
12959  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12960  if(hAllocation->CanBecomeLost())
12961  {
12962  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12963  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12964  for(;;)
12965  {
12966  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12967  {
12968  return false;
12969  }
12970  else if(localLastUseFrameIndex == localCurrFrameIndex)
12971  {
12972  return true;
12973  }
12974  else // Last use time earlier than current time.
12975  {
12976  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12977  {
12978  localLastUseFrameIndex = localCurrFrameIndex;
12979  }
12980  }
12981  }
12982  }
12983  else
12984  {
12985 #if VMA_STATS_STRING_ENABLED
12986  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12987  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12988  for(;;)
12989  {
12990  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12991  if(localLastUseFrameIndex == localCurrFrameIndex)
12992  {
12993  break;
12994  }
12995  else // Last use time earlier than current time.
12996  {
12997  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12998  {
12999  localLastUseFrameIndex = localCurrFrameIndex;
13000  }
13001  }
13002  }
13003 #endif
13004 
13005  return true;
13006  }
13007 }
13008 
13009 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
13010 {
13011  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
13012 
13013  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
13014 
13015  if(newCreateInfo.maxBlockCount == 0)
13016  {
13017  newCreateInfo.maxBlockCount = SIZE_MAX;
13018  }
13019  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
13020  {
13021  return VK_ERROR_INITIALIZATION_FAILED;
13022  }
13023 
13024  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
13025 
13026  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
13027 
13028  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
13029  if(res != VK_SUCCESS)
13030  {
13031  vma_delete(this, *pPool);
13032  *pPool = VMA_NULL;
13033  return res;
13034  }
13035 
13036  // Add to m_Pools.
13037  {
13038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13039  (*pPool)->SetId(m_NextPoolId++);
13040  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
13041  }
13042 
13043  return VK_SUCCESS;
13044 }
13045 
13046 void VmaAllocator_T::DestroyPool(VmaPool pool)
13047 {
13048  // Remove from m_Pools.
13049  {
13050  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13052  VMA_ASSERT(success && "Pool not found in Allocator.");
13053  }
13054 
13055  vma_delete(this, pool);
13056 }
13057 
13058 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13059 {
13060  pool->m_BlockVector.GetPoolStats(pPoolStats);
13061 }
13062 
13063 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13064 {
13065  m_CurrentFrameIndex.store(frameIndex);
13066 }
13067 
13068 void VmaAllocator_T::MakePoolAllocationsLost(
13069  VmaPool hPool,
13070  size_t* pLostAllocationCount)
13071 {
13072  hPool->m_BlockVector.MakePoolAllocationsLost(
13073  m_CurrentFrameIndex.load(),
13074  pLostAllocationCount);
13075 }
13076 
13077 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13078 {
13079  return hPool->m_BlockVector.CheckCorruption();
13080 }
13081 
13082 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13083 {
13084  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13085 
13086  // Process default pools.
13087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13088  {
13089  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13090  {
13091  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13092  VMA_ASSERT(pBlockVector);
13093  VkResult localRes = pBlockVector->CheckCorruption();
13094  switch(localRes)
13095  {
13096  case VK_ERROR_FEATURE_NOT_PRESENT:
13097  break;
13098  case VK_SUCCESS:
13099  finalRes = VK_SUCCESS;
13100  break;
13101  default:
13102  return localRes;
13103  }
13104  }
13105  }
13106 
13107  // Process custom pools.
13108  {
13109  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13110  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13111  {
13112  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13113  {
13114  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13115  switch(localRes)
13116  {
13117  case VK_ERROR_FEATURE_NOT_PRESENT:
13118  break;
13119  case VK_SUCCESS:
13120  finalRes = VK_SUCCESS;
13121  break;
13122  default:
13123  return localRes;
13124  }
13125  }
13126  }
13127  }
13128 
13129  return finalRes;
13130 }
13131 
13132 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13133 {
13134  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13135  (*pAllocation)->InitLost();
13136 }
13137 
13138 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13139 {
13140  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13141 
13142  VkResult res;
13143  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13144  {
13145  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13146  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13147  {
13148  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13149  if(res == VK_SUCCESS)
13150  {
13151  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13152  }
13153  }
13154  else
13155  {
13156  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13157  }
13158  }
13159  else
13160  {
13161  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13162  }
13163 
13164  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13165  {
13166  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13167  }
13168 
13169  return res;
13170 }
13171 
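/*
The m_HeapSizeLimit bookkeeping above backs the optional
VmaAllocatorCreateInfo::pHeapSizeLimit feature: for a heap with a limit other
than VK_WHOLE_SIZE, the remaining budget is reserved under a mutex before
vkAllocateMemory is called. A minimal sketch of enabling it at allocator
creation (one entry per memory heap; VK_WHOLE_SIZE means no limit;
'allocatorInfo' is an assumed VmaAllocatorCreateInfo):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE; // No limit by default.
    heapLimits[0] = 256ull * 1024 * 1024; // Example: cap heap 0 at 256 MiB.
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/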
13172 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13173 {
13174  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13175  {
13176  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13177  }
13178 
13179  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13180 
13181  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13182  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13183  {
13184  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13185  m_HeapSizeLimit[heapIndex] += size;
13186  }
13187 }
13188 
13189 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13190 {
13191  if(hAllocation->CanBecomeLost())
13192  {
13193  return VK_ERROR_MEMORY_MAP_FAILED;
13194  }
13195 
13196  switch(hAllocation->GetType())
13197  {
13198  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13199  {
13200  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13201  char *pBytes = VMA_NULL;
13202  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13203  if(res == VK_SUCCESS)
13204  {
13205  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13206  hAllocation->BlockAllocMap();
13207  }
13208  return res;
13209  }
13210  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13211  return hAllocation->DedicatedAllocMap(this, ppData);
13212  default:
13213  VMA_ASSERT(0);
13214  return VK_ERROR_MEMORY_MAP_FAILED;
13215  }
13216 }
13217 
13218 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13219 {
13220  switch(hAllocation->GetType())
13221  {
13222  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13223  {
13224  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13225  hAllocation->BlockAllocUnmap();
13226  pBlock->Unmap(this, 1);
13227  }
13228  break;
13229  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13230  hAllocation->DedicatedAllocUnmap(this);
13231  break;
13232  default:
13233  VMA_ASSERT(0);
13234  }
13235 }
13236 
13237 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13238 {
13239  VkResult res = VK_SUCCESS;
13240  switch(hAllocation->GetType())
13241  {
13242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13243  res = GetVulkanFunctions().vkBindBufferMemory(
13244  m_hDevice,
13245  hBuffer,
13246  hAllocation->GetMemory(),
13247  0); //memoryOffset
13248  break;
13249  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13250  {
13251  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13252  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13253  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13254  break;
13255  }
13256  default:
13257  VMA_ASSERT(0);
13258  }
13259  return res;
13260 }
13261 
13262 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13263 {
13264  VkResult res = VK_SUCCESS;
13265  switch(hAllocation->GetType())
13266  {
13267  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13268  res = GetVulkanFunctions().vkBindImageMemory(
13269  m_hDevice,
13270  hImage,
13271  hAllocation->GetMemory(),
13272  0); //memoryOffset
13273  break;
13274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13275  {
13276  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13277  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13278  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13279  break;
13280  }
13281  default:
13282  VMA_ASSERT(0);
13283  }
13284  return res;
13285 }
13286 
13287 void VmaAllocator_T::FlushOrInvalidateAllocation(
13288  VmaAllocation hAllocation,
13289  VkDeviceSize offset, VkDeviceSize size,
13290  VMA_CACHE_OPERATION op)
13291 {
13292  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13293  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13294  {
13295  const VkDeviceSize allocationSize = hAllocation->GetSize();
13296  VMA_ASSERT(offset <= allocationSize);
13297 
13298  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13299 
13300  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13301  memRange.memory = hAllocation->GetMemory();
13302 
13303  switch(hAllocation->GetType())
13304  {
13305  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13306  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13307  if(size == VK_WHOLE_SIZE)
13308  {
13309  memRange.size = allocationSize - memRange.offset;
13310  }
13311  else
13312  {
13313  VMA_ASSERT(offset + size <= allocationSize);
13314  memRange.size = VMA_MIN(
13315  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13316  allocationSize - memRange.offset);
13317  }
13318  break;
13319 
13320  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13321  {
 13322  // 1. Calculate range, still within this allocation.
13323  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13324  if(size == VK_WHOLE_SIZE)
13325  {
13326  size = allocationSize - offset;
13327  }
13328  else
13329  {
13330  VMA_ASSERT(offset + size <= allocationSize);
13331  }
13332  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13333 
13334  // 2. Adjust to whole block.
13335  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13336  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13337  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13338  memRange.offset += allocationOffset;
13339  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13340 
13341  break;
13342  }
13343 
13344  default:
13345  VMA_ASSERT(0);
13346  }
13347 
13348  switch(op)
13349  {
13350  case VMA_CACHE_FLUSH:
13351  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13352  break;
13353  case VMA_CACHE_INVALIDATE:
13354  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13355  break;
13356  default:
13357  VMA_ASSERT(0);
13358  }
13359  }
13360  // else: Just ignore this call.
13361 }
13362 
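/*
A worked example of the alignment math above, assuming
nonCoherentAtomSize = 64, offset = 100, size = 200, within a dedicated
allocation of sufficient size:

    memRange.offset = VmaAlignDown(100, 64) = 64
    memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256

The flushed/invalidated range [64, 320) is expanded just enough to cover the
requested range [100, 300) while honoring
VkPhysicalDeviceLimits::nonCoherentAtomSize.
*/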
13363 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13364 {
13365  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13366 
13367  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13368  {
13369  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13370  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13371  VMA_ASSERT(pDedicatedAllocations);
13372  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13373  VMA_ASSERT(success);
13374  }
13375 
13376  VkDeviceMemory hMemory = allocation->GetMemory();
13377 
13378  /*
 13379  There is no need to call this, because the Vulkan spec allows skipping
 13380  vkUnmapMemory before vkFreeMemory.
13381 
13382  if(allocation->GetMappedData() != VMA_NULL)
13383  {
13384  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13385  }
13386  */
13387 
13388  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13389 
13390  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13391 }
13392 
13393 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13394 {
13395  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13396  !hAllocation->CanBecomeLost() &&
13397  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13398  {
13399  void* pData = VMA_NULL;
13400  VkResult res = Map(hAllocation, &pData);
13401  if(res == VK_SUCCESS)
13402  {
13403  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13404  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13405  Unmap(hAllocation);
13406  }
13407  else
13408  {
13409  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13410  }
13411  }
13412 }
13413 
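/*
FillAllocation is active only when VMA_DEBUG_INITIALIZE_ALLOCATIONS is
enabled. A minimal sketch of turning it on, before including the
implementation:

    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/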
13414 #if VMA_STATS_STRING_ENABLED
13415 
13416 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13417 {
13418  bool dedicatedAllocationsStarted = false;
13419  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13420  {
13421  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13422  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13423  VMA_ASSERT(pDedicatedAllocVector);
13424  if(pDedicatedAllocVector->empty() == false)
13425  {
13426  if(dedicatedAllocationsStarted == false)
13427  {
13428  dedicatedAllocationsStarted = true;
13429  json.WriteString("DedicatedAllocations");
13430  json.BeginObject();
13431  }
13432 
13433  json.BeginString("Type ");
13434  json.ContinueString(memTypeIndex);
13435  json.EndString();
13436 
13437  json.BeginArray();
13438 
13439  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13440  {
13441  json.BeginObject(true);
13442  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13443  hAlloc->PrintParameters(json);
13444  json.EndObject();
13445  }
13446 
13447  json.EndArray();
13448  }
13449  }
13450  if(dedicatedAllocationsStarted)
13451  {
13452  json.EndObject();
13453  }
13454 
13455  {
13456  bool allocationsStarted = false;
13457  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13458  {
13459  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13460  {
13461  if(allocationsStarted == false)
13462  {
13463  allocationsStarted = true;
13464  json.WriteString("DefaultPools");
13465  json.BeginObject();
13466  }
13467 
13468  json.BeginString("Type ");
13469  json.ContinueString(memTypeIndex);
13470  json.EndString();
13471 
13472  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13473  }
13474  }
13475  if(allocationsStarted)
13476  {
13477  json.EndObject();
13478  }
13479  }
13480 
13481  // Custom pools
13482  {
13483  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13484  const size_t poolCount = m_Pools.size();
13485  if(poolCount > 0)
13486  {
13487  json.WriteString("Pools");
13488  json.BeginObject();
13489  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13490  {
13491  json.BeginString();
13492  json.ContinueString(m_Pools[poolIndex]->GetId());
13493  json.EndString();
13494 
13495  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13496  }
13497  json.EndObject();
13498  }
13499  }
13500 }
13501 
13502 #endif // #if VMA_STATS_STRING_ENABLED
13503 
13504 //////////////////////////////////////////////////////////////////////////////
13505 // Public interface
13506 
13507 VkResult vmaCreateAllocator(
13508  const VmaAllocatorCreateInfo* pCreateInfo,
13509  VmaAllocator* pAllocator)
13510 {
13511  VMA_ASSERT(pCreateInfo && pAllocator);
13512  VMA_DEBUG_LOG("vmaCreateAllocator");
13513  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13514  return (*pAllocator)->Init(pCreateInfo);
13515 }
13516 
13517 void vmaDestroyAllocator(
13518  VmaAllocator allocator)
13519 {
13520  if(allocator != VK_NULL_HANDLE)
13521  {
13522  VMA_DEBUG_LOG("vmaDestroyAllocator");
13523  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13524  vma_delete(&allocationCallbacks, allocator);
13525  }
13526 }
13527 
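/*
Typical usage of the two functions above (a minimal sketch; 'physicalDevice'
and 'device' are assumed to be valid Vulkan handles created elsewhere):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images and allocations ...
    vmaDestroyAllocator(allocator);
*/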
13528 void vmaGetPhysicalDeviceProperties(
13529  VmaAllocator allocator,
13530  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13531 {
13532  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13533  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13534 }
13535 
13536 void vmaGetPhysicalDeviceMemoryProperties(
13537  VmaAllocator allocator,
13538  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13539 {
13540  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13541  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13542 }
13543 
13544 void vmaGetMemoryTypeProperties(
13545  VmaAllocator allocator,
13546  uint32_t memoryTypeIndex,
13547  VkMemoryPropertyFlags* pFlags)
13548 {
13549  VMA_ASSERT(allocator && pFlags);
13550  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13551  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13552 }
13553 
13554 void vmaSetCurrentFrameIndex(
13555  VmaAllocator allocator,
13556  uint32_t frameIndex)
13557 {
13558  VMA_ASSERT(allocator);
13559  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13560 
13561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13562 
13563  allocator->SetCurrentFrameIndex(frameIndex);
13564 }
13565 
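/*
A minimal sketch of the per-frame protocol for allocations that can become
lost ('frameIndex' is an application-side counter; it must never equal
VMA_FRAME_INDEX_LOST, as asserted above):

    vmaSetCurrentFrameIndex(allocator, ++frameIndex);
    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // The allocation is lost - recreate the resource.
    }
*/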
13566 void vmaCalculateStats(
13567  VmaAllocator allocator,
13568  VmaStats* pStats)
13569 {
13570  VMA_ASSERT(allocator && pStats);
13571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13572  allocator->CalculateStats(pStats);
13573 }
13574 
13575 #if VMA_STATS_STRING_ENABLED
13576 
13577 void vmaBuildStatsString(
13578  VmaAllocator allocator,
13579  char** ppStatsString,
13580  VkBool32 detailedMap)
13581 {
13582  VMA_ASSERT(allocator && ppStatsString);
13583  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13584 
13585  VmaStringBuilder sb(allocator);
13586  {
13587  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13588  json.BeginObject();
13589 
13590  VmaStats stats;
13591  allocator->CalculateStats(&stats);
13592 
13593  json.WriteString("Total");
13594  VmaPrintStatInfo(json, stats.total);
13595 
13596  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13597  {
13598  json.BeginString("Heap ");
13599  json.ContinueString(heapIndex);
13600  json.EndString();
13601  json.BeginObject();
13602 
13603  json.WriteString("Size");
13604  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13605 
13606  json.WriteString("Flags");
13607  json.BeginArray(true);
13608  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13609  {
13610  json.WriteString("DEVICE_LOCAL");
13611  }
13612  json.EndArray();
13613 
13614  if(stats.memoryHeap[heapIndex].blockCount > 0)
13615  {
13616  json.WriteString("Stats");
13617  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13618  }
13619 
13620  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13621  {
13622  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13623  {
13624  json.BeginString("Type ");
13625  json.ContinueString(typeIndex);
13626  json.EndString();
13627 
13628  json.BeginObject();
13629 
13630  json.WriteString("Flags");
13631  json.BeginArray(true);
13632  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13633  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13634  {
13635  json.WriteString("DEVICE_LOCAL");
13636  }
13637  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13638  {
13639  json.WriteString("HOST_VISIBLE");
13640  }
13641  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13642  {
13643  json.WriteString("HOST_COHERENT");
13644  }
13645  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13646  {
13647  json.WriteString("HOST_CACHED");
13648  }
13649  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13650  {
13651  json.WriteString("LAZILY_ALLOCATED");
13652  }
13653  json.EndArray();
13654 
13655  if(stats.memoryType[typeIndex].blockCount > 0)
13656  {
13657  json.WriteString("Stats");
13658  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13659  }
13660 
13661  json.EndObject();
13662  }
13663  }
13664 
13665  json.EndObject();
13666  }
13667  if(detailedMap == VK_TRUE)
13668  {
13669  allocator->PrintDetailedMap(json);
13670  }
13671 
13672  json.EndObject();
13673  }
13674 
13675  const size_t len = sb.GetLength();
13676  char* const pChars = vma_new_array(allocator, char, len + 1);
13677  if(len > 0)
13678  {
13679  memcpy(pChars, sb.GetData(), len);
13680  }
13681  pChars[len] = '\0';
13682  *ppStatsString = pChars;
13683 }
13684 
13685 void vmaFreeStatsString(
13686  VmaAllocator allocator,
13687  char* pStatsString)
13688 {
13689  if(pStatsString != VMA_NULL)
13690  {
13691  VMA_ASSERT(allocator);
13692  size_t len = strlen(pStatsString);
13693  vma_delete_array(allocator, pStatsString, len + 1);
13694  }
13695 }
13696 
13697 #endif // #if VMA_STATS_STRING_ENABLED
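/*
A minimal sketch of dumping the JSON statistics built above (a string
returned by vmaBuildStatsString() must be released with
vmaFreeStatsString()):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/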
13698 
13699 /*
13700 This function is not protected by any mutex because it just reads immutable data.
13701 */
13702 VkResult vmaFindMemoryTypeIndex(
13703  VmaAllocator allocator,
13704  uint32_t memoryTypeBits,
13705  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13706  uint32_t* pMemoryTypeIndex)
13707 {
13708  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13709  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13710  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13711 
13712  if(pAllocationCreateInfo->memoryTypeBits != 0)
13713  {
13714  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13715  }
13716 
13717  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13718  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13719 
13720  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13721  if(mapped)
13722  {
13723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13724  }
13725 
13726  // Convert usage to requiredFlags and preferredFlags.
13727  switch(pAllocationCreateInfo->usage)
13728  {
13729  case VMA_MEMORY_USAGE_UNKNOWN:
13730  break;
13731  case VMA_MEMORY_USAGE_GPU_ONLY:
13732  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13733  {
13734  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13735  }
13736  break;
13737  case VMA_MEMORY_USAGE_CPU_ONLY:
13738  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13739  break;
13740  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13741  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13742  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13743  {
13744  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13745  }
13746  break;
13747  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13748  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13749  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13750  break;
13751  default:
13752  break;
13753  }
13754 
13755  *pMemoryTypeIndex = UINT32_MAX;
13756  uint32_t minCost = UINT32_MAX;
13757  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13758  memTypeIndex < allocator->GetMemoryTypeCount();
13759  ++memTypeIndex, memTypeBit <<= 1)
13760  {
13761  // This memory type is acceptable according to memoryTypeBits bitmask.
13762  if((memTypeBit & memoryTypeBits) != 0)
13763  {
13764  const VkMemoryPropertyFlags currFlags =
13765  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13766  // This memory type contains requiredFlags.
13767  if((requiredFlags & ~currFlags) == 0)
13768  {
13769  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13770  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13771  // Remember memory type with lowest cost.
13772  if(currCost < minCost)
13773  {
13774  *pMemoryTypeIndex = memTypeIndex;
13775  if(currCost == 0)
13776  {
13777  return VK_SUCCESS;
13778  }
13779  minCost = currCost;
13780  }
13781  }
13782  }
13783  }
13784  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13785 }
13786 
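/*
A minimal sketch of calling vmaFindMemoryTypeIndex() directly, accepting all
memory types (UINT32_MAX) and letting usage drive the choice:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/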
13787 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13788  VmaAllocator allocator,
13789  const VkBufferCreateInfo* pBufferCreateInfo,
13790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13791  uint32_t* pMemoryTypeIndex)
13792 {
13793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13794  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13797 
13798  const VkDevice hDev = allocator->m_hDevice;
13799  VkBuffer hBuffer = VK_NULL_HANDLE;
13800  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13801  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13802  if(res == VK_SUCCESS)
13803  {
13804  VkMemoryRequirements memReq = {};
13805  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13806  hDev, hBuffer, &memReq);
13807 
13808  res = vmaFindMemoryTypeIndex(
13809  allocator,
13810  memReq.memoryTypeBits,
13811  pAllocationCreateInfo,
13812  pMemoryTypeIndex);
13813 
13814  allocator->GetVulkanFunctions().vkDestroyBuffer(
13815  hDev, hBuffer, allocator->GetAllocationCallbacks());
13816  }
13817  return res;
13818 }
13819 
13820 VkResult vmaFindMemoryTypeIndexForImageInfo(
13821  VmaAllocator allocator,
13822  const VkImageCreateInfo* pImageCreateInfo,
13823  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13824  uint32_t* pMemoryTypeIndex)
13825 {
13826  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13827  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13828  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13829  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13830 
13831  const VkDevice hDev = allocator->m_hDevice;
13832  VkImage hImage = VK_NULL_HANDLE;
13833  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13834  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13835  if(res == VK_SUCCESS)
13836  {
13837  VkMemoryRequirements memReq = {};
13838  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13839  hDev, hImage, &memReq);
13840 
13841  res = vmaFindMemoryTypeIndex(
13842  allocator,
13843  memReq.memoryTypeBits,
13844  pAllocationCreateInfo,
13845  pMemoryTypeIndex);
13846 
13847  allocator->GetVulkanFunctions().vkDestroyImage(
13848  hDev, hImage, allocator->GetAllocationCallbacks());
13849  }
13850  return res;
13851 }
13852 
13853 VkResult vmaCreatePool(
13854  VmaAllocator allocator,
13855  const VmaPoolCreateInfo* pCreateInfo,
13856  VmaPool* pPool)
13857 {
13858  VMA_ASSERT(allocator && pCreateInfo && pPool);
13859 
13860  VMA_DEBUG_LOG("vmaCreatePool");
13861 
13862  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13863 
13864  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13865 
13866 #if VMA_RECORDING_ENABLED
13867  if(allocator->GetRecorder() != VMA_NULL)
13868  {
13869  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13870  }
13871 #endif
13872 
13873  return res;
13874 }
13875 
13876 void vmaDestroyPool(
13877  VmaAllocator allocator,
13878  VmaPool pool)
13879 {
13880  VMA_ASSERT(allocator);
13881 
13882  if(pool == VK_NULL_HANDLE)
13883  {
13884  return;
13885  }
13886 
13887  VMA_DEBUG_LOG("vmaDestroyPool");
13888 
13889  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13890 
13891 #if VMA_RECORDING_ENABLED
13892  if(allocator->GetRecorder() != VMA_NULL)
13893  {
13894  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13895  }
13896 #endif
13897 
13898  allocator->DestroyPool(pool);
13899 }
13900 
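/*
A minimal sketch of the pool lifecycle ('memTypeIndex' is assumed to come
from one of the vmaFindMemoryTypeIndex* helpers above):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/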
13901 void vmaGetPoolStats(
13902  VmaAllocator allocator,
13903  VmaPool pool,
13904  VmaPoolStats* pPoolStats)
13905 {
13906  VMA_ASSERT(allocator && pool && pPoolStats);
13907 
13908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13909 
13910  allocator->GetPoolStats(pool, pPoolStats);
13911 }
13912 
13913 void vmaMakePoolAllocationsLost(
13914  VmaAllocator allocator,
13915  VmaPool pool,
13916  size_t* pLostAllocationCount)
13917 {
13918  VMA_ASSERT(allocator && pool);
13919 
13920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13921 
13922 #if VMA_RECORDING_ENABLED
13923  if(allocator->GetRecorder() != VMA_NULL)
13924  {
13925  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13926  }
13927 #endif
13928 
13929  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13930 }
13931 
13932 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13933 {
13934  VMA_ASSERT(allocator && pool);
13935 
13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13937 
13938  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13939 
13940  return allocator->CheckPoolCorruption(pool);
13941 }
13942 
13943 VkResult vmaAllocateMemory(
13944  VmaAllocator allocator,
13945  const VkMemoryRequirements* pVkMemoryRequirements,
13946  const VmaAllocationCreateInfo* pCreateInfo,
13947  VmaAllocation* pAllocation,
13948  VmaAllocationInfo* pAllocationInfo)
13949 {
13950  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13951 
13952  VMA_DEBUG_LOG("vmaAllocateMemory");
13953 
13954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13955 
13956  VkResult result = allocator->AllocateMemory(
13957  *pVkMemoryRequirements,
13958  false, // requiresDedicatedAllocation
13959  false, // prefersDedicatedAllocation
13960  VK_NULL_HANDLE, // dedicatedBuffer
13961  VK_NULL_HANDLE, // dedicatedImage
13962  *pCreateInfo,
13963  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13964  pAllocation);
13965 
13966 #if VMA_RECORDING_ENABLED
13967  if(allocator->GetRecorder() != VMA_NULL)
13968  {
13969  allocator->GetRecorder()->RecordAllocateMemory(
13970  allocator->GetCurrentFrameIndex(),
13971  *pVkMemoryRequirements,
13972  *pCreateInfo,
13973  *pAllocation);
13974  }
13975 #endif
13976 
13977  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13978  {
13979  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13980  }
13981 
13982  return result;
13983 }
13984 
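/*
A minimal sketch of a raw allocation from explicit VkMemoryRequirements
('memReq' is assumed to be filled by the application):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(
        allocator, &memReq, &allocCreateInfo, &allocation, VMA_NULL);
*/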
13985 VkResult vmaAllocateMemoryForBuffer(
13986  VmaAllocator allocator,
13987  VkBuffer buffer,
13988  const VmaAllocationCreateInfo* pCreateInfo,
13989  VmaAllocation* pAllocation,
13990  VmaAllocationInfo* pAllocationInfo)
13991 {
13992  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13993 
13994  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13995 
13996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13997 
13998  VkMemoryRequirements vkMemReq = {};
13999  bool requiresDedicatedAllocation = false;
14000  bool prefersDedicatedAllocation = false;
14001  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14002  requiresDedicatedAllocation,
14003  prefersDedicatedAllocation);
14004 
14005  VkResult result = allocator->AllocateMemory(
14006  vkMemReq,
14007  requiresDedicatedAllocation,
14008  prefersDedicatedAllocation,
14009  buffer, // dedicatedBuffer
14010  VK_NULL_HANDLE, // dedicatedImage
14011  *pCreateInfo,
14012  VMA_SUBALLOCATION_TYPE_BUFFER,
14013  pAllocation);
14014 
14015 #if VMA_RECORDING_ENABLED
14016  if(allocator->GetRecorder() != VMA_NULL)
14017  {
14018  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14019  allocator->GetCurrentFrameIndex(),
14020  vkMemReq,
14021  requiresDedicatedAllocation,
14022  prefersDedicatedAllocation,
14023  *pCreateInfo,
14024  *pAllocation);
14025  }
14026 #endif
14027 
14028  if(pAllocationInfo && result == VK_SUCCESS)
14029  {
14030  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14031  }
14032 
14033  return result;
14034 }
14035 
14036 VkResult vmaAllocateMemoryForImage(
14037  VmaAllocator allocator,
14038  VkImage image,
14039  const VmaAllocationCreateInfo* pCreateInfo,
14040  VmaAllocation* pAllocation,
14041  VmaAllocationInfo* pAllocationInfo)
14042 {
14043  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14044 
14045  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14046 
14047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14048 
14049  VkMemoryRequirements vkMemReq = {};
14050  bool requiresDedicatedAllocation = false;
14051  bool prefersDedicatedAllocation = false;
14052  allocator->GetImageMemoryRequirements(image, vkMemReq,
14053  requiresDedicatedAllocation, prefersDedicatedAllocation);
14054 
14055  VkResult result = allocator->AllocateMemory(
14056  vkMemReq,
14057  requiresDedicatedAllocation,
14058  prefersDedicatedAllocation,
14059  VK_NULL_HANDLE, // dedicatedBuffer
14060  image, // dedicatedImage
14061  *pCreateInfo,
14062  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14063  pAllocation);
14064 
14065 #if VMA_RECORDING_ENABLED
14066  if(allocator->GetRecorder() != VMA_NULL)
14067  {
14068  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14069  allocator->GetCurrentFrameIndex(),
14070  vkMemReq,
14071  requiresDedicatedAllocation,
14072  prefersDedicatedAllocation,
14073  *pCreateInfo,
14074  *pAllocation);
14075  }
14076 #endif
14077 
14078  if(pAllocationInfo && result == VK_SUCCESS)
14079  {
14080  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14081  }
14082 
14083  return result;
14084 }
14085 
14086 void vmaFreeMemory(
14087  VmaAllocator allocator,
14088  VmaAllocation allocation)
14089 {
14090  VMA_ASSERT(allocator);
14091 
14092  if(allocation == VK_NULL_HANDLE)
14093  {
14094  return;
14095  }
14096 
14097  VMA_DEBUG_LOG("vmaFreeMemory");
14098 
14099  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14100 
14101 #if VMA_RECORDING_ENABLED
14102  if(allocator->GetRecorder() != VMA_NULL)
14103  {
14104  allocator->GetRecorder()->RecordFreeMemory(
14105  allocator->GetCurrentFrameIndex(),
14106  allocation);
14107  }
14108 #endif
14109 
14110  allocator->FreeMemory(allocation);
14111 }
14112 
14113 VkResult vmaResizeAllocation(
14114  VmaAllocator allocator,
14115  VmaAllocation allocation,
14116  VkDeviceSize newSize)
14117 {
14118  VMA_ASSERT(allocator && allocation);
14119 
14120  VMA_DEBUG_LOG("vmaResizeAllocation");
14121 
14122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14123 
14124 #if VMA_RECORDING_ENABLED
14125  if(allocator->GetRecorder() != VMA_NULL)
14126  {
14127  allocator->GetRecorder()->RecordResizeAllocation(
14128  allocator->GetCurrentFrameIndex(),
14129  allocation,
14130  newSize);
14131  }
14132 #endif
14133 
14134  return allocator->ResizeAllocation(allocation, newSize);
14135 }
14136 
14137 void vmaGetAllocationInfo(
14138  VmaAllocator allocator,
14139  VmaAllocation allocation,
14140  VmaAllocationInfo* pAllocationInfo)
14141 {
14142  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14143 
14144  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14145 
14146 #if VMA_RECORDING_ENABLED
14147  if(allocator->GetRecorder() != VMA_NULL)
14148  {
14149  allocator->GetRecorder()->RecordGetAllocationInfo(
14150  allocator->GetCurrentFrameIndex(),
14151  allocation);
14152  }
14153 #endif
14154 
14155  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14156 }
14157 
14158 VkBool32 vmaTouchAllocation(
14159  VmaAllocator allocator,
14160  VmaAllocation allocation)
14161 {
14162  VMA_ASSERT(allocator && allocation);
14163 
14164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14165 
14166 #if VMA_RECORDING_ENABLED
14167  if(allocator->GetRecorder() != VMA_NULL)
14168  {
14169  allocator->GetRecorder()->RecordTouchAllocation(
14170  allocator->GetCurrentFrameIndex(),
14171  allocation);
14172  }
14173 #endif
14174 
14175  return allocator->TouchAllocation(allocation);
14176 }
14177 
14178 void vmaSetAllocationUserData(
14179  VmaAllocator allocator,
14180  VmaAllocation allocation,
14181  void* pUserData)
14182 {
14183  VMA_ASSERT(allocator && allocation);
14184 
14185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14186 
14187  allocation->SetUserData(allocator, pUserData);
14188 
14189 #if VMA_RECORDING_ENABLED
14190  if(allocator->GetRecorder() != VMA_NULL)
14191  {
14192  allocator->GetRecorder()->RecordSetAllocationUserData(
14193  allocator->GetCurrentFrameIndex(),
14194  allocation,
14195  pUserData);
14196  }
14197 #endif
14198 }
14199 
14200 void vmaCreateLostAllocation(
14201  VmaAllocator allocator,
14202  VmaAllocation* pAllocation)
14203 {
14204  VMA_ASSERT(allocator && pAllocation);
14205 
14206  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14207 
14208  allocator->CreateLostAllocation(pAllocation);
14209 
14210 #if VMA_RECORDING_ENABLED
14211  if(allocator->GetRecorder() != VMA_NULL)
14212  {
14213  allocator->GetRecorder()->RecordCreateLostAllocation(
14214  allocator->GetCurrentFrameIndex(),
14215  *pAllocation);
14216  }
14217 #endif
14218 }
14219 
14220 VkResult vmaMapMemory(
14221  VmaAllocator allocator,
14222  VmaAllocation allocation,
14223  void** ppData)
14224 {
14225  VMA_ASSERT(allocator && allocation && ppData);
14226 
14227  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14228 
14229  VkResult res = allocator->Map(allocation, ppData);
14230 
14231 #if VMA_RECORDING_ENABLED
14232  if(allocator->GetRecorder() != VMA_NULL)
14233  {
14234  allocator->GetRecorder()->RecordMapMemory(
14235  allocator->GetCurrentFrameIndex(),
14236  allocation);
14237  }
14238 #endif
14239 
14240  return res;
14241 }
14242 
14243 void vmaUnmapMemory(
14244  VmaAllocator allocator,
14245  VmaAllocation allocation)
14246 {
14247  VMA_ASSERT(allocator && allocation);
14248 
14249  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14250 
14251 #if VMA_RECORDING_ENABLED
14252  if(allocator->GetRecorder() != VMA_NULL)
14253  {
14254  allocator->GetRecorder()->RecordUnmapMemory(
14255  allocator->GetCurrentFrameIndex(),
14256  allocation);
14257  }
14258 #endif
14259 
14260  allocator->Unmap(allocation);
14261 }
14262 
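/*
A minimal sketch of the map/write/unmap pattern ('srcData'/'srcDataSize' are
assumed; for non-coherent memory, follow the write with vmaFlushAllocation(),
shown below):

    void* mappedData = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/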
14263 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14264 {
14265  VMA_ASSERT(allocator && allocation);
14266 
14267  VMA_DEBUG_LOG("vmaFlushAllocation");
14268 
14269  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14270 
14271  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14272 
14273 #if VMA_RECORDING_ENABLED
14274  if(allocator->GetRecorder() != VMA_NULL)
14275  {
14276  allocator->GetRecorder()->RecordFlushAllocation(
14277  allocator->GetCurrentFrameIndex(),
14278  allocation, offset, size);
14279  }
14280 #endif
14281 }
14282 
14283 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14284 {
14285  VMA_ASSERT(allocator && allocation);
14286 
14287  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14288 
14289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14290 
14291  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14292 
14293 #if VMA_RECORDING_ENABLED
14294  if(allocator->GetRecorder() != VMA_NULL)
14295  {
14296  allocator->GetRecorder()->RecordInvalidateAllocation(
14297  allocator->GetCurrentFrameIndex(),
14298  allocation, offset, size);
14299  }
14300 #endif
14301 }
14302 
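/*
A minimal sketch for memory that is HOST_VISIBLE but not HOST_COHERENT:
flush after CPU writes, invalidate before CPU reads.

    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);      // after writing
    vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE); // before reading
*/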
14303 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14304 {
14305  VMA_ASSERT(allocator);
14306 
14307  VMA_DEBUG_LOG("vmaCheckCorruption");
14308 
14309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14310 
14311  return allocator->CheckCorruption(memoryTypeBits);
14312 }
14313 
14314 VkResult vmaDefragment(
14315  VmaAllocator allocator,
14316  VmaAllocation* pAllocations,
14317  size_t allocationCount,
14318  VkBool32* pAllocationsChanged,
14319  const VmaDefragmentationInfo *pDefragmentationInfo,
14320  VmaDefragmentationStats* pDefragmentationStats)
14321 {
14322  VMA_ASSERT(allocator && pAllocations);
14323 
14324  VMA_DEBUG_LOG("vmaDefragment");
14325 
14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14327 
14328  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14329 }
14330 
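/*
A minimal sketch of defragmenting a set of allocations ('allocations' and
'allocationCount' are assumed; passing null for the optional parameters uses
default limits and skips the per-allocation changed flags):

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, allocationCount, VMA_NULL, VMA_NULL, &stats);
*/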
14331 VkResult vmaBindBufferMemory(
14332  VmaAllocator allocator,
14333  VmaAllocation allocation,
14334  VkBuffer buffer)
14335 {
14336  VMA_ASSERT(allocator && allocation && buffer);
14337 
14338  VMA_DEBUG_LOG("vmaBindBufferMemory");
14339 
14340  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14341 
14342  return allocator->BindBufferMemory(allocation, buffer);
14343 }
14344 
14345 VkResult vmaBindImageMemory(
14346  VmaAllocator allocator,
14347  VmaAllocation allocation,
14348  VkImage image)
14349 {
14350  VMA_ASSERT(allocator && allocation && image);
14351 
14352  VMA_DEBUG_LOG("vmaBindImageMemory");
14353 
14354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14355 
14356  return allocator->BindImageMemory(allocation, image);
14357 }
14358 
14359 VkResult vmaCreateBuffer(
14360  VmaAllocator allocator,
14361  const VkBufferCreateInfo* pBufferCreateInfo,
14362  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14363  VkBuffer* pBuffer,
14364  VmaAllocation* pAllocation,
14365  VmaAllocationInfo* pAllocationInfo)
14366 {
14367  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14368 
14369  if(pBufferCreateInfo->size == 0)
14370  {
14371  return VK_ERROR_VALIDATION_FAILED_EXT;
14372  }
14373 
14374  VMA_DEBUG_LOG("vmaCreateBuffer");
14375 
14376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14377 
14378  *pBuffer = VK_NULL_HANDLE;
14379  *pAllocation = VK_NULL_HANDLE;
14380 
14381  // 1. Create VkBuffer.
14382  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14383  allocator->m_hDevice,
14384  pBufferCreateInfo,
14385  allocator->GetAllocationCallbacks(),
14386  pBuffer);
14387  if(res >= 0)
14388  {
14389  // 2. vkGetBufferMemoryRequirements.
14390  VkMemoryRequirements vkMemReq = {};
14391  bool requiresDedicatedAllocation = false;
14392  bool prefersDedicatedAllocation = false;
14393  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14394  requiresDedicatedAllocation, prefersDedicatedAllocation);
14395 
 14396  // Make sure the alignment required for specific buffer usages, as reported
 14397  // in Physical Device Properties, is covered by the alignment reported by memory requirements.
14398  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14399  {
14400  VMA_ASSERT(vkMemReq.alignment %
14401  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14402  }
14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14404  {
14405  VMA_ASSERT(vkMemReq.alignment %
14406  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14407  }
14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14409  {
14410  VMA_ASSERT(vkMemReq.alignment %
14411  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14412  }
14413 
14414  // 3. Allocate memory using allocator.
14415  res = allocator->AllocateMemory(
14416  vkMemReq,
14417  requiresDedicatedAllocation,
14418  prefersDedicatedAllocation,
14419  *pBuffer, // dedicatedBuffer
14420  VK_NULL_HANDLE, // dedicatedImage
14421  *pAllocationCreateInfo,
14422  VMA_SUBALLOCATION_TYPE_BUFFER,
14423  pAllocation);
14424 
14425 #if VMA_RECORDING_ENABLED
14426  if(allocator->GetRecorder() != VMA_NULL)
14427  {
14428  allocator->GetRecorder()->RecordCreateBuffer(
14429  allocator->GetCurrentFrameIndex(),
14430  *pBufferCreateInfo,
14431  *pAllocationCreateInfo,
14432  *pAllocation);
14433  }
14434 #endif
14435 
14436  if(res >= 0)
14437  {
 14438  // 4. Bind buffer with memory.
14439  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14440  if(res >= 0)
14441  {
14442  // All steps succeeded.
14443  #if VMA_STATS_STRING_ENABLED
14444  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14445  #endif
14446  if(pAllocationInfo != VMA_NULL)
14447  {
14448  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14449  }
14450 
14451  return VK_SUCCESS;
14452  }
14453  allocator->FreeMemory(*pAllocation);
14454  *pAllocation = VK_NULL_HANDLE;
14455  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14456  *pBuffer = VK_NULL_HANDLE;
14457  return res;
14458  }
14459  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14460  *pBuffer = VK_NULL_HANDLE;
14461  return res;
14462  }
14463  return res;
14464 }
14465 
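/*
A minimal sketch of the typical create/destroy pair for a buffer:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/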
14466 void vmaDestroyBuffer(
14467  VmaAllocator allocator,
14468  VkBuffer buffer,
14469  VmaAllocation allocation)
14470 {
14471  VMA_ASSERT(allocator);
14472 
14473  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14474  {
14475  return;
14476  }
14477 
14478  VMA_DEBUG_LOG("vmaDestroyBuffer");
14479 
14480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14481 
14482 #if VMA_RECORDING_ENABLED
14483  if(allocator->GetRecorder() != VMA_NULL)
14484  {
14485  allocator->GetRecorder()->RecordDestroyBuffer(
14486  allocator->GetCurrentFrameIndex(),
14487  allocation);
14488  }
14489 #endif
14490 
14491  if(buffer != VK_NULL_HANDLE)
14492  {
14493  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14494  }
14495 
14496  if(allocation != VK_NULL_HANDLE)
14497  {
14498  allocator->FreeMemory(allocation);
14499  }
14500 }
14501 
14502 VkResult vmaCreateImage(
14503  VmaAllocator allocator,
14504  const VkImageCreateInfo* pImageCreateInfo,
14505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14506  VkImage* pImage,
14507  VmaAllocation* pAllocation,
14508  VmaAllocationInfo* pAllocationInfo)
14509 {
14510  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14511 
14512  if(pImageCreateInfo->extent.width == 0 ||
14513  pImageCreateInfo->extent.height == 0 ||
14514  pImageCreateInfo->extent.depth == 0 ||
14515  pImageCreateInfo->mipLevels == 0 ||
14516  pImageCreateInfo->arrayLayers == 0)
14517  {
14518  return VK_ERROR_VALIDATION_FAILED_EXT;
14519  }
14520 
14521  VMA_DEBUG_LOG("vmaCreateImage");
14522 
14523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14524 
14525  *pImage = VK_NULL_HANDLE;
14526  *pAllocation = VK_NULL_HANDLE;
14527 
14528  // 1. Create VkImage.
14529  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14530  allocator->m_hDevice,
14531  pImageCreateInfo,
14532  allocator->GetAllocationCallbacks(),
14533  pImage);
14534  if(res >= 0)
14535  {
14536  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14537  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14538  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14539 
14540  // 2. Allocate memory using allocator.
14541  VkMemoryRequirements vkMemReq = {};
14542  bool requiresDedicatedAllocation = false;
14543  bool prefersDedicatedAllocation = false;
14544  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14545  requiresDedicatedAllocation, prefersDedicatedAllocation);
14546 
14547  res = allocator->AllocateMemory(
14548  vkMemReq,
14549  requiresDedicatedAllocation,
14550  prefersDedicatedAllocation,
14551  VK_NULL_HANDLE, // dedicatedBuffer
14552  *pImage, // dedicatedImage
14553  *pAllocationCreateInfo,
14554  suballocType,
14555  pAllocation);
14556 
14557 #if VMA_RECORDING_ENABLED
14558  if(allocator->GetRecorder() != VMA_NULL)
14559  {
14560  allocator->GetRecorder()->RecordCreateImage(
14561  allocator->GetCurrentFrameIndex(),
14562  *pImageCreateInfo,
14563  *pAllocationCreateInfo,
14564  *pAllocation);
14565  }
14566 #endif
14567 
14568  if(res >= 0)
14569  {
14570  // 3. Bind image with memory.
14571  res = allocator->BindImageMemory(*pAllocation, *pImage);
14572  if(res >= 0)
14573  {
14574  // All steps succeeded.
14575  #if VMA_STATS_STRING_ENABLED
14576  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14577  #endif
14578  if(pAllocationInfo != VMA_NULL)
14579  {
14580  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14581  }
14582 
14583  return VK_SUCCESS;
14584  }
14585  allocator->FreeMemory(*pAllocation);
14586  *pAllocation = VK_NULL_HANDLE;
14587  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14588  *pImage = VK_NULL_HANDLE;
14589  return res;
14590  }
14591  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14592  *pImage = VK_NULL_HANDLE;
14593  return res;
14594  }
14595  return res;
14596 }
14597 
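/*
A minimal sketch for images, analogous to the buffer example above
('imageInfo' is assumed to be a fully filled VkImageCreateInfo):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(
        allocator, &imageInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/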
14598 void vmaDestroyImage(
14599  VmaAllocator allocator,
14600  VkImage image,
14601  VmaAllocation allocation)
14602 {
14603  VMA_ASSERT(allocator);
14604 
14605  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14606  {
14607  return;
14608  }
14609 
14610  VMA_DEBUG_LOG("vmaDestroyImage");
14611 
14612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14613 
14614 #if VMA_RECORDING_ENABLED
14615  if(allocator->GetRecorder() != VMA_NULL)
14616  {
14617  allocator->GetRecorder()->RecordDestroyImage(
14618  allocator->GetCurrentFrameIndex(),
14619  allocation);
14620  }
14621 #endif
14622 
14623  if(image != VK_NULL_HANDLE)
14624  {
14625  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14626  }
14627  if(allocation != VK_NULL_HANDLE)
14628  {
14629  allocator->FreeMemory(allocation);
14630  }
14631 }
14632 
14633 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1584
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1885
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1641
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1615
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2207
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1596
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1842
Definition: vk_mem_alloc.h:1945
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1588
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2307
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1638
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2577
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2096
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1485
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2188
Definition: vk_mem_alloc.h:1922
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1577
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1995
Definition: vk_mem_alloc.h:1869
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1650
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2124
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1703
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1635
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1873
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1775
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1593
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1774
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2581
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1667
VmaStatInfo total
Definition: vk_mem_alloc.h:1784
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2589
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1979
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2572
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1594
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1519
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1644
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2138
Definition: vk_mem_alloc.h:2132
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1710
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2317
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1589
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1613
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2016
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2158
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2194
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1575
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2141
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1820
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2567
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2585
Definition: vk_mem_alloc.h:1859
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2003
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1592
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1780
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1525
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1546
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1617
Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
Definition: vk_mem_alloc.h:1551
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2587
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1990
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2204
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
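A sketch; the returned string must be released with vmaFreeStatsString():

char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString); // JSON document describing current memory usage
vmaFreeStatsString(allocator, statsString);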
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1585
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1763
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2153
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1538
Definition: vk_mem_alloc.h:2128
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
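Creates the buffer, allocates memory for it, and binds them together in one call. A minimal sketch, assuming an existing allocator:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr); // last parameter: optional VmaAllocationInfo
// ...
vmaDestroyBuffer(allocator, buffer, allocation);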
Definition: vk_mem_alloc.h:1929
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1776
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1542
Definition: vk_mem_alloc.h:1953
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2144
Definition: vk_mem_alloc.h:1868
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1591
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
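An analogous sketch for an image; field values are illustrative only:

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &image, &allocation, nullptr);
// ...
vmaDestroyImage(allocator, image, allocation);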
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1985
Definition: vk_mem_alloc.h:1976
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1766
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1587
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2166
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1653
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2197
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1974
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2009
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
Definition: vk_mem_alloc.h:1691
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1782
Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it.
Definition: vk_mem_alloc.h:1909
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1775
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1598
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1623
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1540
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1597
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
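A sketch of the map/write/unmap pattern; the allocation must live in HOST_VISIBLE memory (e.g. created with VMA_MEMORY_USAGE_CPU_ONLY or VMA_MEMORY_USAGE_CPU_TO_GPU), and srcData/dataSize are hypothetical:

void* mappedData;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, dataSize);
    vmaUnmapMemory(allocator, allocation);
}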
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2180
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1590
Definition: vk_mem_alloc.h:1940
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1631
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:2331
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
Definition: vk_mem_alloc.h:1647
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1775
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1772
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2185
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
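A sketch; corruption detection is only active when the library is compiled with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled, otherwise the call reports the feature as not present:

VkResult res = vmaCheckPoolCorruption(allocator, pool);
if(res == VK_ERROR_VALIDATION_FAILED_EXT)
{
    // A write outside some allocation's boundaries was detected.
}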
Definition: vk_mem_alloc.h:1949
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2312
Definition: vk_mem_alloc.h:1960
Definition: vk_mem_alloc.h:1972
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2583
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1583
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
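A minimal setup sketch, assuming already created physicalDevice and device handles:

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ...
vmaDestroyAllocator(allocator);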
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1770
Definition: vk_mem_alloc.h:1825
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2134
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
Definition: vk_mem_alloc.h:1620
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1768
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1595
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1599
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:1896
Definition: vk_mem_alloc.h:1967
Definition: vk_mem_alloc.h:1852
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2326
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1573
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1586
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2113
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
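A sketch of in-place growth with a fallback; newSize is hypothetical:

VkResult res = vmaResizeAllocation(allocator, allocation, newSize);
if(res != VK_SUCCESS)
{
    // Not enough free space directly after the allocation -
    // fall back to creating a new, larger allocation and copying.
}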
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2293
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
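A sketch for memory obtained from raw VkMemoryRequirements; for buffers and images specifically, vmaAllocateMemoryForBuffer()/vmaAllocateMemoryForImage() or vmaCreateBuffer()/vmaCreateImage() are usually more convenient. device and buffer are assumed to exist:

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
// The memory can then be bound, e.g. with vmaBindBufferMemory().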
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1957
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2078
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1776
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
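A per-frame sketch for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, also showing vmaSetCurrentFrameIndex(); frameIndex is assumed to be maintained by the application:

vmaSetCurrentFrameIndex(allocator, frameIndex);

if(vmaTouchAllocation(allocator, allocation) == VK_TRUE)
{
    // Allocation is alive and now marked as used in this frame.
}
else
{
    // Allocation became lost; its memory was reclaimed - recreate it.
}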
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1607
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1783
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2191
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1776
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2298