//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif
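
/*
For example, to keep recording support disabled even on Windows, you can define
the macro yourself before including this header (a usage sketch, not required
by the library):

    #define VMA_RECORDING_ENABLED 0
    #include "vk_mem_alloc.h"
*/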

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#include <vulkan/vulkan.h>

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    /// Flags for the created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps. Optional, 0 means default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory/vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

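/*
A minimal creation sketch (assumes `physicalDevice` and `device` are valid
handles that the application created earlier):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
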
void vmaDestroyAllocator(
    VmaAllocator allocator);

void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of #VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as a null-terminated string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

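/*
Typical usage sketch: build the JSON-formatted statistics string, consume it,
then return it to the library:

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... log or save statsString ...
    vmaFreeStatsString(allocator, statsString);
*/
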
#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    /// Intended usage of the memory. Ignored if `pool` is not null.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for this allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for this allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional, can be null.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in #VmaAllocation and can be read as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

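/*
Usage sketch: pick a memory type suitable for CPU-visible staging memory
(assumes `allocator` is a valid VmaAllocator; UINT32_MAX accepts all memory
types):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/
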
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a created #VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of #VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional, 0 means default.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional, 0 means no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing #VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any #VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any #VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for a new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of `VkDeviceMemory` blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

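/*
Usage sketch: create a custom pool (assumes `memTypeIndex` came from one of the
vmaFindMemoryTypeIndex* functions above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/
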
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a #VmaAllocation object, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. The same memory object can be shared by multiple allocations. It can change after defragmentation or lost-allocation handling.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes. It can change after defragmentation or lost-allocation handling.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryPages(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaFreeMemoryPages(
    VmaAllocator allocator,
    size_t allocationCount,
    VmaAllocation* pAllocations);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

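/*
Typical map/copy/unmap sequence (a sketch; `srcData` and `srcSize` are
application-provided):

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
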
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to different places.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty `VkDeviceMemory` objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

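/*
Usage sketch (assumes `allocations` holds `allocCount` VmaAllocation handles
owned by the application; the changed-flags array and the info struct are both
optional and may be null):

    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator, allocations, allocCount, NULL, NULL, &defragStats);
*/
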
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

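/*
Usage sketch: create a device-local vertex buffer together with its memory in
one call:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
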
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED is 1

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif
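
/*
For example, to route the library's assertions to a custom handler, define
VMA_ASSERT before this point (a sketch; MyEngineAssert is a hypothetical
application-side function):

    #define VMA_ASSERT(expr) MyEngineAssert(expr)
*/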

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

2875 
2876 #ifndef VMA_ALIGN_OF
2877  #define VMA_ALIGN_OF(type) (__alignof(type))
2878 #endif
2879 
2880 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2881  #if defined(_WIN32)
2882  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2883  #else
2884  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2885  #endif
2886 #endif
2887 
2888 #ifndef VMA_SYSTEM_FREE
2889  #if defined(_WIN32)
2890  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2891  #else
2892  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2893  #endif
2894 #endif
2895 
2896 #ifndef VMA_MIN
2897  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2898 #endif
2899 
2900 #ifndef VMA_MAX
2901  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2902 #endif
2903 
2904 #ifndef VMA_SWAP
2905  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2906 #endif
2907 
2908 #ifndef VMA_SORT
2909  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2910 #endif
2911 
#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

2951 
2952 /*
2953 If providing your own implementation, you need to implement a subset of std::atomic:
2954 
2955 - Constructor(uint32_t desired)
2956 - uint32_t load() const
2957 - void store(uint32_t desired)
2958 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2959 */
2960 #ifndef VMA_ATOMIC_UINT32
2961  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2962 #endif
2963 
2964 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2965 
2969  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2970 #endif
2971 
#ifndef VMA_DEBUG_ALIGNMENT
    /**
    Minimum alignment of all allocations, in bytes.
    Set to more than 1 for debugging purposes only. Must be power of two.
    */
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /**
    Minimum margin before and after every allocation, in bytes.
    Set nonzero for debugging purposes only.
    */
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    /**
    Define this macro to 1 to automatically fill newly created and destroyed
    allocations with a recognizable bit pattern.
    */
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    /**
    Define this macro to 1, together with a nonzero VMA_DEBUG_MARGIN, to write
    magic values into the margins around every allocation and validate them,
    so that memory corruptions (out-of-bounds writes) can be detected.
    */
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /**
    Set this to 1 for debugging purposes only, to enable a single mutex protecting
    all entry calls to the library. Can be useful for debugging multithreading issues.
    */
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /**
    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
    Set to more than 1 for debugging purposes only. Must be power of two.
    */
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
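
// Quick check of the SWAR popcount above: VmaCountBitsSet(0x0Bu) == 3,
// because binary 1011 has bits 0, 1 and 3 set.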

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}

// Integer division with mathematical rounding to nearest (round half up).
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

/*
Returns true if given number is a power of two.
T must be an unsigned integer, or a signed integer whose value is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}
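
// Examples for the bit-smearing trick above: VmaNextPow2(17u) == 32, and
// VmaNextPow2(16u) == 16 (powers of two map to themselves).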

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
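
// Examples: VmaPrevPow2(17u) == 16, VmaPrevPow2(16u) == 16.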

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT


/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
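
// Worked example: with pageSize = 4096, a resource ending at byte 4000 and a
// resource starting at byte 4050 both fall on the page starting at offset 0,
// so the function returns true; if the second resource started at offset 4096
// or higher, it would return false.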

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
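
// Examples: (BUFFER, IMAGE_OPTIMAL) -> true; (BUFFER, IMAGE_LINEAR) -> false;
// (UNKNOWN, any non-free type) -> true, as the conservative fallback.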

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}


// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is an iterator to the found element, if present in the
collection, or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
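
/*
Behaves like std::lower_bound. A small usage sketch:

    int arr[] = { 1, 3, 3, 7 };
    int* it = VmaBinaryFindFirstNotLess(arr, arr + 4, 3, [](int a, int b) { return a < b; });
    // it now points to arr[1], the first element not less than 3.
*/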

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
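
/*
Usage sketch for the helpers above (MyObject is a hypothetical type; the second
pair of parentheses passes constructor arguments):

    MyObject* obj = vma_new(pAllocationCallbacks, MyObject)(constructorArg);
    vma_delete(pAllocationCallbacks, obj);
*/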

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

3463 
3464 #if VMA_USE_STL_VECTOR
3465 
3466 #define VmaVector std::vector
3467 
3468 template<typename T, typename allocatorT>
3469 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3470 {
3471  vec.insert(vec.begin() + index, item);
3472 }
3473 
3474 template<typename T, typename allocatorT>
3475 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3476 {
3477  vec.erase(vec.begin() + index);
3478 }
3479 
#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T, using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
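
/*
Usage sketch (MyItem is a hypothetical type; note that Alloc() returns raw,
unconstructed storage):

    VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128); // 128 items per block
    MyItem* item = itemAllocator.Alloc();
    // ... use *item ...
    itemAllocator.Free(item);
*/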

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: use the first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would waste time on returning
    // every item to m_ItemAllocator as free - the item allocator frees its blocks
    // wholesale in its own destructor anyway.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };

4449 struct VmaAllocation_T
4450 {
4451  VMA_CLASS_NO_COPY(VmaAllocation_T)
4452 private:
4453  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4454 
4455  enum FLAGS
4456  {
4457  FLAG_USER_DATA_STRING = 0x01,
4458  };
4459 
4460 public:
4461  enum ALLOCATION_TYPE
4462  {
4463  ALLOCATION_TYPE_NONE,
4464  ALLOCATION_TYPE_BLOCK,
4465  ALLOCATION_TYPE_DEDICATED,
4466  };
4467 
4468  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4469  m_Alignment(1),
4470  m_Size(0),
4471  m_pUserData(VMA_NULL),
4472  m_LastUseFrameIndex(currentFrameIndex),
4473  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4474  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4475  m_MapCount(0),
4476  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4477  {
4478 #if VMA_STATS_STRING_ENABLED
4479  m_CreationFrameIndex = currentFrameIndex;
4480  m_BufferImageUsage = 0;
4481 #endif
4482  }
4483 
4484  ~VmaAllocation_T()
4485  {
4486  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4487 
4488  // Check if owned string was freed.
4489  VMA_ASSERT(m_pUserData == VMA_NULL);
4490  }
4491 
4492  void InitBlockAllocation(
4493  VmaPool hPool,
4494  VmaDeviceMemoryBlock* block,
4495  VkDeviceSize offset,
4496  VkDeviceSize alignment,
4497  VkDeviceSize size,
4498  VmaSuballocationType suballocationType,
4499  bool mapped,
4500  bool canBecomeLost)
4501  {
4502  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4503  VMA_ASSERT(block != VMA_NULL);
4504  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4505  m_Alignment = alignment;
4506  m_Size = size;
4507  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4508  m_SuballocationType = (uint8_t)suballocationType;
4509  m_BlockAllocation.m_hPool = hPool;
4510  m_BlockAllocation.m_Block = block;
4511  m_BlockAllocation.m_Offset = offset;
4512  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4513  }
4514 
4515  void InitLost()
4516  {
4517  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4518  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4519  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4520  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4521  m_BlockAllocation.m_Block = VMA_NULL;
4522  m_BlockAllocation.m_Offset = 0;
4523  m_BlockAllocation.m_CanBecomeLost = true;
4524  }
4525 
4526  void ChangeBlockAllocation(
4527  VmaAllocator hAllocator,
4528  VmaDeviceMemoryBlock* block,
4529  VkDeviceSize offset);
4530 
4531  // pMappedData not null means the allocation was created with the MAPPED flag.
4532  void InitDedicatedAllocation(
4533  uint32_t memoryTypeIndex,
4534  VkDeviceMemory hMemory,
4535  VmaSuballocationType suballocationType,
4536  void* pMappedData,
4537  VkDeviceSize size)
4538  {
4539  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4540  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4541  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4542  m_Alignment = 0;
4543  m_Size = size;
4544  m_SuballocationType = (uint8_t)suballocationType;
4545  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4546  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4547  m_DedicatedAllocation.m_hMemory = hMemory;
4548  m_DedicatedAllocation.m_pMappedData = pMappedData;
4549  }
4550 
4551  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4552  VkDeviceSize GetAlignment() const { return m_Alignment; }
4553  VkDeviceSize GetSize() const { return m_Size; }
4554  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4555  void* GetUserData() const { return m_pUserData; }
4556  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4557  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4558 
4559  VmaDeviceMemoryBlock* GetBlock() const
4560  {
4561  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4562  return m_BlockAllocation.m_Block;
4563  }
4564  VkDeviceSize GetOffset() const;
4565  VkDeviceMemory GetMemory() const;
4566  uint32_t GetMemoryTypeIndex() const;
4567  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4568  void* GetMappedData() const;
4569  bool CanBecomeLost() const;
4570  VmaPool GetPool() const;
4571 
4572  uint32_t GetLastUseFrameIndex() const
4573  {
4574  return m_LastUseFrameIndex.load();
4575  }
4576  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4577  {
4578  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4579  }
4580  /*
4581  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4582  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4583  - Else, returns false.
4584 
4585  If hAllocation is already lost, assert - you should not call it then.
4586  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4587  */
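 // Illustrative arithmetic for the condition above (an assumed example, not
 // code from the library): with frameInUseCount = 2 and LastUseFrameIndex = 10,
 // MakeLost(currentFrameIndex = 13, 2) succeeds because 10 + 2 < 13, while
 // MakeLost(currentFrameIndex = 12, 2) returns false because 10 + 2 == 12.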
4588  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4589 
4590  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4591  {
4592  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4593  outInfo.blockCount = 1;
4594  outInfo.allocationCount = 1;
4595  outInfo.unusedRangeCount = 0;
4596  outInfo.usedBytes = m_Size;
4597  outInfo.unusedBytes = 0;
4598  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4599  outInfo.unusedRangeSizeMin = UINT64_MAX;
4600  outInfo.unusedRangeSizeMax = 0;
4601  }
4602 
4603  void BlockAllocMap();
4604  void BlockAllocUnmap();
4605  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4606  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4607 
4608 #if VMA_STATS_STRING_ENABLED
4609  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4610  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4611 
4612  void InitBufferImageUsage(uint32_t bufferImageUsage)
4613  {
4614  VMA_ASSERT(m_BufferImageUsage == 0);
4615  m_BufferImageUsage = bufferImageUsage;
4616  }
4617 
4618  void PrintParameters(class VmaJsonWriter& json) const;
4619 #endif
4620 
4621 private:
4622  VkDeviceSize m_Alignment;
4623  VkDeviceSize m_Size;
4624  void* m_pUserData;
4625  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4626  uint8_t m_Type; // ALLOCATION_TYPE
4627  uint8_t m_SuballocationType; // VmaSuballocationType
4628  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4629  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
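 // Illustrative example (assumed values): an allocation created with
 // VMA_ALLOCATION_CREATE_MAPPED_BIT and then mapped twice more via
 // vmaMapMemory() stores 0x80 | 2 == 0x82 here.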
4630  uint8_t m_MapCount;
4631  uint8_t m_Flags; // enum FLAGS
4632 
4633  // Allocation out of VmaDeviceMemoryBlock.
4634  struct BlockAllocation
4635  {
4636  VmaPool m_hPool; // Null if belongs to general memory.
4637  VmaDeviceMemoryBlock* m_Block;
4638  VkDeviceSize m_Offset;
4639  bool m_CanBecomeLost;
4640  };
4641 
4642  // Allocation for an object that has its own private VkDeviceMemory.
4643  struct DedicatedAllocation
4644  {
4645  uint32_t m_MemoryTypeIndex;
4646  VkDeviceMemory m_hMemory;
4647  void* m_pMappedData; // Not null means memory is mapped.
4648  };
4649 
4650  union
4651  {
4652  // Allocation out of VmaDeviceMemoryBlock.
4653  BlockAllocation m_BlockAllocation;
4654  // Allocation for an object that has its own private VkDeviceMemory.
4655  DedicatedAllocation m_DedicatedAllocation;
4656  };
4657 
4658 #if VMA_STATS_STRING_ENABLED
4659  uint32_t m_CreationFrameIndex;
4660  uint32_t m_BufferImageUsage; // 0 if unknown.
4661 #endif
4662 
4663  void FreeUserDataString(VmaAllocator hAllocator);
4664 };
4665 
4666 /*
4667 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
4668 allocation and returned as allocated memory, or free.
4669 */
4670 struct VmaSuballocation
4671 {
4672  VkDeviceSize offset;
4673  VkDeviceSize size;
4674  VmaAllocation hAllocation;
4675  VmaSuballocationType type;
4676 };
4677 
4678 // Comparators for sorting suballocations by offset.
4679 struct VmaSuballocationOffsetLess
4680 {
4681  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4682  {
4683  return lhs.offset < rhs.offset;
4684  }
4685 };
4686 struct VmaSuballocationOffsetGreater
4687 {
4688  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4689  {
4690  return lhs.offset > rhs.offset;
4691  }
4692 };
4693 
4694 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4695 
4696 // Cost of making one additional allocation lost, expressed as an equivalent size in bytes.
4697 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4698 
4699 /*
4700 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4701 
4702 If canMakeOtherLost was false:
4703 - item points to a FREE suballocation.
4704 - itemsToMakeLostCount is 0.
4705 
4706 If canMakeOtherLost was true:
4707 - item points to the first of a sequence of suballocations, each of which is
4708  either FREE or points to a VmaAllocation that can become lost.
4709 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4710  the requested allocation to succeed.
4711 */
4712 struct VmaAllocationRequest
4713 {
4714  VkDeviceSize offset;
4715  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4716  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4717  VmaSuballocationList::iterator item;
4718  size_t itemsToMakeLostCount;
4719  void* customData;
4720 
4721  VkDeviceSize CalcCost() const
4722  {
4723  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4724  }
4725 };
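// Illustrative CalcCost() example (assumed values): a request whose proposed
// range overlaps 3 MiB of allocations to make lost (sumItemSize) and requires
// making 2 of them lost yields 3 MiB + 2 * VMA_LOST_ALLOCATION_COST = 5 MiB;
// between candidate requests, the one with the lower cost is preferable.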
4726 
4727 /*
4728 Data structure used for bookkeeping of allocations and unused ranges of memory
4729 in a single VkDeviceMemory block.
4730 */
4731 class VmaBlockMetadata
4732 {
4733 public:
4734  VmaBlockMetadata(VmaAllocator hAllocator);
4735  virtual ~VmaBlockMetadata() { }
4736  virtual void Init(VkDeviceSize size) { m_Size = size; }
4737 
4738  // Validates all data structures inside this object. If not valid, returns false.
4739  virtual bool Validate() const = 0;
4740  VkDeviceSize GetSize() const { return m_Size; }
4741  virtual size_t GetAllocationCount() const = 0;
4742  virtual VkDeviceSize GetSumFreeSize() const = 0;
4743  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4744  // Returns true if this block is empty - contains only a single free suballocation.
4745  virtual bool IsEmpty() const = 0;
4746 
4747  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4748  // Shouldn't modify blockCount.
4749  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4750 
4751 #if VMA_STATS_STRING_ENABLED
4752  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4753 #endif
4754 
4755  // Tries to find a place for a suballocation with given parameters inside this block.
4756  // On success, fills pAllocationRequest and returns true.
4757  // On failure, returns false.
4758  virtual bool CreateAllocationRequest(
4759  uint32_t currentFrameIndex,
4760  uint32_t frameInUseCount,
4761  VkDeviceSize bufferImageGranularity,
4762  VkDeviceSize allocSize,
4763  VkDeviceSize allocAlignment,
4764  bool upperAddress,
4765  VmaSuballocationType allocType,
4766  bool canMakeOtherLost,
4767  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4768  VmaAllocationRequest* pAllocationRequest) = 0;
4769 
4770  virtual bool MakeRequestedAllocationsLost(
4771  uint32_t currentFrameIndex,
4772  uint32_t frameInUseCount,
4773  VmaAllocationRequest* pAllocationRequest) = 0;
4774 
4775  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4776 
4777  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4778 
4779  // Makes actual allocation based on request. Request must already be checked and valid.
4780  virtual void Alloc(
4781  const VmaAllocationRequest& request,
4782  VmaSuballocationType type,
4783  VkDeviceSize allocSize,
4784  bool upperAddress,
4785  VmaAllocation hAllocation) = 0;
4786 
4787  // Frees suballocation assigned to given memory region.
4788  virtual void Free(const VmaAllocation allocation) = 0;
4789  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4790 
4791 protected:
4792  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4793 
4794 #if VMA_STATS_STRING_ENABLED
4795  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4796  VkDeviceSize unusedBytes,
4797  size_t allocationCount,
4798  size_t unusedRangeCount) const;
4799  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4800  VkDeviceSize offset,
4801  VmaAllocation hAllocation) const;
4802  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4803  VkDeviceSize offset,
4804  VkDeviceSize size) const;
4805  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4806 #endif
4807 
4808 private:
4809  VkDeviceSize m_Size;
4810  const VkAllocationCallbacks* m_pAllocationCallbacks;
4811 };
4812 
4813 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4814  VMA_ASSERT(0 && "Validation failed: " #cond); \
4815  return false; \
4816  } } while(false)
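// A minimal usage sketch of VMA_VALIDATE inside a hypothetical Validate()
// override (illustrative only - the condition shown is an assumption, not the
// library's actual check):
//
//   virtual bool Validate() const
//   {
//       VMA_VALIDATE(GetSumFreeSize() <= GetSize());
//       return true; // Reached only if no VMA_VALIDATE fired.
//   }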
4817 
4818 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4819 {
4820  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4821 public:
4822  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4823  virtual ~VmaBlockMetadata_Generic();
4824  virtual void Init(VkDeviceSize size);
4825 
4826  virtual bool Validate() const;
4827  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4828  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4829  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4830  virtual bool IsEmpty() const;
4831 
4832  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4833  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4834 
4835 #if VMA_STATS_STRING_ENABLED
4836  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4837 #endif
4838 
4839  virtual bool CreateAllocationRequest(
4840  uint32_t currentFrameIndex,
4841  uint32_t frameInUseCount,
4842  VkDeviceSize bufferImageGranularity,
4843  VkDeviceSize allocSize,
4844  VkDeviceSize allocAlignment,
4845  bool upperAddress,
4846  VmaSuballocationType allocType,
4847  bool canMakeOtherLost,
4848  uint32_t strategy,
4849  VmaAllocationRequest* pAllocationRequest);
4850 
4851  virtual bool MakeRequestedAllocationsLost(
4852  uint32_t currentFrameIndex,
4853  uint32_t frameInUseCount,
4854  VmaAllocationRequest* pAllocationRequest);
4855 
4856  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4857 
4858  virtual VkResult CheckCorruption(const void* pBlockData);
4859 
4860  virtual void Alloc(
4861  const VmaAllocationRequest& request,
4862  VmaSuballocationType type,
4863  VkDeviceSize allocSize,
4864  bool upperAddress,
4865  VmaAllocation hAllocation);
4866 
4867  virtual void Free(const VmaAllocation allocation);
4868  virtual void FreeAtOffset(VkDeviceSize offset);
4869 
4870 private:
4871  uint32_t m_FreeCount;
4872  VkDeviceSize m_SumFreeSize;
4873  VmaSuballocationList m_Suballocations;
4874  // Suballocations that are free and have size greater than a certain threshold.
4875  // Sorted by size, ascending.
4876  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4877 
4878  bool ValidateFreeSuballocationList() const;
4879 
4880  // Checks if the requested suballocation with given parameters can be placed in given suballocItem.
4881  // If yes, fills pOffset and returns true. If no, returns false.
4882  bool CheckAllocation(
4883  uint32_t currentFrameIndex,
4884  uint32_t frameInUseCount,
4885  VkDeviceSize bufferImageGranularity,
4886  VkDeviceSize allocSize,
4887  VkDeviceSize allocAlignment,
4888  VmaSuballocationType allocType,
4889  VmaSuballocationList::const_iterator suballocItem,
4890  bool canMakeOtherLost,
4891  VkDeviceSize* pOffset,
4892  size_t* itemsToMakeLostCount,
4893  VkDeviceSize* pSumFreeSize,
4894  VkDeviceSize* pSumItemSize) const;
4895  // Merges given free suballocation with the following one, which must also be free.
4896  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4897  // Releases given suballocation, making it free.
4898  // Merges it with adjacent free suballocations if applicable.
4899  // Returns iterator to new free suballocation at this place.
4900  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4901  // Inserts given free suballocation into the sorted list
4902  // m_FreeSuballocationsBySize if it's suitable.
4903  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4904  // Removes given free suballocation from the sorted list
4905  // m_FreeSuballocationsBySize if it's suitable.
4906  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4907 };
4908 
4909 /*
4910 Allocations and their references in internal data structure look like this:
4911 
4912 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4913 
4914  0 +-------+
4915  | |
4916  | |
4917  | |
4918  +-------+
4919  | Alloc | 1st[m_1stNullItemsBeginCount]
4920  +-------+
4921  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4922  +-------+
4923  | ... |
4924  +-------+
4925  | Alloc | 1st[1st.size() - 1]
4926  +-------+
4927  | |
4928  | |
4929  | |
4930 GetSize() +-------+
4931 
4932 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4933 
4934  0 +-------+
4935  | Alloc | 2nd[0]
4936  +-------+
4937  | Alloc | 2nd[1]
4938  +-------+
4939  | ... |
4940  +-------+
4941  | Alloc | 2nd[2nd.size() - 1]
4942  +-------+
4943  | |
4944  | |
4945  | |
4946  +-------+
4947  | Alloc | 1st[m_1stNullItemsBeginCount]
4948  +-------+
4949  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4950  +-------+
4951  | ... |
4952  +-------+
4953  | Alloc | 1st[1st.size() - 1]
4954  +-------+
4955  | |
4956 GetSize() +-------+
4957 
4958 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4959 
4960  0 +-------+
4961  | |
4962  | |
4963  | |
4964  +-------+
4965  | Alloc | 1st[m_1stNullItemsBeginCount]
4966  +-------+
4967  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4968  +-------+
4969  | ... |
4970  +-------+
4971  | Alloc | 1st[1st.size() - 1]
4972  +-------+
4973  | |
4974  | |
4975  | |
4976  +-------+
4977  | Alloc | 2nd[2nd.size() - 1]
4978  +-------+
4979  | ... |
4980  +-------+
4981  | Alloc | 2nd[1]
4982  +-------+
4983  | Alloc | 2nd[0]
4984 GetSize() +-------+
4985 
4986 */
4987 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4988 {
4989  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
4990 public:
4991  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
4992  virtual ~VmaBlockMetadata_Linear();
4993  virtual void Init(VkDeviceSize size);
4994 
4995  virtual bool Validate() const;
4996  virtual size_t GetAllocationCount() const;
4997  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4998  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4999  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
5000 
5001  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5002  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5003 
5004 #if VMA_STATS_STRING_ENABLED
5005  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5006 #endif
5007 
5008  virtual bool CreateAllocationRequest(
5009  uint32_t currentFrameIndex,
5010  uint32_t frameInUseCount,
5011  VkDeviceSize bufferImageGranularity,
5012  VkDeviceSize allocSize,
5013  VkDeviceSize allocAlignment,
5014  bool upperAddress,
5015  VmaSuballocationType allocType,
5016  bool canMakeOtherLost,
5017  uint32_t strategy,
5018  VmaAllocationRequest* pAllocationRequest);
5019 
5020  virtual bool MakeRequestedAllocationsLost(
5021  uint32_t currentFrameIndex,
5022  uint32_t frameInUseCount,
5023  VmaAllocationRequest* pAllocationRequest);
5024 
5025  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5026 
5027  virtual VkResult CheckCorruption(const void* pBlockData);
5028 
5029  virtual void Alloc(
5030  const VmaAllocationRequest& request,
5031  VmaSuballocationType type,
5032  VkDeviceSize allocSize,
5033  bool upperAddress,
5034  VmaAllocation hAllocation);
5035 
5036  virtual void Free(const VmaAllocation allocation);
5037  virtual void FreeAtOffset(VkDeviceSize offset);
5038 
5039 private:
5040  /*
5041  There are two suballocation vectors, used in ping-pong way.
5042  The one with index m_1stVectorIndex is called 1st.
5043  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
5044  2nd can be non-empty only when 1st is not empty.
5045  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
5046  */
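 // Illustrative consequence of this ping-pong scheme (assumed behavior): when
 // the 1st vector becomes empty while the 2nd is in use, the roles can be
 // swapped simply by toggling m_1stVectorIndex, without copying any elements.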
5047  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
5048 
5049  enum SECOND_VECTOR_MODE
5050  {
5051  SECOND_VECTOR_EMPTY,
5052  /*
5053  Suballocations in 2nd vector are created later than the ones in 1st, but they
5054  all have smaller offsets.
5055  */
5056  SECOND_VECTOR_RING_BUFFER,
5057  /*
5058  Suballocations in 2nd vector are upper side of double stack.
5059  They all have offsets higher than those in 1st vector.
5060  Top of this stack means smaller offsets, but higher indices in this vector.
5061  */
5062  SECOND_VECTOR_DOUBLE_STACK,
5063  };
5064 
5065  VkDeviceSize m_SumFreeSize;
5066  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5067  uint32_t m_1stVectorIndex;
5068  SECOND_VECTOR_MODE m_2ndVectorMode;
5069 
5070  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5071  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5072  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5073  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5074 
5075  // Number of items in 1st vector with hAllocation = null at the beginning.
5076  size_t m_1stNullItemsBeginCount;
5077  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5078  size_t m_1stNullItemsMiddleCount;
5079  // Number of items in 2nd vector with hAllocation = null.
5080  size_t m_2ndNullItemsCount;
5081 
5082  bool ShouldCompact1st() const;
5083  void CleanupAfterFree();
5084 };
5085 
5086 /*
5087 - GetSize() is the original size of allocated memory block.
5088 - m_UsableSize is this size aligned down to a power of two.
5089  All allocations and calculations happen relative to m_UsableSize.
5090 - GetUnusableSize() is the difference between them.
5091  It is reported as a separate, unused range, not available for allocations.
5092 
5093 Node at level 0 has size = m_UsableSize.
5094 Each subsequent level contains nodes half the size of those on the previous level.
5095 m_LevelCount is the maximum number of levels to use in the current object.
5096 */
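// Worked example with illustrative numbers: for a 100 MiB block, m_UsableSize
// is 64 MiB (the largest power of two <= 100 MiB), GetUnusableSize() is
// 36 MiB, and LevelToNodeSize() yields 64 MiB at level 0, 32 MiB at level 1,
// 16 MiB at level 2, and so on, halving at each level.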
5097 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5098 {
5099  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5100 public:
5101  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5102  virtual ~VmaBlockMetadata_Buddy();
5103  virtual void Init(VkDeviceSize size);
5104 
5105  virtual bool Validate() const;
5106  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5107  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5108  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5109  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5110 
5111  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5112  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5113 
5114 #if VMA_STATS_STRING_ENABLED
5115  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5116 #endif
5117 
5118  virtual bool CreateAllocationRequest(
5119  uint32_t currentFrameIndex,
5120  uint32_t frameInUseCount,
5121  VkDeviceSize bufferImageGranularity,
5122  VkDeviceSize allocSize,
5123  VkDeviceSize allocAlignment,
5124  bool upperAddress,
5125  VmaSuballocationType allocType,
5126  bool canMakeOtherLost,
5127  uint32_t strategy,
5128  VmaAllocationRequest* pAllocationRequest);
5129 
5130  virtual bool MakeRequestedAllocationsLost(
5131  uint32_t currentFrameIndex,
5132  uint32_t frameInUseCount,
5133  VmaAllocationRequest* pAllocationRequest);
5134 
5135  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5136 
5137  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5138 
5139  virtual void Alloc(
5140  const VmaAllocationRequest& request,
5141  VmaSuballocationType type,
5142  VkDeviceSize allocSize,
5143  bool upperAddress,
5144  VmaAllocation hAllocation);
5145 
5146  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5147  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5148 
5149 private:
5150  static const VkDeviceSize MIN_NODE_SIZE = 32;
5151  static const size_t MAX_LEVELS = 30;
5152 
5153  struct ValidationContext
5154  {
5155  size_t calculatedAllocationCount;
5156  size_t calculatedFreeCount;
5157  VkDeviceSize calculatedSumFreeSize;
5158 
5159  ValidationContext() :
5160  calculatedAllocationCount(0),
5161  calculatedFreeCount(0),
5162  calculatedSumFreeSize(0) { }
5163  };
5164 
5165  struct Node
5166  {
5167  VkDeviceSize offset;
5168  enum TYPE
5169  {
5170  TYPE_FREE,
5171  TYPE_ALLOCATION,
5172  TYPE_SPLIT,
5173  TYPE_COUNT
5174  } type;
5175  Node* parent;
5176  Node* buddy;
5177 
5178  union
5179  {
5180  struct
5181  {
5182  Node* prev;
5183  Node* next;
5184  } free;
5185  struct
5186  {
5187  VmaAllocation alloc;
5188  } allocation;
5189  struct
5190  {
5191  Node* leftChild;
5192  } split;
5193  };
5194  };
5195 
5196  // Size of the memory block aligned down to a power of two.
5197  VkDeviceSize m_UsableSize;
5198  uint32_t m_LevelCount;
5199 
5200  Node* m_Root;
5201  struct {
5202  Node* front;
5203  Node* back;
5204  } m_FreeList[MAX_LEVELS];
5205  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5206  size_t m_AllocationCount;
5207  // Number of nodes in the tree with type == TYPE_FREE.
5208  size_t m_FreeCount;
5209  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5210  VkDeviceSize m_SumFreeSize;
5211 
5212  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5213  void DeleteNode(Node* node);
5214  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5215  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5216  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5217  // Alloc passed just for validation. Can be null.
5218  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5219  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5220  // Adds node to the front of FreeList at given level.
5221  // node->type must be FREE.
5222  // node->free.prev, next can be undefined.
5223  void AddToFreeListFront(uint32_t level, Node* node);
5224  // Removes node from FreeList at given level.
5225  // node->type must be FREE.
5226  // node->free.prev, next stay untouched.
5227  void RemoveFromFreeList(uint32_t level, Node* node);
5228 
5229 #if VMA_STATS_STRING_ENABLED
5230  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5231 #endif
5232 };
5233 
5234 /*
5235 Represents a single block of device memory (`VkDeviceMemory`) with all the
5236 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5237 
5238 Thread-safety: This class must be externally synchronized.
5239 */
5240 class VmaDeviceMemoryBlock
5241 {
5242  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5243 public:
5244  VmaBlockMetadata* m_pMetadata;
5245 
5246  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5247 
5248  ~VmaDeviceMemoryBlock()
5249  {
5250  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5251  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5252  }
5253 
5254  // Always call after construction.
5255  void Init(
5256  VmaAllocator hAllocator,
5257  uint32_t newMemoryTypeIndex,
5258  VkDeviceMemory newMemory,
5259  VkDeviceSize newSize,
5260  uint32_t id,
5261  uint32_t algorithm);
5262  // Always call before destruction.
5263  void Destroy(VmaAllocator allocator);
5264 
5265  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5266  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5267  uint32_t GetId() const { return m_Id; }
5268  void* GetMappedData() const { return m_pMappedData; }
5269 
5270  // Validates all data structures inside this object. If not valid, returns false.
5271  bool Validate() const;
5272 
5273  VkResult CheckCorruption(VmaAllocator hAllocator);
5274 
5275  // ppData can be null.
5276  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5277  void Unmap(VmaAllocator hAllocator, uint32_t count);
5278 
5279  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5280  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5281 
5282  VkResult BindBufferMemory(
5283  const VmaAllocator hAllocator,
5284  const VmaAllocation hAllocation,
5285  VkBuffer hBuffer);
5286  VkResult BindImageMemory(
5287  const VmaAllocator hAllocator,
5288  const VmaAllocation hAllocation,
5289  VkImage hImage);
5290 
5291 private:
5292  uint32_t m_MemoryTypeIndex;
5293  uint32_t m_Id;
5294  VkDeviceMemory m_hMemory;
5295 
5296  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5297  // Also protects m_MapCount, m_pMappedData.
5298  VMA_MUTEX m_Mutex;
5299  uint32_t m_MapCount;
5300  void* m_pMappedData;
5301 };
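// Illustrative sketch of the reference-counted mapping above (assumed usage of
// this internal API): nested Map() calls return the same mapped pointer, and
// the block stays mapped until a matching number of Unmap() calls is made.
//
//   void* p1 = VMA_NULL;
//   void* p2 = VMA_NULL;
//   block.Map(hAllocator, 1, &p1);   // m_MapCount 0 -> 1, calls vkMapMemory.
//   block.Map(hAllocator, 1, &p2);   // m_MapCount 1 -> 2, reuses m_pMappedData.
//   block.Unmap(hAllocator, 1);      // m_MapCount 2 -> 1, still mapped.
//   block.Unmap(hAllocator, 1);      // m_MapCount 1 -> 0, calls vkUnmapMemory.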
5302 
5303 struct VmaPointerLess
5304 {
5305  bool operator()(const void* lhs, const void* rhs) const
5306  {
5307  return lhs < rhs;
5308  }
5309 };
5310 
5311 class VmaDefragmentator;
5312 
5313 /*
5314 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5315 Vulkan memory type.
5316 
5317 Synchronized internally with a mutex.
5318 */
5319 struct VmaBlockVector
5320 {
5321  VMA_CLASS_NO_COPY(VmaBlockVector)
5322 public:
5323  VmaBlockVector(
5324  VmaAllocator hAllocator,
5325  uint32_t memoryTypeIndex,
5326  VkDeviceSize preferredBlockSize,
5327  size_t minBlockCount,
5328  size_t maxBlockCount,
5329  VkDeviceSize bufferImageGranularity,
5330  uint32_t frameInUseCount,
5331  bool isCustomPool,
5332  bool explicitBlockSize,
5333  uint32_t algorithm);
5334  ~VmaBlockVector();
5335 
5336  VkResult CreateMinBlocks();
5337 
5338  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5339  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5340  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5341  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5342  uint32_t GetAlgorithm() const { return m_Algorithm; }
5343 
5344  void GetPoolStats(VmaPoolStats* pStats);
5345 
5346  bool IsEmpty() const { return m_Blocks.empty(); }
5347  bool IsCorruptionDetectionEnabled() const;
5348 
5349  VkResult Allocate(
5350  VmaPool hCurrentPool,
5351  uint32_t currentFrameIndex,
5352  VkDeviceSize size,
5353  VkDeviceSize alignment,
5354  const VmaAllocationCreateInfo& createInfo,
5355  VmaSuballocationType suballocType,
5356  size_t allocationCount,
5357  VmaAllocation* pAllocations);
5358 
5359  void Free(
5360  VmaAllocation hAllocation);
5361 
5362  // Adds statistics of this BlockVector to pStats.
5363  void AddStats(VmaStats* pStats);
5364 
5365 #if VMA_STATS_STRING_ENABLED
5366  void PrintDetailedMap(class VmaJsonWriter& json);
5367 #endif
5368 
5369  void MakePoolAllocationsLost(
5370  uint32_t currentFrameIndex,
5371  size_t* pLostAllocationCount);
5372  VkResult CheckCorruption();
5373 
5374  VmaDefragmentator* EnsureDefragmentator(
5375  VmaAllocator hAllocator,
5376  uint32_t currentFrameIndex);
5377 
5378  VkResult Defragment(
5379  VmaDefragmentationStats* pDefragmentationStats,
5380  VkDeviceSize& maxBytesToMove,
5381  uint32_t& maxAllocationsToMove);
5382 
5383  void DestroyDefragmentator();
5384 
5385 private:
5386  friend class VmaDefragmentator;
5387 
5388  const VmaAllocator m_hAllocator;
5389  const uint32_t m_MemoryTypeIndex;
5390  const VkDeviceSize m_PreferredBlockSize;
5391  const size_t m_MinBlockCount;
5392  const size_t m_MaxBlockCount;
5393  const VkDeviceSize m_BufferImageGranularity;
5394  const uint32_t m_FrameInUseCount;
5395  const bool m_IsCustomPool;
5396  const bool m_ExplicitBlockSize;
5397  const uint32_t m_Algorithm;
5398  bool m_HasEmptyBlock;
5399  VMA_MUTEX m_Mutex;
5400  // Incrementally sorted by sumFreeSize, ascending.
5401  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5402  /* There can be at most one block that is completely empty (tracked by
5403  m_HasEmptyBlock) - a hysteresis to avoid the pessimistic case of alternating
5404  creation and destruction of a VkDeviceMemory. */
5405  VmaDefragmentator* m_pDefragmentator;
5406  uint32_t m_NextBlockId;
5407 
5408  VkDeviceSize CalcMaxBlockSize() const;
5409 
5410  // Finds and removes given block from vector.
5411  void Remove(VmaDeviceMemoryBlock* pBlock);
5412 
5413  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5414  // after this call.
5415  void IncrementallySortBlocks();
5416 
5417  VkResult AllocatePage(
5418  VmaPool hCurrentPool,
5419  uint32_t currentFrameIndex,
5420  VkDeviceSize size,
5421  VkDeviceSize alignment,
5422  const VmaAllocationCreateInfo& createInfo,
5423  VmaSuballocationType suballocType,
5424  VmaAllocation* pAllocation);
5425 
5426  // To be used only without CAN_MAKE_OTHER_LOST flag.
5427  VkResult AllocateFromBlock(
5428  VmaDeviceMemoryBlock* pBlock,
5429  VmaPool hCurrentPool,
5430  uint32_t currentFrameIndex,
5431  VkDeviceSize size,
5432  VkDeviceSize alignment,
5433  VmaAllocationCreateFlags allocFlags,
5434  void* pUserData,
5435  VmaSuballocationType suballocType,
5436  uint32_t strategy,
5437  VmaAllocation* pAllocation);
5438 
5439  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5440 };
5441 
5442 struct VmaPool_T
5443 {
5444  VMA_CLASS_NO_COPY(VmaPool_T)
5445 public:
5446  VmaBlockVector m_BlockVector;
5447 
5448  VmaPool_T(
5449  VmaAllocator hAllocator,
5450  const VmaPoolCreateInfo& createInfo,
5451  VkDeviceSize preferredBlockSize);
5452  ~VmaPool_T();
5453 
5454  uint32_t GetId() const { return m_Id; }
5455  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5456 
5457 #if VMA_STATS_STRING_ENABLED
5458  //void PrintDetailedMap(class VmaStringBuilder& sb);
5459 #endif
5460 
5461 private:
5462  uint32_t m_Id;
5463 };
5464 
5465 class VmaDefragmentator
5466 {
5467  VMA_CLASS_NO_COPY(VmaDefragmentator)
5468 private:
5469  const VmaAllocator m_hAllocator;
5470  VmaBlockVector* const m_pBlockVector;
5471  uint32_t m_CurrentFrameIndex;
5472  VkDeviceSize m_BytesMoved;
5473  uint32_t m_AllocationsMoved;
5474 
5475  struct AllocationInfo
5476  {
5477  VmaAllocation m_hAllocation;
5478  VkBool32* m_pChanged;
5479 
5480  AllocationInfo() :
5481  m_hAllocation(VK_NULL_HANDLE),
5482  m_pChanged(VMA_NULL)
5483  {
5484  }
5485  };
5486 
5487  struct AllocationInfoSizeGreater
5488  {
5489  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5490  {
5491  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5492  }
5493  };
5494 
5495  // Used between AddAllocation and Defragment.
5496  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5497 
5498  struct BlockInfo
5499  {
5500  VmaDeviceMemoryBlock* m_pBlock;
5501  bool m_HasNonMovableAllocations;
5502  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5503 
5504  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5505  m_pBlock(VMA_NULL),
5506  m_HasNonMovableAllocations(true),
5507  m_Allocations(pAllocationCallbacks),
5508  m_pMappedDataForDefragmentation(VMA_NULL)
5509  {
5510  }
5511 
5512  void CalcHasNonMovableAllocations()
5513  {
5514  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5515  const size_t defragmentAllocCount = m_Allocations.size();
5516  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5517  }
5518 
5519  void SortAllocationsBySizeDescecnding()
5520  {
5521  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5522  }
5523 
5524  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5525  void Unmap(VmaAllocator hAllocator);
5526 
5527  private:
5528  // Not null if mapped for defragmentation only, not originally mapped.
5529  void* m_pMappedDataForDefragmentation;
5530  };
5531 
5532  struct BlockPointerLess
5533  {
5534  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5535  {
5536  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5537  }
5538  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5539  {
5540  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5541  }
5542  };
5543 
5544  // 1. Blocks with some non-movable allocations go first.
5545  // 2. Blocks with smaller sumFreeSize go first.
5546  struct BlockInfoCompareMoveDestination
5547  {
5548  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5549  {
5550  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5551  {
5552  return true;
5553  }
5554  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5555  {
5556  return false;
5557  }
5558  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5559  {
5560  return true;
5561  }
5562  return false;
5563  }
5564  };
5565 
5566  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5567  BlockInfoVector m_Blocks;
5568 
5569  VkResult DefragmentRound(
5570  VkDeviceSize maxBytesToMove,
5571  uint32_t maxAllocationsToMove);
5572 
5573  static bool MoveMakesSense(
5574  size_t dstBlockIndex, VkDeviceSize dstOffset,
5575  size_t srcBlockIndex, VkDeviceSize srcOffset);
5576 
5577 public:
5578  VmaDefragmentator(
5579  VmaAllocator hAllocator,
5580  VmaBlockVector* pBlockVector,
5581  uint32_t currentFrameIndex);
5582 
5583  ~VmaDefragmentator();
5584 
5585  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5586  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5587 
5588  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5589 
5590  VkResult Defragment(
5591  VkDeviceSize maxBytesToMove,
5592  uint32_t maxAllocationsToMove);
5593 };
5594 
5595 #if VMA_RECORDING_ENABLED
5596 
5597 class VmaRecorder
5598 {
5599 public:
5600  VmaRecorder();
5601  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5602  void WriteConfiguration(
5603  const VkPhysicalDeviceProperties& devProps,
5604  const VkPhysicalDeviceMemoryProperties& memProps,
5605  bool dedicatedAllocationExtensionEnabled);
5606  ~VmaRecorder();
5607 
5608  void RecordCreateAllocator(uint32_t frameIndex);
5609  void RecordDestroyAllocator(uint32_t frameIndex);
5610  void RecordCreatePool(uint32_t frameIndex,
5611  const VmaPoolCreateInfo& createInfo,
5612  VmaPool pool);
5613  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5614  void RecordAllocateMemory(uint32_t frameIndex,
5615  const VkMemoryRequirements& vkMemReq,
5616  const VmaAllocationCreateInfo& createInfo,
5617  VmaAllocation allocation);
5618  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5619  const VkMemoryRequirements& vkMemReq,
5620  bool requiresDedicatedAllocation,
5621  bool prefersDedicatedAllocation,
5622  const VmaAllocationCreateInfo& createInfo,
5623  VmaAllocation allocation);
5624  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5625  const VkMemoryRequirements& vkMemReq,
5626  bool requiresDedicatedAllocation,
5627  bool prefersDedicatedAllocation,
5628  const VmaAllocationCreateInfo& createInfo,
5629  VmaAllocation allocation);
5630  void RecordFreeMemory(uint32_t frameIndex,
5631  VmaAllocation allocation);
5632  void RecordSetAllocationUserData(uint32_t frameIndex,
5633  VmaAllocation allocation,
5634  const void* pUserData);
5635  void RecordCreateLostAllocation(uint32_t frameIndex,
5636  VmaAllocation allocation);
5637  void RecordMapMemory(uint32_t frameIndex,
5638  VmaAllocation allocation);
5639  void RecordUnmapMemory(uint32_t frameIndex,
5640  VmaAllocation allocation);
5641  void RecordFlushAllocation(uint32_t frameIndex,
5642  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5643  void RecordInvalidateAllocation(uint32_t frameIndex,
5644  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5645  void RecordCreateBuffer(uint32_t frameIndex,
5646  const VkBufferCreateInfo& bufCreateInfo,
5647  const VmaAllocationCreateInfo& allocCreateInfo,
5648  VmaAllocation allocation);
5649  void RecordCreateImage(uint32_t frameIndex,
5650  const VkImageCreateInfo& imageCreateInfo,
5651  const VmaAllocationCreateInfo& allocCreateInfo,
5652  VmaAllocation allocation);
5653  void RecordDestroyBuffer(uint32_t frameIndex,
5654  VmaAllocation allocation);
5655  void RecordDestroyImage(uint32_t frameIndex,
5656  VmaAllocation allocation);
5657  void RecordTouchAllocation(uint32_t frameIndex,
5658  VmaAllocation allocation);
5659  void RecordGetAllocationInfo(uint32_t frameIndex,
5660  VmaAllocation allocation);
5661  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5662  VmaPool pool);
5663 
5664 private:
5665  struct CallParams
5666  {
5667  uint32_t threadId;
5668  double time;
5669  };
5670 
5671  class UserDataString
5672  {
5673  public:
5674  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5675  const char* GetString() const { return m_Str; }
5676 
5677  private:
5678  char m_PtrStr[17];
5679  const char* m_Str;
5680  };
5681 
5682  bool m_UseMutex;
5683  VmaRecordFlags m_Flags;
5684  FILE* m_File;
5685  VMA_MUTEX m_FileMutex;
5686  int64_t m_Freq;
5687  int64_t m_StartCounter;
5688 
5689  void GetBasicParams(CallParams& outParams);
5690  void Flush();
5691 };
5692 
5693 #endif // #if VMA_RECORDING_ENABLED
5694 
5695 // Main allocator object.
5696 struct VmaAllocator_T
5697 {
5698  VMA_CLASS_NO_COPY(VmaAllocator_T)
5699 public:
5700  bool m_UseMutex;
5701  bool m_UseKhrDedicatedAllocation;
5702  VkDevice m_hDevice;
5703  bool m_AllocationCallbacksSpecified;
5704  VkAllocationCallbacks m_AllocationCallbacks;
5705  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5706 
5707  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5708  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5709  VMA_MUTEX m_HeapSizeLimitMutex;
5710 
5711  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5712  VkPhysicalDeviceMemoryProperties m_MemProps;
5713 
5714  // Default pools.
5715  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5716 
5717  // Each vector is sorted by memory (handle value).
5718  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5719  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5720  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5721 
5722  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5723  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5724  ~VmaAllocator_T();
5725 
5726  const VkAllocationCallbacks* GetAllocationCallbacks() const
5727  {
5728  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5729  }
5730  const VmaVulkanFunctions& GetVulkanFunctions() const
5731  {
5732  return m_VulkanFunctions;
5733  }
5734 
5735  VkDeviceSize GetBufferImageGranularity() const
5736  {
5737  return VMA_MAX(
5738  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5739  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5740  }
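 // Illustrative note: bufferImageGranularity separates linear resources
 // (buffers, linear images) from optimal-tiled images within one block. For
 // example, if the device reports 1024, a buffer suballocated after an
 // optimal-tiled image must start on the next 1024-byte boundary.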
5741 
5742  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5743  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5744 
5745  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5746  {
5747  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5748  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5749  }
5750  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5751  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5752  {
5753  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5754  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5755  }
5756  // Minimum alignment for all allocations in specific memory type.
5757  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5758  {
5759  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5760  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5761  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5762  }
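 // Illustrative example (assumed device values): for a HOST_VISIBLE but not
 // HOST_COHERENT memory type with nonCoherentAtomSize = 64 and
 // VMA_DEBUG_ALIGNMENT = 1, this returns 64, so flushed or invalidated mapped
 // ranges of neighboring allocations cannot overlap the same atom.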
5763 
5764  bool IsIntegratedGpu() const
5765  {
5766  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5767  }
5768 
5769 #if VMA_RECORDING_ENABLED
5770  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5771 #endif
5772 
5773  void GetBufferMemoryRequirements(
5774  VkBuffer hBuffer,
5775  VkMemoryRequirements& memReq,
5776  bool& requiresDedicatedAllocation,
5777  bool& prefersDedicatedAllocation) const;
5778  void GetImageMemoryRequirements(
5779  VkImage hImage,
5780  VkMemoryRequirements& memReq,
5781  bool& requiresDedicatedAllocation,
5782  bool& prefersDedicatedAllocation) const;
5783 
5784  // Main allocation function.
5785  VkResult AllocateMemory(
5786  const VkMemoryRequirements& vkMemReq,
5787  bool requiresDedicatedAllocation,
5788  bool prefersDedicatedAllocation,
5789  VkBuffer dedicatedBuffer,
5790  VkImage dedicatedImage,
5791  const VmaAllocationCreateInfo& createInfo,
5792  VmaSuballocationType suballocType,
5793  size_t allocationCount,
5794  VmaAllocation* pAllocations);
5795 
5796  // Main deallocation function.
5797  void FreeMemory(
5798  size_t allocationCount,
5799  const VmaAllocation* pAllocations);
5800 
5801  void CalculateStats(VmaStats* pStats);
5802 
5803 #if VMA_STATS_STRING_ENABLED
5804  void PrintDetailedMap(class VmaJsonWriter& json);
5805 #endif
5806 
5807  VkResult Defragment(
5808  VmaAllocation* pAllocations,
5809  size_t allocationCount,
5810  VkBool32* pAllocationsChanged,
5811  const VmaDefragmentationInfo* pDefragmentationInfo,
5812  VmaDefragmentationStats* pDefragmentationStats);
5813 
5814  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5815  bool TouchAllocation(VmaAllocation hAllocation);
5816 
5817  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5818  void DestroyPool(VmaPool pool);
5819  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5820 
5821  void SetCurrentFrameIndex(uint32_t frameIndex);
5822  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5823 
5824  void MakePoolAllocationsLost(
5825  VmaPool hPool,
5826  size_t* pLostAllocationCount);
5827  VkResult CheckPoolCorruption(VmaPool hPool);
5828  VkResult CheckCorruption(uint32_t memoryTypeBits);
5829 
5830  void CreateLostAllocation(VmaAllocation* pAllocation);
5831 
5832  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5833  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5834 
5835  VkResult Map(VmaAllocation hAllocation, void** ppData);
5836  void Unmap(VmaAllocation hAllocation);
5837 
5838  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5839  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5840 
5841  void FlushOrInvalidateAllocation(
5842  VmaAllocation hAllocation,
5843  VkDeviceSize offset, VkDeviceSize size,
5844  VMA_CACHE_OPERATION op);
5845 
5846  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5847 
5848 private:
5849  VkDeviceSize m_PreferredLargeHeapBlockSize;
5850 
5851  VkPhysicalDevice m_PhysicalDevice;
5852  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5853 
5854  VMA_MUTEX m_PoolsMutex;
5855  // Protected by m_PoolsMutex. Sorted by pointer value.
5856  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5857  uint32_t m_NextPoolId;
5858 
5859  VmaVulkanFunctions m_VulkanFunctions;
5860 
5861 #if VMA_RECORDING_ENABLED
5862  VmaRecorder* m_pRecorder;
5863 #endif
5864 
5865  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5866 
5867  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5868 
5869  VkResult AllocateMemoryOfType(
5870  VkDeviceSize size,
5871  VkDeviceSize alignment,
5872  bool dedicatedAllocation,
5873  VkBuffer dedicatedBuffer,
5874  VkImage dedicatedImage,
5875  const VmaAllocationCreateInfo& createInfo,
5876  uint32_t memTypeIndex,
5877  VmaSuballocationType suballocType,
5878  size_t allocationCount,
5879  VmaAllocation* pAllocations);
5880 
5881  // Helper function only to be used inside AllocateDedicatedMemory.
5882  VkResult AllocateDedicatedMemoryPage(
5883  VkDeviceSize size,
5884  VmaSuballocationType suballocType,
5885  uint32_t memTypeIndex,
5886  const VkMemoryAllocateInfo& allocInfo,
5887  bool map,
5888  bool isUserDataString,
5889  void* pUserData,
5890  VmaAllocation* pAllocation);
5891 
5892  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
5893  VkResult AllocateDedicatedMemory(
5894  VkDeviceSize size,
5895  VmaSuballocationType suballocType,
5896  uint32_t memTypeIndex,
5897  bool map,
5898  bool isUserDataString,
5899  void* pUserData,
5900  VkBuffer dedicatedBuffer,
5901  VkImage dedicatedImage,
5902  size_t allocationCount,
5903  VmaAllocation* pAllocations);
5904 
5905  // Frees given allocation as dedicated memory: unregisters it and destroys its VkDeviceMemory.
5906  void FreeDedicatedMemory(VmaAllocation allocation);
5907 };
5908 
5910 // Memory allocation #2 after VmaAllocator_T definition
5911 
5912 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5913 {
5914  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5915 }
5916 
5917 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5918 {
5919  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5920 }
5921 
5922 template<typename T>
5923 static T* VmaAllocate(VmaAllocator hAllocator)
5924 {
5925  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5926 }
5927 
5928 template<typename T>
5929 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5930 {
5931  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5932 }
5933 
5934 template<typename T>
5935 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5936 {
5937  if(ptr != VMA_NULL)
5938  {
5939  ptr->~T();
5940  VmaFree(hAllocator, ptr);
5941  }
5942 }
5943 
5944 template<typename T>
5945 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5946 {
5947  if(ptr != VMA_NULL)
5948  {
5949  for(size_t i = count; i--; )
5950  ptr[i].~T();
5951  VmaFree(hAllocator, ptr);
5952  }
5953 }
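// Illustrative pairing of these helpers (a sketch with a hypothetical MyType,
// not code from the library):
//
//   MyType* p = VmaAllocate<MyType>(hAllocator); // Raw memory, no constructor.
//   new(p) MyType();                             // Construct in place.
//   vma_delete(hAllocator, p);                   // Destructor, then VmaFree().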
5954 
5956 // VmaStringBuilder
5957 
5958 #if VMA_STATS_STRING_ENABLED
5959 
5960 class VmaStringBuilder
5961 {
5962 public:
5963  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5964  size_t GetLength() const { return m_Data.size(); }
5965  const char* GetData() const { return m_Data.data(); }
5966 
5967  void Add(char ch) { m_Data.push_back(ch); }
5968  void Add(const char* pStr);
5969  void AddNewLine() { Add('\n'); }
5970  void AddNumber(uint32_t num);
5971  void AddNumber(uint64_t num);
5972  void AddPointer(const void* ptr);
5973 
5974 private:
5975  VmaVector< char, VmaStlAllocator<char> > m_Data;
5976 };
5977 
5978 void VmaStringBuilder::Add(const char* pStr)
5979 {
5980  const size_t strLen = strlen(pStr);
5981  if(strLen > 0)
5982  {
5983  const size_t oldCount = m_Data.size();
5984  m_Data.resize(oldCount + strLen);
5985  memcpy(m_Data.data() + oldCount, pStr, strLen);
5986  }
5987 }
5988 
5989 void VmaStringBuilder::AddNumber(uint32_t num)
5990 {
5991  char buf[11];
5992  VmaUint32ToStr(buf, sizeof(buf), num);
5993  Add(buf);
5994 }
5995 
5996 void VmaStringBuilder::AddNumber(uint64_t num)
5997 {
5998  char buf[21];
5999  VmaUint64ToStr(buf, sizeof(buf), num);
6000  Add(buf);
6001 }
6002 
6003 void VmaStringBuilder::AddPointer(const void* ptr)
6004 {
6005  char buf[21];
6006  VmaPtrToStr(buf, sizeof(buf), ptr);
6007  Add(buf);
6008 }
6009 
6010 #endif // #if VMA_STATS_STRING_ENABLED
6011 
6013 // VmaJsonWriter
6014 
6015 #if VMA_STATS_STRING_ENABLED
6016 
6017 class VmaJsonWriter
6018 {
6019  VMA_CLASS_NO_COPY(VmaJsonWriter)
6020 public:
6021  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
6022  ~VmaJsonWriter();
6023 
6024  void BeginObject(bool singleLine = false);
6025  void EndObject();
6026 
6027  void BeginArray(bool singleLine = false);
6028  void EndArray();
6029 
6030  void WriteString(const char* pStr);
6031  void BeginString(const char* pStr = VMA_NULL);
6032  void ContinueString(const char* pStr);
6033  void ContinueString(uint32_t n);
6034  void ContinueString(uint64_t n);
6035  void ContinueString_Pointer(const void* ptr);
6036  void EndString(const char* pStr = VMA_NULL);
6037 
6038  void WriteNumber(uint32_t n);
6039  void WriteNumber(uint64_t n);
6040  void WriteBool(bool b);
6041  void WriteNull();
6042 
6043 private:
6044  static const char* const INDENT;
6045 
6046  enum COLLECTION_TYPE
6047  {
6048  COLLECTION_TYPE_OBJECT,
6049  COLLECTION_TYPE_ARRAY,
6050  };
6051  struct StackItem
6052  {
6053  COLLECTION_TYPE type;
6054  uint32_t valueCount;
6055  bool singleLineMode;
6056  };
6057 
6058  VmaStringBuilder& m_SB;
6059  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
6060  bool m_InsideString;
6061 
6062  void BeginValue(bool isString);
6063  void WriteIndent(bool oneLess = false);
6064 };
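// Illustrative usage sketch (assumed, not taken from the library):
//
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("TotalBytes");       // Inside an object: first a key...
//   json.WriteNumber((uint64_t)1048576);  // ...then its value.
//   json.EndObject();
//
// This appends a JSON object { "TotalBytes": 1048576 } to the string builder;
// BeginValue() enforces the key/value alternation via valueCount % 2.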
6065 
6066 const char* const VmaJsonWriter::INDENT = "  ";
6067 
6068 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
6069  m_SB(sb),
6070  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
6071  m_InsideString(false)
6072 {
6073 }
6074 
6075 VmaJsonWriter::~VmaJsonWriter()
6076 {
6077  VMA_ASSERT(!m_InsideString);
6078  VMA_ASSERT(m_Stack.empty());
6079 }
6080 
6081 void VmaJsonWriter::BeginObject(bool singleLine)
6082 {
6083  VMA_ASSERT(!m_InsideString);
6084 
6085  BeginValue(false);
6086  m_SB.Add('{');
6087 
6088  StackItem item;
6089  item.type = COLLECTION_TYPE_OBJECT;
6090  item.valueCount = 0;
6091  item.singleLineMode = singleLine;
6092  m_Stack.push_back(item);
6093 }
6094 
6095 void VmaJsonWriter::EndObject()
6096 {
6097  VMA_ASSERT(!m_InsideString);
6098 
6099  WriteIndent(true);
6100  m_SB.Add('}');
6101 
6102  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6103  m_Stack.pop_back();
6104 }
6105 
6106 void VmaJsonWriter::BeginArray(bool singleLine)
6107 {
6108  VMA_ASSERT(!m_InsideString);
6109 
6110  BeginValue(false);
6111  m_SB.Add('[');
6112 
6113  StackItem item;
6114  item.type = COLLECTION_TYPE_ARRAY;
6115  item.valueCount = 0;
6116  item.singleLineMode = singleLine;
6117  m_Stack.push_back(item);
6118 }
6119 
6120 void VmaJsonWriter::EndArray()
6121 {
6122  VMA_ASSERT(!m_InsideString);
6123 
6124  WriteIndent(true);
6125  m_SB.Add(']');
6126 
6127  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6128  m_Stack.pop_back();
6129 }
6130 
6131 void VmaJsonWriter::WriteString(const char* pStr)
6132 {
6133  BeginString(pStr);
6134  EndString();
6135 }
6136 
6137 void VmaJsonWriter::BeginString(const char* pStr)
6138 {
6139  VMA_ASSERT(!m_InsideString);
6140 
6141  BeginValue(true);
6142  m_SB.Add('"');
6143  m_InsideString = true;
6144  if(pStr != VMA_NULL && pStr[0] != '\0')
6145  {
6146  ContinueString(pStr);
6147  }
6148 }
6149 
6150 void VmaJsonWriter::ContinueString(const char* pStr)
6151 {
6152  VMA_ASSERT(m_InsideString);
6153 
6154  const size_t strLen = strlen(pStr);
6155  for(size_t i = 0; i < strLen; ++i)
6156  {
6157  char ch = pStr[i];
6158  if(ch == '\\')
6159  {
6160  m_SB.Add("\\\\");
6161  }
6162  else if(ch == '"')
6163  {
6164  m_SB.Add("\\\"");
6165  }
6166  else if(ch >= 32)
6167  {
6168  m_SB.Add(ch);
6169  }
6170  else switch(ch)
6171  {
6172  case '\b':
6173  m_SB.Add("\\b");
6174  break;
6175  case '\f':
6176  m_SB.Add("\\f");
6177  break;
6178  case '\n':
6179  m_SB.Add("\\n");
6180  break;
6181  case '\r':
6182  m_SB.Add("\\r");
6183  break;
6184  case '\t':
6185  m_SB.Add("\\t");
6186  break;
6187  default:
6188  VMA_ASSERT(0 && "Character not currently supported.");
6189  break;
6190  }
6191  }
6192 }
6193 
6194 void VmaJsonWriter::ContinueString(uint32_t n)
6195 {
6196  VMA_ASSERT(m_InsideString);
6197  m_SB.AddNumber(n);
6198 }
6199 
6200 void VmaJsonWriter::ContinueString(uint64_t n)
6201 {
6202  VMA_ASSERT(m_InsideString);
6203  m_SB.AddNumber(n);
6204 }
6205 
6206 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6207 {
6208  VMA_ASSERT(m_InsideString);
6209  m_SB.AddPointer(ptr);
6210 }
6211 
6212 void VmaJsonWriter::EndString(const char* pStr)
6213 {
6214  VMA_ASSERT(m_InsideString);
6215  if(pStr != VMA_NULL && pStr[0] != '\0')
6216  {
6217  ContinueString(pStr);
6218  }
6219  m_SB.Add('"');
6220  m_InsideString = false;
6221 }
6222 
6223 void VmaJsonWriter::WriteNumber(uint32_t n)
6224 {
6225  VMA_ASSERT(!m_InsideString);
6226  BeginValue(false);
6227  m_SB.AddNumber(n);
6228 }
6229 
6230 void VmaJsonWriter::WriteNumber(uint64_t n)
6231 {
6232  VMA_ASSERT(!m_InsideString);
6233  BeginValue(false);
6234  m_SB.AddNumber(n);
6235 }
6236 
6237 void VmaJsonWriter::WriteBool(bool b)
6238 {
6239  VMA_ASSERT(!m_InsideString);
6240  BeginValue(false);
6241  m_SB.Add(b ? "true" : "false");
6242 }
6243 
6244 void VmaJsonWriter::WriteNull()
6245 {
6246  VMA_ASSERT(!m_InsideString);
6247  BeginValue(false);
6248  m_SB.Add("null");
6249 }
6250 
6251 void VmaJsonWriter::BeginValue(bool isString)
6252 {
6253  if(!m_Stack.empty())
6254  {
6255  StackItem& currItem = m_Stack.back();
6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6257  currItem.valueCount % 2 == 0)
6258  {
6259  VMA_ASSERT(isString);
6260  }
6261 
6262  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6263  currItem.valueCount % 2 != 0)
6264  {
6265  m_SB.Add(": ");
6266  }
6267  else if(currItem.valueCount > 0)
6268  {
6269  m_SB.Add(", ");
6270  WriteIndent();
6271  }
6272  else
6273  {
6274  WriteIndent();
6275  }
6276  ++currItem.valueCount;
6277  }
6278 }
6279 
6280 void VmaJsonWriter::WriteIndent(bool oneLess)
6281 {
6282  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6283  {
6284  m_SB.AddNewLine();
6285 
6286  size_t count = m_Stack.size();
6287  if(count > 0 && oneLess)
6288  {
6289  --count;
6290  }
6291  for(size_t i = 0; i < count; ++i)
6292  {
6293  m_SB.Add(INDENT);
6294  }
6295  }
6296 }
6297 
6298 #endif // #if VMA_STATS_STRING_ENABLED
6299 
6300 ////////////////////////////////////////////////////////////////////////////////
6301 
6302 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6303 {
6304  if(IsUserDataString())
6305  {
6306  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6307 
6308  FreeUserDataString(hAllocator);
6309 
6310  if(pUserData != VMA_NULL)
6311  {
6312  const char* const newStrSrc = (char*)pUserData;
6313  const size_t newStrLen = strlen(newStrSrc);
6314  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6315  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6316  m_pUserData = newStrDst;
6317  }
6318  }
6319  else
6320  {
6321  m_pUserData = pUserData;
6322  }
6323 }
6324 
6325 void VmaAllocation_T::ChangeBlockAllocation(
6326  VmaAllocator hAllocator,
6327  VmaDeviceMemoryBlock* block,
6328  VkDeviceSize offset)
6329 {
6330  VMA_ASSERT(block != VMA_NULL);
6331  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6332 
6333  // Move mapping reference counter from old block to new block.
6334  if(block != m_BlockAllocation.m_Block)
6335  {
6336  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6337  if(IsPersistentMap())
6338  ++mapRefCount;
6339  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6340  block->Map(hAllocator, mapRefCount, VMA_NULL);
6341  }
6342 
6343  m_BlockAllocation.m_Block = block;
6344  m_BlockAllocation.m_Offset = offset;
6345 }
6346 
6347 VkDeviceSize VmaAllocation_T::GetOffset() const
6348 {
6349  switch(m_Type)
6350  {
6351  case ALLOCATION_TYPE_BLOCK:
6352  return m_BlockAllocation.m_Offset;
6353  case ALLOCATION_TYPE_DEDICATED:
6354  return 0;
6355  default:
6356  VMA_ASSERT(0);
6357  return 0;
6358  }
6359 }
6360 
6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
6362 {
6363  switch(m_Type)
6364  {
6365  case ALLOCATION_TYPE_BLOCK:
6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
6367  case ALLOCATION_TYPE_DEDICATED:
6368  return m_DedicatedAllocation.m_hMemory;
6369  default:
6370  VMA_ASSERT(0);
6371  return VK_NULL_HANDLE;
6372  }
6373 }
6374 
6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6376 {
6377  switch(m_Type)
6378  {
6379  case ALLOCATION_TYPE_BLOCK:
6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6381  case ALLOCATION_TYPE_DEDICATED:
6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
6383  default:
6384  VMA_ASSERT(0);
6385  return UINT32_MAX;
6386  }
6387 }
6388 
6389 void* VmaAllocation_T::GetMappedData() const
6390 {
6391  switch(m_Type)
6392  {
6393  case ALLOCATION_TYPE_BLOCK:
6394  if(m_MapCount != 0)
6395  {
6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6397  VMA_ASSERT(pBlockData != VMA_NULL);
6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6399  }
6400  else
6401  {
6402  return VMA_NULL;
6403  }
6404  break;
6405  case ALLOCATION_TYPE_DEDICATED:
6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6407  return m_DedicatedAllocation.m_pMappedData;
6408  default:
6409  VMA_ASSERT(0);
6410  return VMA_NULL;
6411  }
6412 }
6413 
6414 bool VmaAllocation_T::CanBecomeLost() const
6415 {
6416  switch(m_Type)
6417  {
6418  case ALLOCATION_TYPE_BLOCK:
6419  return m_BlockAllocation.m_CanBecomeLost;
6420  case ALLOCATION_TYPE_DEDICATED:
6421  return false;
6422  default:
6423  VMA_ASSERT(0);
6424  return false;
6425  }
6426 }
6427 
6428 VmaPool VmaAllocation_T::GetPool() const
6429 {
6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6431  return m_BlockAllocation.m_hPool;
6432 }
6433 
6434 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6435 {
6436  VMA_ASSERT(CanBecomeLost());
6437 
6438  /*
6439  Warning: This is a carefully designed algorithm.
6440  Do not modify unless you really know what you're doing :)
6441  */
6442  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6443  for(;;)
6444  {
6445  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6446  {
6447  VMA_ASSERT(0);
6448  return false;
6449  }
6450  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6451  {
6452  return false;
6453  }
6454  else // Last use time earlier than current time.
6455  {
6456  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6457  {
6458  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6459  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6460  return true;
6461  }
6462  }
6463  }
6464 }
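/*
Editorial note: the loop above is the standard lock-free compare-exchange retry
pattern. A minimal stand-alone sketch of the same idea using std::atomic --
hypothetical names, illustration only, not the library's code:

    #include <atomic>
    #include <cstdint>

    static const uint32_t FRAME_LOST = UINT32_MAX; // plays the role of VMA_FRAME_INDEX_LOST

    // Returns true if this call won the race to mark the allocation lost.
    static bool TryMarkLost(std::atomic<uint32_t>& lastUseFrame,
                            uint32_t currentFrame, uint32_t framesInUse)
    {
        uint32_t observed = lastUseFrame.load();
        for(;;)
        {
            if(observed == FRAME_LOST)
                return false; // Already lost (the original asserts here instead).
            if(observed + framesInUse >= currentFrame)
                return false; // Used too recently - must not become lost.
            // Publish FRAME_LOST; on failure `observed` is reloaded and we retry.
            if(lastUseFrame.compare_exchange_weak(observed, FRAME_LOST))
                return true;
        }
    }
*/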
6465 
6466 #if VMA_STATS_STRING_ENABLED
6467 
6468 // Entries correspond to the values of enum VmaSuballocationType.
6469 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6470  "FREE",
6471  "UNKNOWN",
6472  "BUFFER",
6473  "IMAGE_UNKNOWN",
6474  "IMAGE_LINEAR",
6475  "IMAGE_OPTIMAL",
6476 };
6477 
6478 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6479 {
6480  json.WriteString("Type");
6481  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6482 
6483  json.WriteString("Size");
6484  json.WriteNumber(m_Size);
6485 
6486  if(m_pUserData != VMA_NULL)
6487  {
6488  json.WriteString("UserData");
6489  if(IsUserDataString())
6490  {
6491  json.WriteString((const char*)m_pUserData);
6492  }
6493  else
6494  {
6495  json.BeginString();
6496  json.ContinueString_Pointer(m_pUserData);
6497  json.EndString();
6498  }
6499  }
6500 
6501  json.WriteString("CreationFrameIndex");
6502  json.WriteNumber(m_CreationFrameIndex);
6503 
6504  json.WriteString("LastUseFrameIndex");
6505  json.WriteNumber(GetLastUseFrameIndex());
6506 
6507  if(m_BufferImageUsage != 0)
6508  {
6509  json.WriteString("Usage");
6510  json.WriteNumber(m_BufferImageUsage);
6511  }
6512 }
6513 
6514 #endif
6515 
6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6517 {
6518  VMA_ASSERT(IsUserDataString());
6519  if(m_pUserData != VMA_NULL)
6520  {
6521  char* const oldStr = (char*)m_pUserData;
6522  const size_t oldStrLen = strlen(oldStr);
6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6524  m_pUserData = VMA_NULL;
6525  }
6526 }
6527 
6528 void VmaAllocation_T::BlockAllocMap()
6529 {
6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6531 
6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6533  {
6534  ++m_MapCount;
6535  }
6536  else
6537  {
6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6539  }
6540 }
6541 
6542 void VmaAllocation_T::BlockAllocUnmap()
6543 {
6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6545 
6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6547  {
6548  --m_MapCount;
6549  }
6550  else
6551  {
6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6553  }
6554 }
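/*
Editorial note on the 0x7F ceiling above: m_MapCount packs a small mapping
reference count together with MAP_COUNT_FLAG_PERSISTENT_MAP (a separate flag
bit that is masked out before comparing), so a persistently mapped allocation
can still be mapped and unmapped up to 127 additional times. The exact bit
layout is an implementation detail of this header.
*/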
6555 
6556 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6557 {
6558  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6559 
6560  if(m_MapCount != 0)
6561  {
6562  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6563  {
6564  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6565  *ppData = m_DedicatedAllocation.m_pMappedData;
6566  ++m_MapCount;
6567  return VK_SUCCESS;
6568  }
6569  else
6570  {
6571  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6572  return VK_ERROR_MEMORY_MAP_FAILED;
6573  }
6574  }
6575  else
6576  {
6577  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6578  hAllocator->m_hDevice,
6579  m_DedicatedAllocation.m_hMemory,
6580  0, // offset
6581  VK_WHOLE_SIZE,
6582  0, // flags
6583  ppData);
6584  if(result == VK_SUCCESS)
6585  {
6586  m_DedicatedAllocation.m_pMappedData = *ppData;
6587  m_MapCount = 1;
6588  }
6589  return result;
6590  }
6591 }
6592 
6593 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6594 {
6595  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6596 
6597  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6598  {
6599  --m_MapCount;
6600  if(m_MapCount == 0)
6601  {
6602  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6603  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6604  hAllocator->m_hDevice,
6605  m_DedicatedAllocation.m_hMemory);
6606  }
6607  }
6608  else
6609  {
6610  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6611  }
6612 }
6613 
6614 #if VMA_STATS_STRING_ENABLED
6615 
6616 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6617 {
6618  json.BeginObject();
6619 
6620  json.WriteString("Blocks");
6621  json.WriteNumber(stat.blockCount);
6622 
6623  json.WriteString("Allocations");
6624  json.WriteNumber(stat.allocationCount);
6625 
6626  json.WriteString("UnusedRanges");
6627  json.WriteNumber(stat.unusedRangeCount);
6628 
6629  json.WriteString("UsedBytes");
6630  json.WriteNumber(stat.usedBytes);
6631 
6632  json.WriteString("UnusedBytes");
6633  json.WriteNumber(stat.unusedBytes);
6634 
6635  if(stat.allocationCount > 1)
6636  {
6637  json.WriteString("AllocationSize");
6638  json.BeginObject(true);
6639  json.WriteString("Min");
6640  json.WriteNumber(stat.allocationSizeMin);
6641  json.WriteString("Avg");
6642  json.WriteNumber(stat.allocationSizeAvg);
6643  json.WriteString("Max");
6644  json.WriteNumber(stat.allocationSizeMax);
6645  json.EndObject();
6646  }
6647 
6648  if(stat.unusedRangeCount > 1)
6649  {
6650  json.WriteString("UnusedRangeSize");
6651  json.BeginObject(true);
6652  json.WriteString("Min");
6653  json.WriteNumber(stat.unusedRangeSizeMin);
6654  json.WriteString("Avg");
6655  json.WriteNumber(stat.unusedRangeSizeAvg);
6656  json.WriteString("Max");
6657  json.WriteNumber(stat.unusedRangeSizeMax);
6658  json.EndObject();
6659  }
6660 
6661  json.EndObject();
6662 }
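/*
Editorial example of the JSON shape emitted by VmaPrintStatInfo above, for a
block holding two allocations of 256 and 512 bytes with one unused range
(illustrative values; exact whitespace depends on VmaJsonWriter's indentation):

    {
      "Blocks": 1,
      "Allocations": 2,
      "UnusedRanges": 1,
      "UsedBytes": 768,
      "UnusedBytes": 256,
      "AllocationSize": { "Min": 256, "Avg": 384, "Max": 512 }
    }

"UnusedRangeSize" is omitted here because it is only written when
unusedRangeCount > 1, just as "AllocationSize" requires allocationCount > 1.
*/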
6663 
6664 #endif // #if VMA_STATS_STRING_ENABLED
6665 
6666 struct VmaSuballocationItemSizeLess
6667 {
6668  bool operator()(
6669  const VmaSuballocationList::iterator lhs,
6670  const VmaSuballocationList::iterator rhs) const
6671  {
6672  return lhs->size < rhs->size;
6673  }
6674  bool operator()(
6675  const VmaSuballocationList::iterator lhs,
6676  VkDeviceSize rhsSize) const
6677  {
6678  return lhs->size < rhsSize;
6679  }
6680 };
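/*
Editorial note: the second operator() overload lets the same comparator both
order iterators by suballocation size and binary-search the sorted vector
against a plain VkDeviceSize key (see the VmaBinaryFindFirstNotLess calls
below). A minimal sketch of the same "first not less" idiom with the standard
library (illustrative only):

    #include <algorithm>
    #include <vector>

    // Index of the first element >= minSize in a vector sorted ascending.
    static size_t FindFirstNotLess(const std::vector<VkDeviceSize>& sizes,
                                   VkDeviceSize minSize)
    {
        return (size_t)(std::lower_bound(sizes.begin(), sizes.end(), minSize)
            - sizes.begin());
    }
*/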
6681 
6682 
6683 ////////////////////////////////////////////////////////////////////////////////
6684 // class VmaBlockMetadata
6685 
6686 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6687  m_Size(0),
6688  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6689 {
6690 }
6691 
6692 #if VMA_STATS_STRING_ENABLED
6693 
6694 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6695  VkDeviceSize unusedBytes,
6696  size_t allocationCount,
6697  size_t unusedRangeCount) const
6698 {
6699  json.BeginObject();
6700 
6701  json.WriteString("TotalBytes");
6702  json.WriteNumber(GetSize());
6703 
6704  json.WriteString("UnusedBytes");
6705  json.WriteNumber(unusedBytes);
6706 
6707  json.WriteString("Allocations");
6708  json.WriteNumber((uint64_t)allocationCount);
6709 
6710  json.WriteString("UnusedRanges");
6711  json.WriteNumber((uint64_t)unusedRangeCount);
6712 
6713  json.WriteString("Suballocations");
6714  json.BeginArray();
6715 }
6716 
6717 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6718  VkDeviceSize offset,
6719  VmaAllocation hAllocation) const
6720 {
6721  json.BeginObject(true);
6722 
6723  json.WriteString("Offset");
6724  json.WriteNumber(offset);
6725 
6726  hAllocation->PrintParameters(json);
6727 
6728  json.EndObject();
6729 }
6730 
6731 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6732  VkDeviceSize offset,
6733  VkDeviceSize size) const
6734 {
6735  json.BeginObject(true);
6736 
6737  json.WriteString("Offset");
6738  json.WriteNumber(offset);
6739 
6740  json.WriteString("Type");
6741  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6742 
6743  json.WriteString("Size");
6744  json.WriteNumber(size);
6745 
6746  json.EndObject();
6747 }
6748 
6749 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6750 {
6751  json.EndArray();
6752  json.EndObject();
6753 }
6754 
6755 #endif // #if VMA_STATS_STRING_ENABLED
6756 
6757 ////////////////////////////////////////////////////////////////////////////////
6758 // class VmaBlockMetadata_Generic
6759 
6760 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6761  VmaBlockMetadata(hAllocator),
6762  m_FreeCount(0),
6763  m_SumFreeSize(0),
6764  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6765  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6766 {
6767 }
6768 
6769 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6770 {
6771 }
6772 
6773 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6774 {
6775  VmaBlockMetadata::Init(size);
6776 
6777  m_FreeCount = 1;
6778  m_SumFreeSize = size;
6779 
6780  VmaSuballocation suballoc = {};
6781  suballoc.offset = 0;
6782  suballoc.size = size;
6783  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6784  suballoc.hAllocation = VK_NULL_HANDLE;
6785 
6786  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6787  m_Suballocations.push_back(suballoc);
6788  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6789  --suballocItem;
6790  m_FreeSuballocationsBySize.push_back(suballocItem);
6791 }
6792 
6793 bool VmaBlockMetadata_Generic::Validate() const
6794 {
6795  VMA_VALIDATE(!m_Suballocations.empty());
6796 
6797  // Expected offset of new suballocation as calculated from previous ones.
6798  VkDeviceSize calculatedOffset = 0;
6799  // Expected number of free suballocations as calculated from traversing their list.
6800  uint32_t calculatedFreeCount = 0;
6801  // Expected sum size of free suballocations as calculated from traversing their list.
6802  VkDeviceSize calculatedSumFreeSize = 0;
6803  // Expected number of free suballocations that should be registered in
6804  // m_FreeSuballocationsBySize calculated from traversing their list.
6805  size_t freeSuballocationsToRegister = 0;
6806  // True if previous visited suballocation was free.
6807  bool prevFree = false;
6808 
6809  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6810  suballocItem != m_Suballocations.cend();
6811  ++suballocItem)
6812  {
6813  const VmaSuballocation& subAlloc = *suballocItem;
6814 
6815  // Actual offset of this suballocation doesn't match expected one.
6816  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6817 
6818  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6819  // Two adjacent free suballocations are invalid. They should be merged.
6820  VMA_VALIDATE(!prevFree || !currFree);
6821 
6822  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6823 
6824  if(currFree)
6825  {
6826  calculatedSumFreeSize += subAlloc.size;
6827  ++calculatedFreeCount;
6828  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6829  {
6830  ++freeSuballocationsToRegister;
6831  }
6832 
6833  // Margin required between allocations - every free space must be at least that large.
6834  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6835  }
6836  else
6837  {
6838  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6839  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6840 
6841  // Margin required between allocations - previous allocation must be free.
6842  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6843  }
6844 
6845  calculatedOffset += subAlloc.size;
6846  prevFree = currFree;
6847  }
6848 
6849  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6850  // match expected one.
6851  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6852 
6853  VkDeviceSize lastSize = 0;
6854  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6855  {
6856  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6857 
6858  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6859  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6860  // They must be sorted by size ascending.
6861  VMA_VALIDATE(suballocItem->size >= lastSize);
6862 
6863  lastSize = suballocItem->size;
6864  }
6865 
6866  // Check if totals match calculated values.
6867  VMA_VALIDATE(ValidateFreeSuballocationList());
6868  VMA_VALIDATE(calculatedOffset == GetSize());
6869  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6870  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6871 
6872  return true;
6873 }
6874 
6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6876 {
6877  if(!m_FreeSuballocationsBySize.empty())
6878  {
6879  return m_FreeSuballocationsBySize.back()->size;
6880  }
6881  else
6882  {
6883  return 0;
6884  }
6885 }
6886 
6887 bool VmaBlockMetadata_Generic::IsEmpty() const
6888 {
6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6890 }
6891 
6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6893 {
6894  outInfo.blockCount = 1;
6895 
6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6897  outInfo.allocationCount = rangeCount - m_FreeCount;
6898  outInfo.unusedRangeCount = m_FreeCount;
6899 
6900  outInfo.unusedBytes = m_SumFreeSize;
6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6902 
6903  outInfo.allocationSizeMin = UINT64_MAX;
6904  outInfo.allocationSizeMax = 0;
6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
6906  outInfo.unusedRangeSizeMax = 0;
6907 
6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6909  suballocItem != m_Suballocations.cend();
6910  ++suballocItem)
6911  {
6912  const VmaSuballocation& suballoc = *suballocItem;
6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6914  {
6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6917  }
6918  else
6919  {
6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6922  }
6923  }
6924 }
6925 
6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6927 {
6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6929 
6930  inoutStats.size += GetSize();
6931  inoutStats.unusedSize += m_SumFreeSize;
6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
6933  inoutStats.unusedRangeCount += m_FreeCount;
6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6935 }
6936 
6937 #if VMA_STATS_STRING_ENABLED
6938 
6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6940 {
6941  PrintDetailedMap_Begin(json,
6942  m_SumFreeSize, // unusedBytes
6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6944  m_FreeCount); // unusedRangeCount
6945 
6946  size_t i = 0;
6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6948  suballocItem != m_Suballocations.cend();
6949  ++suballocItem, ++i)
6950  {
6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6952  {
6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6954  }
6955  else
6956  {
6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6958  }
6959  }
6960 
6961  PrintDetailedMap_End(json);
6962 }
6963 
6964 #endif // #if VMA_STATS_STRING_ENABLED
6965 
6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6967  uint32_t currentFrameIndex,
6968  uint32_t frameInUseCount,
6969  VkDeviceSize bufferImageGranularity,
6970  VkDeviceSize allocSize,
6971  VkDeviceSize allocAlignment,
6972  bool upperAddress,
6973  VmaSuballocationType allocType,
6974  bool canMakeOtherLost,
6975  uint32_t strategy,
6976  VmaAllocationRequest* pAllocationRequest)
6977 {
6978  VMA_ASSERT(allocSize > 0);
6979  VMA_ASSERT(!upperAddress);
6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6982  VMA_HEAVY_ASSERT(Validate());
6983 
6984  // There is not enough total free space in this block to fulfill the request: Early return.
6985  if(canMakeOtherLost == false &&
6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6987  {
6988  return false;
6989  }
6990 
6991  // New algorithm, efficiently searching freeSuballocationsBySize.
6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6993  if(freeSuballocCount > 0)
6994  {
6995  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6996  {
6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6999  m_FreeSuballocationsBySize.data(),
7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
7001  allocSize + 2 * VMA_DEBUG_MARGIN,
7002  VmaSuballocationItemSizeLess());
7003  size_t index = it - m_FreeSuballocationsBySize.data();
7004  for(; index < freeSuballocCount; ++index)
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  m_FreeSuballocationsBySize[index],
7014  false, // canMakeOtherLost
7015  &pAllocationRequest->offset,
7016  &pAllocationRequest->itemsToMakeLostCount,
7017  &pAllocationRequest->sumFreeSize,
7018  &pAllocationRequest->sumItemSize))
7019  {
7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7021  return true;
7022  }
7023  }
7024  }
7025  else // WORST_FIT, FIRST_FIT
7026  {
7027  // Search starting from the biggest suballocations.
7028  for(size_t index = freeSuballocCount; index--; )
7029  {
7030  if(CheckAllocation(
7031  currentFrameIndex,
7032  frameInUseCount,
7033  bufferImageGranularity,
7034  allocSize,
7035  allocAlignment,
7036  allocType,
7037  m_FreeSuballocationsBySize[index],
7038  false, // canMakeOtherLost
7039  &pAllocationRequest->offset,
7040  &pAllocationRequest->itemsToMakeLostCount,
7041  &pAllocationRequest->sumFreeSize,
7042  &pAllocationRequest->sumItemSize))
7043  {
7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7045  return true;
7046  }
7047  }
7048  }
7049  }
7050 
7051  if(canMakeOtherLost)
7052  {
7053  // Brute-force algorithm. TODO: Come up with something better.
7054 
7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
7057 
7058  VmaAllocationRequest tmpAllocRequest = {};
7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7060  suballocIt != m_Suballocations.end();
7061  ++suballocIt)
7062  {
7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7064  suballocIt->hAllocation->CanBecomeLost())
7065  {
7066  if(CheckAllocation(
7067  currentFrameIndex,
7068  frameInUseCount,
7069  bufferImageGranularity,
7070  allocSize,
7071  allocAlignment,
7072  allocType,
7073  suballocIt,
7074  canMakeOtherLost,
7075  &tmpAllocRequest.offset,
7076  &tmpAllocRequest.itemsToMakeLostCount,
7077  &tmpAllocRequest.sumFreeSize,
7078  &tmpAllocRequest.sumItemSize))
7079  {
7080  tmpAllocRequest.item = suballocIt;
7081 
7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7083  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7084  {
7085  *pAllocationRequest = tmpAllocRequest;
7086  }
7087  }
7088  }
7089  }
7090 
7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7092  {
7093  return true;
7094  }
7095  }
7096 
7097  return false;
7098 }
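/*
Editorial summary of the search strategies above (descriptive, not normative):
- BEST_FIT: binary-search m_FreeSuballocationsBySize (sorted ascending by size)
  for the smallest free range that can hold allocSize plus margins, then scan
  upward until CheckAllocation() succeeds.
- WORST_FIT / FIRST_FIT: scan the same vector from the biggest free range down,
  so the first success is the loosest fit.
- canMakeOtherLost: O(n) brute force over every suballocation, keeping the
  candidate whose CalcCost() -- the bytes of other allocations that would have
  to be sacrificed -- is lowest.
*/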
7099 
7100 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7101  uint32_t currentFrameIndex,
7102  uint32_t frameInUseCount,
7103  VmaAllocationRequest* pAllocationRequest)
7104 {
7105  while(pAllocationRequest->itemsToMakeLostCount > 0)
7106  {
7107  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7108  {
7109  ++pAllocationRequest->item;
7110  }
7111  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7112  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7113  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7114  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7115  {
7116  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7117  --pAllocationRequest->itemsToMakeLostCount;
7118  }
7119  else
7120  {
7121  return false;
7122  }
7123  }
7124 
7125  VMA_HEAVY_ASSERT(Validate());
7126  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7127  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7128 
7129  return true;
7130 }
7131 
7132 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7133 {
7134  uint32_t lostAllocationCount = 0;
7135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7136  it != m_Suballocations.end();
7137  ++it)
7138  {
7139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7140  it->hAllocation->CanBecomeLost() &&
7141  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7142  {
7143  it = FreeSuballocation(it);
7144  ++lostAllocationCount;
7145  }
7146  }
7147  return lostAllocationCount;
7148 }
7149 
7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7151 {
7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7153  it != m_Suballocations.end();
7154  ++it)
7155  {
7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7157  {
7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7159  {
7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7161  return VK_ERROR_VALIDATION_FAILED_EXT;
7162  }
7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7164  {
7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7166  return VK_ERROR_VALIDATION_FAILED_EXT;
7167  }
7168  }
7169  }
7170 
7171  return VK_SUCCESS;
7172 }
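/*
Editorial note: this check relies on the allocator writing a magic pattern into
the VMA_DEBUG_MARGIN bytes surrounding every used suballocation when corruption
detection is enabled. CheckCorruption() re-reads the magic directly before each
allocation (offset - VMA_DEBUG_MARGIN) and directly after it (offset + size);
any overwrite by a buggy caller surfaces as VK_ERROR_VALIDATION_FAILED_EXT.
*/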
7173 
7174 void VmaBlockMetadata_Generic::Alloc(
7175  const VmaAllocationRequest& request,
7176  VmaSuballocationType type,
7177  VkDeviceSize allocSize,
7178  bool upperAddress,
7179  VmaAllocation hAllocation)
7180 {
7181  VMA_ASSERT(!upperAddress);
7182  VMA_ASSERT(request.item != m_Suballocations.end());
7183  VmaSuballocation& suballoc = *request.item;
7184  // Given suballocation is a free block.
7185  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7186  // Given offset is inside this suballocation.
7187  VMA_ASSERT(request.offset >= suballoc.offset);
7188  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7189  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7190  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7191 
7192  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7193  // it to become used.
7194  UnregisterFreeSuballocation(request.item);
7195 
7196  suballoc.offset = request.offset;
7197  suballoc.size = allocSize;
7198  suballoc.type = type;
7199  suballoc.hAllocation = hAllocation;
7200 
7201  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7202  if(paddingEnd)
7203  {
7204  VmaSuballocation paddingSuballoc = {};
7205  paddingSuballoc.offset = request.offset + allocSize;
7206  paddingSuballoc.size = paddingEnd;
7207  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7208  VmaSuballocationList::iterator next = request.item;
7209  ++next;
7210  const VmaSuballocationList::iterator paddingEndItem =
7211  m_Suballocations.insert(next, paddingSuballoc);
7212  RegisterFreeSuballocation(paddingEndItem);
7213  }
7214 
7215  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7216  if(paddingBegin)
7217  {
7218  VmaSuballocation paddingSuballoc = {};
7219  paddingSuballoc.offset = request.offset - paddingBegin;
7220  paddingSuballoc.size = paddingBegin;
7221  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7222  const VmaSuballocationList::iterator paddingBeginItem =
7223  m_Suballocations.insert(request.item, paddingSuballoc);
7224  RegisterFreeSuballocation(paddingBeginItem);
7225  }
7226 
7227  // Update totals.
7228  m_FreeCount = m_FreeCount - 1;
7229  if(paddingBegin > 0)
7230  {
7231  ++m_FreeCount;
7232  }
7233  if(paddingEnd > 0)
7234  {
7235  ++m_FreeCount;
7236  }
7237  m_SumFreeSize -= allocSize;
7238 }
7239 
7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7241 {
7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7243  suballocItem != m_Suballocations.end();
7244  ++suballocItem)
7245  {
7246  VmaSuballocation& suballoc = *suballocItem;
7247  if(suballoc.hAllocation == allocation)
7248  {
7249  FreeSuballocation(suballocItem);
7250  VMA_HEAVY_ASSERT(Validate());
7251  return;
7252  }
7253  }
7254  VMA_ASSERT(0 && "Not found!");
7255 }
7256 
7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7258 {
7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7260  suballocItem != m_Suballocations.end();
7261  ++suballocItem)
7262  {
7263  VmaSuballocation& suballoc = *suballocItem;
7264  if(suballoc.offset == offset)
7265  {
7266  FreeSuballocation(suballocItem);
7267  return;
7268  }
7269  }
7270  VMA_ASSERT(0 && "Not found!");
7271 }
7272 
7273 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7274 {
7275  VkDeviceSize lastSize = 0;
7276  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7277  {
7278  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7279 
7280  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7281  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7282  VMA_VALIDATE(it->size >= lastSize);
7283  lastSize = it->size;
7284  }
7285  return true;
7286 }
7287 
7288 bool VmaBlockMetadata_Generic::CheckAllocation(
7289  uint32_t currentFrameIndex,
7290  uint32_t frameInUseCount,
7291  VkDeviceSize bufferImageGranularity,
7292  VkDeviceSize allocSize,
7293  VkDeviceSize allocAlignment,
7294  VmaSuballocationType allocType,
7295  VmaSuballocationList::const_iterator suballocItem,
7296  bool canMakeOtherLost,
7297  VkDeviceSize* pOffset,
7298  size_t* itemsToMakeLostCount,
7299  VkDeviceSize* pSumFreeSize,
7300  VkDeviceSize* pSumItemSize) const
7301 {
7302  VMA_ASSERT(allocSize > 0);
7303  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7304  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7305  VMA_ASSERT(pOffset != VMA_NULL);
7306 
7307  *itemsToMakeLostCount = 0;
7308  *pSumFreeSize = 0;
7309  *pSumItemSize = 0;
7310 
7311  if(canMakeOtherLost)
7312  {
7313  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7314  {
7315  *pSumFreeSize = suballocItem->size;
7316  }
7317  else
7318  {
7319  if(suballocItem->hAllocation->CanBecomeLost() &&
7320  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7321  {
7322  ++*itemsToMakeLostCount;
7323  *pSumItemSize = suballocItem->size;
7324  }
7325  else
7326  {
7327  return false;
7328  }
7329  }
7330 
7331  // Remaining size is too small for this request: Early return.
7332  if(GetSize() - suballocItem->offset < allocSize)
7333  {
7334  return false;
7335  }
7336 
7337  // Start from offset equal to beginning of this suballocation.
7338  *pOffset = suballocItem->offset;
7339 
7340  // Apply VMA_DEBUG_MARGIN at the beginning.
7341  if(VMA_DEBUG_MARGIN > 0)
7342  {
7343  *pOffset += VMA_DEBUG_MARGIN;
7344  }
7345 
7346  // Apply alignment.
7347  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7348 
7349  // Check previous suballocations for BufferImageGranularity conflicts.
7350  // Make bigger alignment if necessary.
7351  if(bufferImageGranularity > 1)
7352  {
7353  bool bufferImageGranularityConflict = false;
7354  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7355  while(prevSuballocItem != m_Suballocations.cbegin())
7356  {
7357  --prevSuballocItem;
7358  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7359  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7360  {
7361  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7362  {
7363  bufferImageGranularityConflict = true;
7364  break;
7365  }
7366  }
7367  else
7368  // Already on previous page.
7369  break;
7370  }
7371  if(bufferImageGranularityConflict)
7372  {
7373  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7374  }
7375  }
7376 
7377  // Now that we have final *pOffset, check if we are past suballocItem.
7378  // If yes, return false - this function should be called for another suballocItem as starting point.
7379  if(*pOffset >= suballocItem->offset + suballocItem->size)
7380  {
7381  return false;
7382  }
7383 
7384  // Calculate padding at the beginning based on current offset.
7385  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7386 
7387  // Calculate required margin at the end.
7388  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7389 
7390  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7391  // Another early return check.
7392  if(suballocItem->offset + totalSize > GetSize())
7393  {
7394  return false;
7395  }
7396 
7397  // Advance lastSuballocItem until desired size is reached.
7398  // Update itemsToMakeLostCount.
7399  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7400  if(totalSize > suballocItem->size)
7401  {
7402  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7403  while(remainingSize > 0)
7404  {
7405  ++lastSuballocItem;
7406  if(lastSuballocItem == m_Suballocations.cend())
7407  {
7408  return false;
7409  }
7410  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7411  {
7412  *pSumFreeSize += lastSuballocItem->size;
7413  }
7414  else
7415  {
7416  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7417  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7418  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7419  {
7420  ++*itemsToMakeLostCount;
7421  *pSumItemSize += lastSuballocItem->size;
7422  }
7423  else
7424  {
7425  return false;
7426  }
7427  }
7428  remainingSize = (lastSuballocItem->size < remainingSize) ?
7429  remainingSize - lastSuballocItem->size : 0;
7430  }
7431  }
7432 
7433  // Check next suballocations for BufferImageGranularity conflicts.
7434  // If conflict exists, we must mark more allocations lost or fail.
7435  if(bufferImageGranularity > 1)
7436  {
7437  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7438  ++nextSuballocItem;
7439  while(nextSuballocItem != m_Suballocations.cend())
7440  {
7441  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7442  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7443  {
7444  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7445  {
7446  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7447  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7448  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7449  {
7450  ++*itemsToMakeLostCount;
7451  }
7452  else
7453  {
7454  return false;
7455  }
7456  }
7457  }
7458  else
7459  {
7460  // Already on next page.
7461  break;
7462  }
7463  ++nextSuballocItem;
7464  }
7465  }
7466  }
7467  else
7468  {
7469  const VmaSuballocation& suballoc = *suballocItem;
7470  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7471 
7472  *pSumFreeSize = suballoc.size;
7473 
7474  // Size of this suballocation is too small for this request: Early return.
7475  if(suballoc.size < allocSize)
7476  {
7477  return false;
7478  }
7479 
7480  // Start from offset equal to beginning of this suballocation.
7481  *pOffset = suballoc.offset;
7482 
7483  // Apply VMA_DEBUG_MARGIN at the beginning.
7484  if(VMA_DEBUG_MARGIN > 0)
7485  {
7486  *pOffset += VMA_DEBUG_MARGIN;
7487  }
7488 
7489  // Apply alignment.
7490  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7491 
7492  // Check previous suballocations for BufferImageGranularity conflicts.
7493  // Make bigger alignment if necessary.
7494  if(bufferImageGranularity > 1)
7495  {
7496  bool bufferImageGranularityConflict = false;
7497  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7498  while(prevSuballocItem != m_Suballocations.cbegin())
7499  {
7500  --prevSuballocItem;
7501  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7502  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7503  {
7504  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7505  {
7506  bufferImageGranularityConflict = true;
7507  break;
7508  }
7509  }
7510  else
7511  // Already on previous page.
7512  break;
7513  }
7514  if(bufferImageGranularityConflict)
7515  {
7516  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7517  }
7518  }
7519 
7520  // Calculate padding at the beginning based on current offset.
7521  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7522 
7523  // Calculate required margin at the end.
7524  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7525 
7526  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7527  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7528  {
7529  return false;
7530  }
7531 
7532  // Check next suballocations for BufferImageGranularity conflicts.
7533  // If conflict exists, allocation cannot be made here.
7534  if(bufferImageGranularity > 1)
7535  {
7536  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7537  ++nextSuballocItem;
7538  while(nextSuballocItem != m_Suballocations.cend())
7539  {
7540  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7541  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7542  {
7543  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7544  {
7545  return false;
7546  }
7547  }
7548  else
7549  {
7550  // Already on next page.
7551  break;
7552  }
7553  ++nextSuballocItem;
7554  }
7555  }
7556  }
7557 
7558  // All tests passed: Success. pOffset is already filled.
7559  return true;
7560 }
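/*
Editorial note: VmaBlocksOnSamePage, used above, treats bufferImageGranularity
as a page size and asks whether the end of one resource and the start of the
next land on the same "page" -- only then can a linear/optimal type mismatch
violate granularity rules. A sketch of that check, assuming pageSize is a power
of two (illustrative only):

    // True if the last byte of A and the first byte of B share a page.
    static bool OnSamePage(VkDeviceSize aOffset, VkDeviceSize aSize,
                           VkDeviceSize bOffset, VkDeviceSize pageSize)
    {
        VkDeviceSize aLastBytePage = (aOffset + aSize - 1) & ~(pageSize - 1);
        VkDeviceSize bFirstBytePage = bOffset & ~(pageSize - 1);
        return aLastBytePage == bFirstBytePage;
    }
*/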
7561 
7562 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7563 {
7564  VMA_ASSERT(item != m_Suballocations.end());
7565  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7566 
7567  VmaSuballocationList::iterator nextItem = item;
7568  ++nextItem;
7569  VMA_ASSERT(nextItem != m_Suballocations.end());
7570  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7571 
7572  item->size += nextItem->size;
7573  --m_FreeCount;
7574  m_Suballocations.erase(nextItem);
7575 }
7576 
7577 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7578 {
7579  // Change this suballocation to be marked as free.
7580  VmaSuballocation& suballoc = *suballocItem;
7581  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7582  suballoc.hAllocation = VK_NULL_HANDLE;
7583 
7584  // Update totals.
7585  ++m_FreeCount;
7586  m_SumFreeSize += suballoc.size;
7587 
7588  // Merge with previous and/or next suballocation if it's also free.
7589  bool mergeWithNext = false;
7590  bool mergeWithPrev = false;
7591 
7592  VmaSuballocationList::iterator nextItem = suballocItem;
7593  ++nextItem;
7594  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7595  {
7596  mergeWithNext = true;
7597  }
7598 
7599  VmaSuballocationList::iterator prevItem = suballocItem;
7600  if(suballocItem != m_Suballocations.begin())
7601  {
7602  --prevItem;
7603  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7604  {
7605  mergeWithPrev = true;
7606  }
7607  }
7608 
7609  if(mergeWithNext)
7610  {
7611  UnregisterFreeSuballocation(nextItem);
7612  MergeFreeWithNext(suballocItem);
7613  }
7614 
7615  if(mergeWithPrev)
7616  {
7617  UnregisterFreeSuballocation(prevItem);
7618  MergeFreeWithNext(prevItem);
7619  RegisterFreeSuballocation(prevItem);
7620  return prevItem;
7621  }
7622  else
7623  {
7624  RegisterFreeSuballocation(suballocItem);
7625  return suballocItem;
7626  }
7627 }
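/*
Editorial diagram of the coalescing above -- after FreeSuballocation() the list
never contains two adjacent free ranges, an invariant that Validate() asserts:

    [ free ][ freed ][ free ]  ->  [          free          ]
    [ used ][ freed ][ free ]  ->  [ used ][      free      ]
    [ free ][ freed ][ used ]  ->  [      free      ][ used ]
*/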
7628 
7629 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7630 {
7631  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7632  VMA_ASSERT(item->size > 0);
7633 
7634  // You may want to enable this validation at the beginning or at the end of
7635  // this function, depending on what you want to check.
7636  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7637 
7638  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7639  {
7640  if(m_FreeSuballocationsBySize.empty())
7641  {
7642  m_FreeSuballocationsBySize.push_back(item);
7643  }
7644  else
7645  {
7646  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7647  }
7648  }
7649 
7650  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7651 }
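/*
Editorial note: only free ranges of at least
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes are indexed in
m_FreeSuballocationsBySize; smaller fragments remain in m_Suballocations but
are never returned by the size-sorted search, which keeps the index small and
the binary search meaningful.
*/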
7652 
7653 
7654 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7655 {
7656  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7657  VMA_ASSERT(item->size > 0);
7658 
7659  // You may want to enable this validation at the beginning or at the end of
7660  // this function, depending on what you want to check.
7661  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7662 
7663  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7664  {
7665  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7666  m_FreeSuballocationsBySize.data(),
7667  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7668  item,
7669  VmaSuballocationItemSizeLess());
7670  for(size_t index = it - m_FreeSuballocationsBySize.data();
7671  index < m_FreeSuballocationsBySize.size();
7672  ++index)
7673  {
7674  if(m_FreeSuballocationsBySize[index] == item)
7675  {
7676  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7677  return;
7678  }
7679  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7680  }
7681  VMA_ASSERT(0 && "Not found.");
7682  }
7683 
7684  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7685 }
7686 
7687 ////////////////////////////////////////////////////////////////////////////////
7688 // class VmaBlockMetadata_Linear
7689 
7690 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7691  VmaBlockMetadata(hAllocator),
7692  m_SumFreeSize(0),
7693  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7694  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7695  m_1stVectorIndex(0),
7696  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7697  m_1stNullItemsBeginCount(0),
7698  m_1stNullItemsMiddleCount(0),
7699  m_2ndNullItemsCount(0)
7700 {
7701 }
7702 
7703 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7704 {
7705 }
7706 
7707 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7708 {
7709  VmaBlockMetadata::Init(size);
7710  m_SumFreeSize = size;
7711 }
7712 
7713 bool VmaBlockMetadata_Linear::Validate() const
7714 {
7715  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7716  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7717 
7718  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7719  VMA_VALIDATE(!suballocations1st.empty() ||
7720  suballocations2nd.empty() ||
7721  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7722 
7723  if(!suballocations1st.empty())
7724  {
7725  // A null item at the beginning should be accounted for in m_1stNullItemsBeginCount.
7726  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7727  // A null item at the end should have been removed by pop_back().
7728  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7729  }
7730  if(!suballocations2nd.empty())
7731  {
7732  // A null item at the end should have been removed by pop_back().
7733  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7734  }
7735 
7736  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7737  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7738 
7739  VkDeviceSize sumUsedSize = 0;
7740  const size_t suballoc1stCount = suballocations1st.size();
7741  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7742 
7743  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7744  {
7745  const size_t suballoc2ndCount = suballocations2nd.size();
7746  size_t nullItem2ndCount = 0;
7747  for(size_t i = 0; i < suballoc2ndCount; ++i)
7748  {
7749  const VmaSuballocation& suballoc = suballocations2nd[i];
7750  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7751 
7752  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7753  VMA_VALIDATE(suballoc.offset >= offset);
7754 
7755  if(!currFree)
7756  {
7757  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7758  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7759  sumUsedSize += suballoc.size;
7760  }
7761  else
7762  {
7763  ++nullItem2ndCount;
7764  }
7765 
7766  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7767  }
7768 
7769  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7770  }
7771 
7772  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7773  {
7774  const VmaSuballocation& suballoc = suballocations1st[i];
7775  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7776  suballoc.hAllocation == VK_NULL_HANDLE);
7777  }
7778 
7779  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7780 
7781  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7782  {
7783  const VmaSuballocation& suballoc = suballocations1st[i];
7784  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7785 
7786  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7787  VMA_VALIDATE(suballoc.offset >= offset);
7788  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7789 
7790  if(!currFree)
7791  {
7792  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7793  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7794  sumUsedSize += suballoc.size;
7795  }
7796  else
7797  {
7798  ++nullItem1stCount;
7799  }
7800 
7801  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7802  }
7803  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7804 
7805  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7806  {
7807  const size_t suballoc2ndCount = suballocations2nd.size();
7808  size_t nullItem2ndCount = 0;
7809  for(size_t i = suballoc2ndCount; i--; )
7810  {
7811  const VmaSuballocation& suballoc = suballocations2nd[i];
7812  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7813 
7814  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7815  VMA_VALIDATE(suballoc.offset >= offset);
7816 
7817  if(!currFree)
7818  {
7819  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7820  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7821  sumUsedSize += suballoc.size;
7822  }
7823  else
7824  {
7825  ++nullItem2ndCount;
7826  }
7827 
7828  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7829  }
7830 
7831  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7832  }
7833 
7834  VMA_VALIDATE(offset <= GetSize());
7835  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7836 
7837  return true;
7838 }
7839 
7840 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7841 {
7842  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7843  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7844 }
7845 
7846 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7847 {
7848  const VkDeviceSize size = GetSize();
7849 
7850  /*
7851  We don't consider gaps inside allocation vectors with freed allocations because
7852  they are not suitable for reuse in a linear allocator. We consider only space that
7853  is available for new allocations.
7854  */
7855  if(IsEmpty())
7856  {
7857  return size;
7858  }
7859 
7860  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7861 
7862  switch(m_2ndVectorMode)
7863  {
7864  case SECOND_VECTOR_EMPTY:
7865  /*
7866  Available space is after end of 1st, as well as before beginning of 1st (which
7867  would make it a ring buffer).
7868  */
7869  {
7870  const size_t suballocations1stCount = suballocations1st.size();
7871  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7872  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7873  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7874  return VMA_MAX(
7875  firstSuballoc.offset,
7876  size - (lastSuballoc.offset + lastSuballoc.size));
7877  }
7878  break;
7879 
7880  case SECOND_VECTOR_RING_BUFFER:
7881  /*
7882  Available space is only between end of 2nd and beginning of 1st.
7883  */
7884  {
7885  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7886  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7887  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7888  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7889  }
7890  break;
7891 
7892  case SECOND_VECTOR_DOUBLE_STACK:
7893  /*
7894  Available space is only between end of 1st and top of 2nd.
7895  */
7896  {
7897  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7898  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7899  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7900  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7901  }
7902  break;
7903 
7904  default:
7905  VMA_ASSERT(0);
7906  return 0;
7907  }
7908 }
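/*
Editorial diagram of the three second-vector modes referenced above
(illustrative; offset 0 on the left, GetSize() on the right):

    SECOND_VECTOR_EMPTY:         | ..free.. ###1st### ....free.... |
    SECOND_VECTOR_RING_BUFFER:   | ###2nd### ..free.. ###1st###    |
    SECOND_VECTOR_DOUBLE_STACK:  | ###1st### ....free.... ###2nd###|
*/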
7909 
7910 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7911 {
7912  const VkDeviceSize size = GetSize();
7913  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7914  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7915  const size_t suballoc1stCount = suballocations1st.size();
7916  const size_t suballoc2ndCount = suballocations2nd.size();
7917 
7918  outInfo.blockCount = 1;
7919  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7920  outInfo.unusedRangeCount = 0;
7921  outInfo.usedBytes = 0;
7922  outInfo.allocationSizeMin = UINT64_MAX;
7923  outInfo.allocationSizeMax = 0;
7924  outInfo.unusedRangeSizeMin = UINT64_MAX;
7925  outInfo.unusedRangeSizeMax = 0;
7926 
7927  VkDeviceSize lastOffset = 0;
7928 
7929  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7930  {
7931  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7932  size_t nextAlloc2ndIndex = 0;
7933  while(lastOffset < freeSpace2ndTo1stEnd)
7934  {
7935  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7936  while(nextAlloc2ndIndex < suballoc2ndCount &&
7937  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7938  {
7939  ++nextAlloc2ndIndex;
7940  }
7941 
7942  // Found non-null allocation.
7943  if(nextAlloc2ndIndex < suballoc2ndCount)
7944  {
7945  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7946 
7947  // 1. Process free space before this allocation.
7948  if(lastOffset < suballoc.offset)
7949  {
7950  // There is free space from lastOffset to suballoc.offset.
7951  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7952  ++outInfo.unusedRangeCount;
7953  outInfo.unusedBytes += unusedRangeSize;
7954  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7955  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7956  }
7957 
7958  // 2. Process this allocation.
7959  // There is allocation with suballoc.offset, suballoc.size.
7960  outInfo.usedBytes += suballoc.size;
7961  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7962  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7963 
7964  // 3. Prepare for next iteration.
7965  lastOffset = suballoc.offset + suballoc.size;
7966  ++nextAlloc2ndIndex;
7967  }
7968  // We are at the end.
7969  else
7970  {
7971  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7972  if(lastOffset < freeSpace2ndTo1stEnd)
7973  {
7974  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7975  ++outInfo.unusedRangeCount;
7976  outInfo.unusedBytes += unusedRangeSize;
7977  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7978  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7979  }
7980 
7981  // End of loop.
7982  lastOffset = freeSpace2ndTo1stEnd;
7983  }
7984  }
7985  }
7986 
7987  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7988  const VkDeviceSize freeSpace1stTo2ndEnd =
7989  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7990  while(lastOffset < freeSpace1stTo2ndEnd)
7991  {
7992  // Find next non-null allocation or move nextAlloc1stIndex to the end.
7993  while(nextAlloc1stIndex < suballoc1stCount &&
7994  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7995  {
7996  ++nextAlloc1stIndex;
7997  }
7998 
7999  // Found non-null allocation.
8000  if(nextAlloc1stIndex < suballoc1stCount)
8001  {
8002  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8003 
8004  // 1. Process free space before this allocation.
8005  if(lastOffset < suballoc.offset)
8006  {
8007  // There is free space from lastOffset to suballoc.offset.
8008  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8009  ++outInfo.unusedRangeCount;
8010  outInfo.unusedBytes += unusedRangeSize;
8011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8012  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8013  }
8014 
8015  // 2. Process this allocation.
8016  // There is allocation with suballoc.offset, suballoc.size.
8017  outInfo.usedBytes += suballoc.size;
8018  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8019  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8020 
8021  // 3. Prepare for next iteration.
8022  lastOffset = suballoc.offset + suballoc.size;
8023  ++nextAlloc1stIndex;
8024  }
8025  // We are at the end.
8026  else
8027  {
8028  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8029  if(lastOffset < freeSpace1stTo2ndEnd)
8030  {
8031  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8032  ++outInfo.unusedRangeCount;
8033  outInfo.unusedBytes += unusedRangeSize;
8034  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8035  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8036  }
8037 
8038  // End of loop.
8039  lastOffset = freeSpace1stTo2ndEnd;
8040  }
8041  }
8042 
8043  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8044  {
8045  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8046  while(lastOffset < size)
8047  {
8048  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8049  while(nextAlloc2ndIndex != SIZE_MAX &&
8050  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8051  {
8052  --nextAlloc2ndIndex;
8053  }
8054 
8055  // Found non-null allocation.
8056  if(nextAlloc2ndIndex != SIZE_MAX)
8057  {
8058  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8059 
8060  // 1. Process free space before this allocation.
8061  if(lastOffset < suballoc.offset)
8062  {
8063  // There is free space from lastOffset to suballoc.offset.
8064  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8065  ++outInfo.unusedRangeCount;
8066  outInfo.unusedBytes += unusedRangeSize;
8067  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8068  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8069  }
8070 
8071  // 2. Process this allocation.
8072  // There is allocation with suballoc.offset, suballoc.size.
8073  outInfo.usedBytes += suballoc.size;
8074  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8075  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
8076 
8077  // 3. Prepare for next iteration.
8078  lastOffset = suballoc.offset + suballoc.size;
8079  --nextAlloc2ndIndex;
8080  }
8081  // We are at the end.
8082  else
8083  {
8084  // There is free space from lastOffset to size.
8085  if(lastOffset < size)
8086  {
8087  const VkDeviceSize unusedRangeSize = size - lastOffset;
8088  ++outInfo.unusedRangeCount;
8089  outInfo.unusedBytes += unusedRangeSize;
8090  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8091  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8092  }
8093 
8094  // End of loop.
8095  lastOffset = size;
8096  }
8097  }
8098  }
8099 
8100  outInfo.unusedBytes = size - outInfo.usedBytes;
8101 }
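// --- Editor's sketch, not part of vk_mem_alloc.h -----------------------------
// CalcAllocationStatInfo() above folds every allocation and free-range size
// into min/max fields that start at the sentinels UINT64_MAX and 0, so the
// first observed value replaces both. A minimal standalone version of that
// idiom (hypothetical type name; reuses this file's VMA_MIN/VMA_MAX macros):
struct VmaExampleRangeStats
{
    uint64_t sizeMin = UINT64_MAX; // sentinel: any real size is smaller
    uint64_t sizeMax = 0;          // sentinel: any real size is larger
    void Add(uint64_t size)
    {
        sizeMin = VMA_MIN(sizeMin, size);
        sizeMax = VMA_MAX(sizeMax, size);
    }
};
// -----------------------------------------------------------------------------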
8102 
8103 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8104 {
8105  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8106  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8107  const VkDeviceSize size = GetSize();
8108  const size_t suballoc1stCount = suballocations1st.size();
8109  const size_t suballoc2ndCount = suballocations2nd.size();
8110 
8111  inoutStats.size += size;
8112 
8113  VkDeviceSize lastOffset = 0;
8114 
8115  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8116  {
8117  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8118  size_t nextAlloc2ndIndex = 0;
8119  while(lastOffset < freeSpace2ndTo1stEnd)
8120  {
8121  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8122  while(nextAlloc2ndIndex < suballoc2ndCount &&
8123  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8124  {
8125  ++nextAlloc2ndIndex;
8126  }
8127 
8128  // Found non-null allocation.
8129  if(nextAlloc2ndIndex < suballoc2ndCount)
8130  {
8131  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8132 
8133  // 1. Process free space before this allocation.
8134  if(lastOffset < suballoc.offset)
8135  {
8136  // There is free space from lastOffset to suballoc.offset.
8137  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8138  inoutStats.unusedSize += unusedRangeSize;
8139  ++inoutStats.unusedRangeCount;
8140  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8141  }
8142 
8143  // 2. Process this allocation.
8144  // There is allocation with suballoc.offset, suballoc.size.
8145  ++inoutStats.allocationCount;
8146 
8147  // 3. Prepare for next iteration.
8148  lastOffset = suballoc.offset + suballoc.size;
8149  ++nextAlloc2ndIndex;
8150  }
8151  // We are at the end.
8152  else
8153  {
8154  if(lastOffset < freeSpace2ndTo1stEnd)
8155  {
8156  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8157  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8158  inoutStats.unusedSize += unusedRangeSize;
8159  ++inoutStats.unusedRangeCount;
8160  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8161  }
8162 
8163  // End of loop.
8164  lastOffset = freeSpace2ndTo1stEnd;
8165  }
8166  }
8167  }
8168 
8169  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8170  const VkDeviceSize freeSpace1stTo2ndEnd =
8171  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8172  while(lastOffset < freeSpace1stTo2ndEnd)
8173  {
8174  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8175  while(nextAlloc1stIndex < suballoc1stCount &&
8176  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8177  {
8178  ++nextAlloc1stIndex;
8179  }
8180 
8181  // Found non-null allocation.
8182  if(nextAlloc1stIndex < suballoc1stCount)
8183  {
8184  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8185 
8186  // 1. Process free space before this allocation.
8187  if(lastOffset < suballoc.offset)
8188  {
8189  // There is free space from lastOffset to suballoc.offset.
8190  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8191  inoutStats.unusedSize += unusedRangeSize;
8192  ++inoutStats.unusedRangeCount;
8193  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8194  }
8195 
8196  // 2. Process this allocation.
8197  // There is allocation with suballoc.offset, suballoc.size.
8198  ++inoutStats.allocationCount;
8199 
8200  // 3. Prepare for next iteration.
8201  lastOffset = suballoc.offset + suballoc.size;
8202  ++nextAlloc1stIndex;
8203  }
8204  // We are at the end.
8205  else
8206  {
8207  if(lastOffset < freeSpace1stTo2ndEnd)
8208  {
8209  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8210  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8211  inoutStats.unusedSize += unusedRangeSize;
8212  ++inoutStats.unusedRangeCount;
8213  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8214  }
8215 
8216  // End of loop.
8217  lastOffset = freeSpace1stTo2ndEnd;
8218  }
8219  }
8220 
8221  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8222  {
8223  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8224  while(lastOffset < size)
8225  {
8226  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8227  while(nextAlloc2ndIndex != SIZE_MAX &&
8228  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8229  {
8230  --nextAlloc2ndIndex;
8231  }
8232 
8233  // Found non-null allocation.
8234  if(nextAlloc2ndIndex != SIZE_MAX)
8235  {
8236  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8237 
8238  // 1. Process free space before this allocation.
8239  if(lastOffset < suballoc.offset)
8240  {
8241  // There is free space from lastOffset to suballoc.offset.
8242  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8243  inoutStats.unusedSize += unusedRangeSize;
8244  ++inoutStats.unusedRangeCount;
8245  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8246  }
8247 
8248  // 2. Process this allocation.
8249  // There is allocation with suballoc.offset, suballoc.size.
8250  ++inoutStats.allocationCount;
8251 
8252  // 3. Prepare for next iteration.
8253  lastOffset = suballoc.offset + suballoc.size;
8254  --nextAlloc2ndIndex;
8255  }
8256  // We are at the end.
8257  else
8258  {
8259  if(lastOffset < size)
8260  {
8261  // There is free space from lastOffset to size.
8262  const VkDeviceSize unusedRangeSize = size - lastOffset;
8263  inoutStats.unusedSize += unusedRangeSize;
8264  ++inoutStats.unusedRangeCount;
8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8266  }
8267 
8268  // End of loop.
8269  lastOffset = size;
8270  }
8271  }
8272  }
8273 }
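// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// CalcAllocationStatInfo() and AddPoolStats() walk the block in strictly
// increasing offset order, which explains the three loops in each:
//
//   SECOND_VECTOR_RING_BUFFER:   |2nd vector ->|  gap  |1st vector ->        |
//   SECOND_VECTOR_DOUBLE_STACK:  |1st vector ->|  gap  |        <- 2nd vector|
//   offset 0                                                        GetSize()
//
// In ring-buffer mode the 2nd vector holds the lowest offsets, so it is
// scanned forward first; in double-stack mode the 2nd vector grows downward
// from the end of the block, so it is scanned backward, after the 1st.
// -------------------------------------------------------------------------------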
8274 
8275 #if VMA_STATS_STRING_ENABLED
8276 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8277 {
8278  const VkDeviceSize size = GetSize();
8279  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8280  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8281  const size_t suballoc1stCount = suballocations1st.size();
8282  const size_t suballoc2ndCount = suballocations2nd.size();
8283 
8284  // FIRST PASS
8285 
8286  size_t unusedRangeCount = 0;
8287  VkDeviceSize usedBytes = 0;
8288 
8289  VkDeviceSize lastOffset = 0;
8290 
8291  size_t alloc2ndCount = 0;
8292  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8293  {
8294  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8295  size_t nextAlloc2ndIndex = 0;
8296  while(lastOffset < freeSpace2ndTo1stEnd)
8297  {
8298  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8299  while(nextAlloc2ndIndex < suballoc2ndCount &&
8300  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8301  {
8302  ++nextAlloc2ndIndex;
8303  }
8304 
8305  // Found non-null allocation.
8306  if(nextAlloc2ndIndex < suballoc2ndCount)
8307  {
8308  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8309 
8310  // 1. Process free space before this allocation.
8311  if(lastOffset < suballoc.offset)
8312  {
8313  // There is free space from lastOffset to suballoc.offset.
8314  ++unusedRangeCount;
8315  }
8316 
8317  // 2. Process this allocation.
8318  // There is allocation with suballoc.offset, suballoc.size.
8319  ++alloc2ndCount;
8320  usedBytes += suballoc.size;
8321 
8322  // 3. Prepare for next iteration.
8323  lastOffset = suballoc.offset + suballoc.size;
8324  ++nextAlloc2ndIndex;
8325  }
8326  // We are at the end.
8327  else
8328  {
8329  if(lastOffset < freeSpace2ndTo1stEnd)
8330  {
8331  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8332  ++unusedRangeCount;
8333  }
8334 
8335  // End of loop.
8336  lastOffset = freeSpace2ndTo1stEnd;
8337  }
8338  }
8339  }
8340 
8341  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8342  size_t alloc1stCount = 0;
8343  const VkDeviceSize freeSpace1stTo2ndEnd =
8344  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8345  while(lastOffset < freeSpace1stTo2ndEnd)
8346  {
8347  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8348  while(nextAlloc1stIndex < suballoc1stCount &&
8349  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8350  {
8351  ++nextAlloc1stIndex;
8352  }
8353 
8354  // Found non-null allocation.
8355  if(nextAlloc1stIndex < suballoc1stCount)
8356  {
8357  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8358 
8359  // 1. Process free space before this allocation.
8360  if(lastOffset < suballoc.offset)
8361  {
8362  // There is free space from lastOffset to suballoc.offset.
8363  ++unusedRangeCount;
8364  }
8365 
8366  // 2. Process this allocation.
8367  // There is allocation with suballoc.offset, suballoc.size.
8368  ++alloc1stCount;
8369  usedBytes += suballoc.size;
8370 
8371  // 3. Prepare for next iteration.
8372  lastOffset = suballoc.offset + suballoc.size;
8373  ++nextAlloc1stIndex;
8374  }
8375  // We are at the end.
8376  else
8377  {
8378  if(lastOffset < freeSpace1stTo2ndEnd)
8379  {
8380  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8381  ++unusedRangeCount;
8382  }
8383 
8384  // End of loop.
8385  lastOffset = freeSpace1stTo2ndEnd;
8386  }
8387  }
8388 
8389  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8390  {
8391  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8392  while(lastOffset < size)
8393  {
8394  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8395  while(nextAlloc2ndIndex != SIZE_MAX &&
8396  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8397  {
8398  --nextAlloc2ndIndex;
8399  }
8400 
8401  // Found non-null allocation.
8402  if(nextAlloc2ndIndex != SIZE_MAX)
8403  {
8404  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8405 
8406  // 1. Process free space before this allocation.
8407  if(lastOffset < suballoc.offset)
8408  {
8409  // There is free space from lastOffset to suballoc.offset.
8410  ++unusedRangeCount;
8411  }
8412 
8413  // 2. Process this allocation.
8414  // There is allocation with suballoc.offset, suballoc.size.
8415  ++alloc2ndCount;
8416  usedBytes += suballoc.size;
8417 
8418  // 3. Prepare for next iteration.
8419  lastOffset = suballoc.offset + suballoc.size;
8420  --nextAlloc2ndIndex;
8421  }
8422  // We are at the end.
8423  else
8424  {
8425  if(lastOffset < size)
8426  {
8427  // There is free space from lastOffset to size.
8428  ++unusedRangeCount;
8429  }
8430 
8431  // End of loop.
8432  lastOffset = size;
8433  }
8434  }
8435  }
8436 
8437  const VkDeviceSize unusedBytes = size - usedBytes;
8438  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8439 
8440  // SECOND PASS
8441  lastOffset = 0;
8442 
8443  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8444  {
8445  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8446  size_t nextAlloc2ndIndex = 0;
8447  while(lastOffset < freeSpace2ndTo1stEnd)
8448  {
8449  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8450  while(nextAlloc2ndIndex < suballoc2ndCount &&
8451  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8452  {
8453  ++nextAlloc2ndIndex;
8454  }
8455 
8456  // Found non-null allocation.
8457  if(nextAlloc2ndIndex < suballoc2ndCount)
8458  {
8459  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8460 
8461  // 1. Process free space before this allocation.
8462  if(lastOffset < suballoc.offset)
8463  {
8464  // There is free space from lastOffset to suballoc.offset.
8465  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8466  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8467  }
8468 
8469  // 2. Process this allocation.
8470  // There is allocation with suballoc.offset, suballoc.size.
8471  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8472 
8473  // 3. Prepare for next iteration.
8474  lastOffset = suballoc.offset + suballoc.size;
8475  ++nextAlloc2ndIndex;
8476  }
8477  // We are at the end.
8478  else
8479  {
8480  if(lastOffset < freeSpace2ndTo1stEnd)
8481  {
8482  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8483  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8484  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8485  }
8486 
8487  // End of loop.
8488  lastOffset = freeSpace2ndTo1stEnd;
8489  }
8490  }
8491  }
8492 
8493  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8494  while(lastOffset < freeSpace1stTo2ndEnd)
8495  {
8496  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8497  while(nextAlloc1stIndex < suballoc1stCount &&
8498  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8499  {
8500  ++nextAlloc1stIndex;
8501  }
8502 
8503  // Found non-null allocation.
8504  if(nextAlloc1stIndex < suballoc1stCount)
8505  {
8506  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8507 
8508  // 1. Process free space before this allocation.
8509  if(lastOffset < suballoc.offset)
8510  {
8511  // There is free space from lastOffset to suballoc.offset.
8512  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8513  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8514  }
8515 
8516  // 2. Process this allocation.
8517  // There is allocation with suballoc.offset, suballoc.size.
8518  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8519 
8520  // 3. Prepare for next iteration.
8521  lastOffset = suballoc.offset + suballoc.size;
8522  ++nextAlloc1stIndex;
8523  }
8524  // We are at the end.
8525  else
8526  {
8527  if(lastOffset < freeSpace1stTo2ndEnd)
8528  {
8529  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8530  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8531  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8532  }
8533 
8534  // End of loop.
8535  lastOffset = freeSpace1stTo2ndEnd;
8536  }
8537  }
8538 
8539  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8540  {
8541  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8542  while(lastOffset < size)
8543  {
8544  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8545  while(nextAlloc2ndIndex != SIZE_MAX &&
8546  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8547  {
8548  --nextAlloc2ndIndex;
8549  }
8550 
8551  // Found non-null allocation.
8552  if(nextAlloc2ndIndex != SIZE_MAX)
8553  {
8554  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8555 
8556  // 1. Process free space before this allocation.
8557  if(lastOffset < suballoc.offset)
8558  {
8559  // There is free space from lastOffset to suballoc.offset.
8560  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8561  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8562  }
8563 
8564  // 2. Process this allocation.
8565  // There is allocation with suballoc.offset, suballoc.size.
8566  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8567 
8568  // 3. Prepare for next iteration.
8569  lastOffset = suballoc.offset + suballoc.size;
8570  --nextAlloc2ndIndex;
8571  }
8572  // We are at the end.
8573  else
8574  {
8575  if(lastOffset < size)
8576  {
8577  // There is free space from lastOffset to size.
8578  const VkDeviceSize unusedRangeSize = size - lastOffset;
8579  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8580  }
8581 
8582  // End of loop.
8583  lastOffset = size;
8584  }
8585  }
8586  }
8587 
8588  PrintDetailedMap_End(json);
8589 }
8590 #endif // #if VMA_STATS_STRING_ENABLED
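// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// PrintDetailedMap() deliberately runs the same traversal twice: the first
// pass only counts allocations and unused ranges so PrintDetailedMap_Begin()
// can emit the JSON summary before any entry, and the second pass then writes
// the entries themselves. This trades a second O(n) walk for not having to
// buffer the output in memory.
// -------------------------------------------------------------------------------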
8591 
8592 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8593  uint32_t currentFrameIndex,
8594  uint32_t frameInUseCount,
8595  VkDeviceSize bufferImageGranularity,
8596  VkDeviceSize allocSize,
8597  VkDeviceSize allocAlignment,
8598  bool upperAddress,
8599  VmaSuballocationType allocType,
8600  bool canMakeOtherLost,
8601  uint32_t strategy,
8602  VmaAllocationRequest* pAllocationRequest)
8603 {
8604  VMA_ASSERT(allocSize > 0);
8605  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8606  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8607  VMA_HEAVY_ASSERT(Validate());
8608 
8609  const VkDeviceSize size = GetSize();
8610  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8611  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8612 
8613  if(upperAddress)
8614  {
8615  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8616  {
8617  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8618  return false;
8619  }
8620 
8621  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8622  if(allocSize > size)
8623  {
8624  return false;
8625  }
8626  VkDeviceSize resultBaseOffset = size - allocSize;
8627  if(!suballocations2nd.empty())
8628  {
8629  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8630  resultBaseOffset = lastSuballoc.offset - allocSize;
8631  if(allocSize > lastSuballoc.offset)
8632  {
8633  return false;
8634  }
8635  }
8636 
8637  // Start from offset equal to end of free space.
8638  VkDeviceSize resultOffset = resultBaseOffset;
8639 
8640  // Apply VMA_DEBUG_MARGIN at the end.
8641  if(VMA_DEBUG_MARGIN > 0)
8642  {
8643  if(resultOffset < VMA_DEBUG_MARGIN)
8644  {
8645  return false;
8646  }
8647  resultOffset -= VMA_DEBUG_MARGIN;
8648  }
8649 
8650  // Apply alignment.
8651  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8652 
8653  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8654  // Make bigger alignment if necessary.
8655  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8656  {
8657  bool bufferImageGranularityConflict = false;
8658  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8659  {
8660  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8661  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8662  {
8663  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8664  {
8665  bufferImageGranularityConflict = true;
8666  break;
8667  }
8668  }
8669  else
8670  // Already on previous page.
8671  break;
8672  }
8673  if(bufferImageGranularityConflict)
8674  {
8675  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8676  }
8677  }
8678 
8679  // There is enough free space.
8680  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8681  suballocations1st.back().offset + suballocations1st.back().size :
8682  0;
8683  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8684  {
8685  // Check previous suballocations for BufferImageGranularity conflicts.
8686  // If conflict exists, allocation cannot be made here.
8687  if(bufferImageGranularity > 1)
8688  {
8689  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8690  {
8691  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8692  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8693  {
8694  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8695  {
8696  return false;
8697  }
8698  }
8699  else
8700  {
8701  // Already on next page.
8702  break;
8703  }
8704  }
8705  }
8706 
8707  // All tests passed: Success.
8708  pAllocationRequest->offset = resultOffset;
8709  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8710  pAllocationRequest->sumItemSize = 0;
8711  // pAllocationRequest->item unused.
8712  pAllocationRequest->itemsToMakeLostCount = 0;
8713  return true;
8714  }
8715  }
8716  else // !upperAddress
8717  {
8718  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8719  {
8720  // Try to allocate at the end of 1st vector.
8721 
8722  VkDeviceSize resultBaseOffset = 0;
8723  if(!suballocations1st.empty())
8724  {
8725  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8726  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8727  }
8728 
8729  // Start from offset equal to beginning of free space.
8730  VkDeviceSize resultOffset = resultBaseOffset;
8731 
8732  // Apply VMA_DEBUG_MARGIN at the beginning.
8733  if(VMA_DEBUG_MARGIN > 0)
8734  {
8735  resultOffset += VMA_DEBUG_MARGIN;
8736  }
8737 
8738  // Apply alignment.
8739  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8740 
8741  // Check previous suballocations for BufferImageGranularity conflicts.
8742  // Make bigger alignment if necessary.
8743  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8744  {
8745  bool bufferImageGranularityConflict = false;
8746  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8747  {
8748  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8749  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8750  {
8751  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8752  {
8753  bufferImageGranularityConflict = true;
8754  break;
8755  }
8756  }
8757  else
8758  // Already on previous page.
8759  break;
8760  }
8761  if(bufferImageGranularityConflict)
8762  {
8763  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8764  }
8765  }
8766 
8767  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8768  suballocations2nd.back().offset : size;
8769 
8770  // There is enough free space at the end after alignment.
8771  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8772  {
8773  // Check next suballocations for BufferImageGranularity conflicts.
8774  // If conflict exists, allocation cannot be made here.
8775  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8776  {
8777  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8778  {
8779  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8780  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8781  {
8782  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8783  {
8784  return false;
8785  }
8786  }
8787  else
8788  {
8789  // Already on previous page.
8790  break;
8791  }
8792  }
8793  }
8794 
8795  // All tests passed: Success.
8796  pAllocationRequest->offset = resultOffset;
8797  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8798  pAllocationRequest->sumItemSize = 0;
8799  // pAllocationRequest->item unused.
8800  pAllocationRequest->itemsToMakeLostCount = 0;
8801  return true;
8802  }
8803  }
8804 
8805  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8806  // beginning of 1st vector as the end of free space.
8807  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8808  {
8809  VMA_ASSERT(!suballocations1st.empty());
8810 
8811  VkDeviceSize resultBaseOffset = 0;
8812  if(!suballocations2nd.empty())
8813  {
8814  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8815  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8816  }
8817 
8818  // Start from offset equal to beginning of free space.
8819  VkDeviceSize resultOffset = resultBaseOffset;
8820 
8821  // Apply VMA_DEBUG_MARGIN at the beginning.
8822  if(VMA_DEBUG_MARGIN > 0)
8823  {
8824  resultOffset += VMA_DEBUG_MARGIN;
8825  }
8826 
8827  // Apply alignment.
8828  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8829 
8830  // Check previous suballocations for BufferImageGranularity conflicts.
8831  // Make bigger alignment if necessary.
8832  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8833  {
8834  bool bufferImageGranularityConflict = false;
8835  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8836  {
8837  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8838  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8839  {
8840  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8841  {
8842  bufferImageGranularityConflict = true;
8843  break;
8844  }
8845  }
8846  else
8847  // Already on previous page.
8848  break;
8849  }
8850  if(bufferImageGranularityConflict)
8851  {
8852  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8853  }
8854  }
8855 
8856  pAllocationRequest->itemsToMakeLostCount = 0;
8857  pAllocationRequest->sumItemSize = 0;
8858  size_t index1st = m_1stNullItemsBeginCount;
8859 
8860  if(canMakeOtherLost)
8861  {
8862  while(index1st < suballocations1st.size() &&
8863  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8864  {
8865  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8866  const VmaSuballocation& suballoc = suballocations1st[index1st];
8867  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8868  {
8869  // No problem.
8870  }
8871  else
8872  {
8873  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8874  if(suballoc.hAllocation->CanBecomeLost() &&
8875  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8876  {
8877  ++pAllocationRequest->itemsToMakeLostCount;
8878  pAllocationRequest->sumItemSize += suballoc.size;
8879  }
8880  else
8881  {
8882  return false;
8883  }
8884  }
8885  ++index1st;
8886  }
8887 
8888  // Check next suballocations for BufferImageGranularity conflicts.
8889  // If conflict exists, we must mark more allocations lost or fail.
8890  if(bufferImageGranularity > 1)
8891  {
8892  while(index1st < suballocations1st.size())
8893  {
8894  const VmaSuballocation& suballoc = suballocations1st[index1st];
8895  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8896  {
8897  if(suballoc.hAllocation != VK_NULL_HANDLE)
8898  {
8899  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8900  if(suballoc.hAllocation->CanBecomeLost() &&
8901  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8902  {
8903  ++pAllocationRequest->itemsToMakeLostCount;
8904  pAllocationRequest->sumItemSize += suballoc.size;
8905  }
8906  else
8907  {
8908  return false;
8909  }
8910  }
8911  }
8912  else
8913  {
8914  // Already on next page.
8915  break;
8916  }
8917  ++index1st;
8918  }
8919  }
8920  }
8921 
8922  // There is enough free space at the end after alignment.
8923  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8924  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8925  {
8926  // Check next suballocations for BufferImageGranularity conflicts.
8927  // If conflict exists, allocation cannot be made here.
8928  if(bufferImageGranularity > 1)
8929  {
8930  for(size_t nextSuballocIndex = index1st;
8931  nextSuballocIndex < suballocations1st.size();
8932  nextSuballocIndex++)
8933  {
8934  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8935  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8936  {
8937  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8938  {
8939  return false;
8940  }
8941  }
8942  else
8943  {
8944  // Already on next page.
8945  break;
8946  }
8947  }
8948  }
8949 
8950  // All tests passed: Success.
8951  pAllocationRequest->offset = resultOffset;
8952  pAllocationRequest->sumFreeSize =
8953  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8954  - resultBaseOffset
8955  - pAllocationRequest->sumItemSize;
8956  // pAllocationRequest->item unused.
8957  return true;
8958  }
8959  }
8960  }
8961 
8962  return false;
8963 }
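// --- Editor's sketch, not part of vk_mem_alloc.h ------------------------------
// CreateAllocationRequest() leans on VmaAlignUp()/VmaAlignDown(), defined
// earlier in this file. For the common case of power-of-two alignment they
// reduce to the usual bit tricks (hypothetical names below; the real helpers
// also handle non-power-of-two alignment via integer division):
static inline VkDeviceSize VmaExampleAlignUp(VkDeviceSize val, VkDeviceSize align)
{
    return (val + align - 1) & ~(align - 1); // align must be a power of two
}
static inline VkDeviceSize VmaExampleAlignDown(VkDeviceSize val, VkDeviceSize align)
{
    return val & ~(align - 1); // align must be a power of two
}
// e.g. VmaExampleAlignUp(13, 8) == 16 and VmaExampleAlignDown(13, 8) == 8.
// -------------------------------------------------------------------------------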
8964 
8965 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
8966  uint32_t currentFrameIndex,
8967  uint32_t frameInUseCount,
8968  VmaAllocationRequest* pAllocationRequest)
8969 {
8970  if(pAllocationRequest->itemsToMakeLostCount == 0)
8971  {
8972  return true;
8973  }
8974 
8975  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
8976 
8977  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8978  size_t index1st = m_1stNullItemsBeginCount;
8979  size_t madeLostCount = 0;
8980  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
8981  {
8982  VMA_ASSERT(index1st < suballocations1st.size());
8983  VmaSuballocation& suballoc = suballocations1st[index1st];
8984  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8985  {
8986  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8987  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
8988  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8989  {
8990  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8991  suballoc.hAllocation = VK_NULL_HANDLE;
8992  m_SumFreeSize += suballoc.size;
8993  ++m_1stNullItemsMiddleCount;
8994  ++madeLostCount;
8995  }
8996  else
8997  {
8998  return false;
8999  }
9000  }
9001  ++index1st;
9002  }
9003 
9004  CleanupAfterFree();
9005  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
9006 
9007  return true;
9008 }
9009 
9010 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9011 {
9012  uint32_t lostAllocationCount = 0;
9013 
9014  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9015  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9016  {
9017  VmaSuballocation& suballoc = suballocations1st[i];
9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9019  suballoc.hAllocation->CanBecomeLost() &&
9020  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9021  {
9022  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9023  suballoc.hAllocation = VK_NULL_HANDLE;
9024  ++m_1stNullItemsMiddleCount;
9025  m_SumFreeSize += suballoc.size;
9026  ++lostAllocationCount;
9027  }
9028  }
9029 
9030  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9031  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9032  {
9033  VmaSuballocation& suballoc = suballocations2nd[i];
9034  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9035  suballoc.hAllocation->CanBecomeLost() &&
9036  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9037  {
9038  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9039  suballoc.hAllocation = VK_NULL_HANDLE;
9040  ++m_2ndNullItemsCount;
9041  ++lostAllocationCount;
9042  }
9043  }
9044 
9045  if(lostAllocationCount)
9046  {
9047  CleanupAfterFree();
9048  }
9049 
9050  return lostAllocationCount;
9051 }
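// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// Both Make*Lost() functions above share one eligibility rule, applied through
// CanBecomeLost()/MakeLost(): an allocation last used in frame F may be made
// lost only once F + frameInUseCount < currentFrameIndex. A worked instance:
static_assert(100 + 2 < 103,
    "an allocation last used in frame 100 with frameInUseCount 2 first becomes lost-able in frame 103");
// -------------------------------------------------------------------------------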
9052 
9053 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9054 {
9055  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9056  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9057  {
9058  const VmaSuballocation& suballoc = suballocations1st[i];
9059  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9060  {
9061  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9062  {
9063  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9064  return VK_ERROR_VALIDATION_FAILED_EXT;
9065  }
9066  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9067  {
9068  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9069  return VK_ERROR_VALIDATION_FAILED_EXT;
9070  }
9071  }
9072  }
9073 
9074  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9075  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9076  {
9077  const VmaSuballocation& suballoc = suballocations2nd[i];
9078  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9079  {
9080  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9081  {
9082  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9083  return VK_ERROR_VALIDATION_FAILED_EXT;
9084  }
9085  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9086  {
9087  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9088  return VK_ERROR_VALIDATION_FAILED_EXT;
9089  }
9090  }
9091  }
9092 
9093  return VK_SUCCESS;
9094 }
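// --- Editor's sketch, not part of vk_mem_alloc.h ------------------------------
// CheckCorruption() assumes the VMA_DEBUG_MARGIN bytes before and after every
// allocation were filled with a 32-bit magic pattern when the allocation was
// made; VmaValidateMagicValue() (defined earlier in this file) then only has
// to rescan the margin. A sketch of that scan (hypothetical name, magic value
// passed in rather than hard-coded):
static bool VmaExampleValidateMagic(const void* pBlockData, VkDeviceSize offset, uint32_t magic)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pBlockData + offset);
    for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i)
    {
        if(pSrc[i] != magic)
        {
            return false; // some write ran past its allocation into the margin
        }
    }
    return true;
}
// -------------------------------------------------------------------------------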
9095 
9096 void VmaBlockMetadata_Linear::Alloc(
9097  const VmaAllocationRequest& request,
9098  VmaSuballocationType type,
9099  VkDeviceSize allocSize,
9100  bool upperAddress,
9101  VmaAllocation hAllocation)
9102 {
9103  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9104 
9105  if(upperAddress)
9106  {
9107  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9108  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9109  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9110  suballocations2nd.push_back(newSuballoc);
9111  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9112  }
9113  else
9114  {
9115  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9116 
9117  // First allocation.
9118  if(suballocations1st.empty())
9119  {
9120  suballocations1st.push_back(newSuballoc);
9121  }
9122  else
9123  {
9124  // New allocation at the end of 1st vector.
9125  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9126  {
9127  // Check if it fits before the end of the block.
9128  VMA_ASSERT(request.offset + allocSize <= GetSize());
9129  suballocations1st.push_back(newSuballoc);
9130  }
9131  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9132  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9133  {
9134  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9135 
9136  switch(m_2ndVectorMode)
9137  {
9138  case SECOND_VECTOR_EMPTY:
9139  // First allocation from second part ring buffer.
9140  VMA_ASSERT(suballocations2nd.empty());
9141  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9142  break;
9143  case SECOND_VECTOR_RING_BUFFER:
9144  // 2-part ring buffer is already started.
9145  VMA_ASSERT(!suballocations2nd.empty());
9146  break;
9147  case SECOND_VECTOR_DOUBLE_STACK:
9148  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9149  break;
9150  default:
9151  VMA_ASSERT(0);
9152  }
9153 
9154  suballocations2nd.push_back(newSuballoc);
9155  }
9156  else
9157  {
9158  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9159  }
9160  }
9161  }
9162 
9163  m_SumFreeSize -= newSuballoc.size;
9164 }
9165 
9166 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9167 {
9168  FreeAtOffset(allocation->GetOffset());
9169 }
9170 
9171 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9172 {
9173  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9174  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9175 
9176  if(!suballocations1st.empty())
9177  {
9178  // First allocation: Mark it as next empty at the beginning.
9179  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9180  if(firstSuballoc.offset == offset)
9181  {
9182  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9183  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9184  m_SumFreeSize += firstSuballoc.size;
9185  ++m_1stNullItemsBeginCount;
9186  CleanupAfterFree();
9187  return;
9188  }
9189  }
9190 
9191  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9192  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9193  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9194  {
9195  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9196  if(lastSuballoc.offset == offset)
9197  {
9198  m_SumFreeSize += lastSuballoc.size;
9199  suballocations2nd.pop_back();
9200  CleanupAfterFree();
9201  return;
9202  }
9203  }
9204  // Last allocation in 1st vector.
9205  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9206  {
9207  VmaSuballocation& lastSuballoc = suballocations1st.back();
9208  if(lastSuballoc.offset == offset)
9209  {
9210  m_SumFreeSize += lastSuballoc.size;
9211  suballocations1st.pop_back();
9212  CleanupAfterFree();
9213  return;
9214  }
9215  }
9216 
9217  // Item from the middle of 1st vector.
9218  {
9219  VmaSuballocation refSuballoc;
9220  refSuballoc.offset = offset;
9221  // Rest of members stays uninitialized intentionally for better performance.
9222  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9223  suballocations1st.begin() + m_1stNullItemsBeginCount,
9224  suballocations1st.end(),
9225  refSuballoc);
9226  if(it != suballocations1st.end())
9227  {
9228  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9229  it->hAllocation = VK_NULL_HANDLE;
9230  ++m_1stNullItemsMiddleCount;
9231  m_SumFreeSize += it->size;
9232  CleanupAfterFree();
9233  return;
9234  }
9235  }
9236 
9237  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9238  {
9239  // Item from the middle of 2nd vector.
9240  VmaSuballocation refSuballoc;
9241  refSuballoc.offset = offset;
9242  // Rest of members stays uninitialized intentionally for better performance.
9243  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9244  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9245  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9246  if(it != suballocations2nd.end())
9247  {
9248  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9249  it->hAllocation = VK_NULL_HANDLE;
9250  ++m_2ndNullItemsCount;
9251  m_SumFreeSize += it->size;
9252  CleanupAfterFree();
9253  return;
9254  }
9255  }
9256 
9257  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9258 }
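// --- Editor's sketch, not part of vk_mem_alloc.h ------------------------------
// The two middle-of-vector lookups above use VmaVectorFindSorted, defined
// earlier in this file: a binary search over a vector kept sorted by offset
// (ascending for the 1st vector and the ring buffer, descending for the upper
// stack, hence the two comparators). Its essential shape, assuming
// std::lower_bound from <algorithm>:
template<typename CmpLess, typename IterT, typename KeyT>
static IterT VmaExampleFindSorted(IterT beg, IterT end, const KeyT& key)
{
    IterT it = std::lower_bound(beg, end, key, CmpLess());
    // lower_bound returns the first element not ordered before key;
    // only an exact match counts as found.
    if(it != end && !CmpLess()(key, *it) && !CmpLess()(*it, key))
    {
        return it;
    }
    return end;
}
// -------------------------------------------------------------------------------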
9259 
9260 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9261 {
9262  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9263  const size_t suballocCount = AccessSuballocations1st().size();
9264  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9265 }
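// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// Rearranged, the condition above says: compact once the vector holds more
// than 32 items and null items outnumber live ones at least 3:2, i.e.
// nullItemCount >= 1.5 * liveItemCount. A worked instance exactly at the
// threshold:
static_assert(60 * 2 >= (100 - 60) * 3,
    "60 null items among 100 total (40 live) exactly reaches the 3:2 compaction threshold");
// -------------------------------------------------------------------------------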
9266 
9267 void VmaBlockMetadata_Linear::CleanupAfterFree()
9268 {
9269  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9270  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9271 
9272  if(IsEmpty())
9273  {
9274  suballocations1st.clear();
9275  suballocations2nd.clear();
9276  m_1stNullItemsBeginCount = 0;
9277  m_1stNullItemsMiddleCount = 0;
9278  m_2ndNullItemsCount = 0;
9279  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9280  }
9281  else
9282  {
9283  const size_t suballoc1stCount = suballocations1st.size();
9284  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9285  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9286 
9287  // Find more null items at the beginning of 1st vector.
9288  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9289  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9290  {
9291  ++m_1stNullItemsBeginCount;
9292  --m_1stNullItemsMiddleCount;
9293  }
9294 
9295  // Find more null items at the end of 1st vector.
9296  while(m_1stNullItemsMiddleCount > 0 &&
9297  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9298  {
9299  --m_1stNullItemsMiddleCount;
9300  suballocations1st.pop_back();
9301  }
9302 
9303  // Find more null items at the end of 2nd vector.
9304  while(m_2ndNullItemsCount > 0 &&
9305  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9306  {
9307  --m_2ndNullItemsCount;
9308  suballocations2nd.pop_back();
9309  }
9310 
9311  if(ShouldCompact1st())
9312  {
9313  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9314  size_t srcIndex = m_1stNullItemsBeginCount;
9315  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9316  {
9317  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9318  {
9319  ++srcIndex;
9320  }
9321  if(dstIndex != srcIndex)
9322  {
9323  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9324  }
9325  ++srcIndex;
9326  }
9327  suballocations1st.resize(nonNullItemCount);
9328  m_1stNullItemsBeginCount = 0;
9329  m_1stNullItemsMiddleCount = 0;
9330  }
9331 
9332  // 2nd vector became empty.
9333  if(suballocations2nd.empty())
9334  {
9335  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9336  }
9337 
9338  // 1st vector became empty.
9339  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9340  {
9341  suballocations1st.clear();
9342  m_1stNullItemsBeginCount = 0;
9343 
9344  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9345  {
9346  // Swap 1st with 2nd. Now 2nd is empty.
9347  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9348  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9349  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9350  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9351  {
9352  ++m_1stNullItemsBeginCount;
9353  --m_1stNullItemsMiddleCount;
9354  }
9355  m_2ndNullItemsCount = 0;
9356  m_1stVectorIndex ^= 1;
9357  }
9358  }
9359  }
9360 
9361  VMA_HEAVY_ASSERT(Validate());
9362 }
9363 
9364 
9365 ////////////////////////////////////////////////////////////////////////////////
9366 // class VmaBlockMetadata_Buddy
9367 
9368 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9369  VmaBlockMetadata(hAllocator),
9370  m_Root(VMA_NULL),
9371  m_AllocationCount(0),
9372  m_FreeCount(1),
9373  m_SumFreeSize(0)
9374 {
9375  memset(m_FreeList, 0, sizeof(m_FreeList));
9376 }
9377 
9378 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9379 {
9380  DeleteNode(m_Root);
9381 }
9382 
9383 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9384 {
9385  VmaBlockMetadata::Init(size);
9386 
9387  m_UsableSize = VmaPrevPow2(size);
9388  m_SumFreeSize = m_UsableSize;
9389 
9390  // Calculate m_LevelCount.
9391  m_LevelCount = 1;
9392  while(m_LevelCount < MAX_LEVELS &&
9393  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9394  {
9395  ++m_LevelCount;
9396  }
9397 
9398  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9399  rootNode->offset = 0;
9400  rootNode->type = Node::TYPE_FREE;
9401  rootNode->parent = VMA_NULL;
9402  rootNode->buddy = VMA_NULL;
9403 
9404  m_Root = rootNode;
9405  AddToFreeListFront(0, rootNode);
9406 }
9407 
9408 bool VmaBlockMetadata_Buddy::Validate() const
9409 {
9410  // Validate tree.
9411  ValidationContext ctx;
9412  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9413  {
9414  VMA_VALIDATE(false && "ValidateNode failed.");
9415  }
9416  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9417  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9418 
9419  // Validate free node lists.
9420  for(uint32_t level = 0; level < m_LevelCount; ++level)
9421  {
9422  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9423  m_FreeList[level].front->free.prev == VMA_NULL);
9424 
9425  for(Node* node = m_FreeList[level].front;
9426  node != VMA_NULL;
9427  node = node->free.next)
9428  {
9429  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9430 
9431  if(node->free.next == VMA_NULL)
9432  {
9433  VMA_VALIDATE(m_FreeList[level].back == node);
9434  }
9435  else
9436  {
9437  VMA_VALIDATE(node->free.next->free.prev == node);
9438  }
9439  }
9440  }
9441 
9442  // Validate that free lists at higher levels are empty.
9443  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9444  {
9445  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9446  }
9447 
9448  return true;
9449 }
9450 
9451 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9452 {
9453  for(uint32_t level = 0; level < m_LevelCount; ++level)
9454  {
9455  if(m_FreeList[level].front != VMA_NULL)
9456  {
9457  return LevelToNodeSize(level);
9458  }
9459  }
9460  return 0;
9461 }
9462 
9463 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9464 {
9465  const VkDeviceSize unusableSize = GetUnusableSize();
9466 
9467  outInfo.blockCount = 1;
9468 
9469  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9470  outInfo.usedBytes = outInfo.unusedBytes = 0;
9471 
9472  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9473  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9474  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9475 
9476  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9477 
9478  if(unusableSize > 0)
9479  {
9480  ++outInfo.unusedRangeCount;
9481  outInfo.unusedBytes += unusableSize;
9482  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9483  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9484  }
9485 }
9486 
9487 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9488 {
9489  const VkDeviceSize unusableSize = GetUnusableSize();
9490 
9491  inoutStats.size += GetSize();
9492  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9493  inoutStats.allocationCount += m_AllocationCount;
9494  inoutStats.unusedRangeCount += m_FreeCount;
9495  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9496 
9497  if(unusableSize > 0)
9498  {
9499  ++inoutStats.unusedRangeCount;
9500  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9501  }
9502 }
9503 
9504 #if VMA_STATS_STRING_ENABLED
9505 
9506 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9507 {
9508  // TODO optimize
9509  VmaStatInfo stat;
9510  CalcAllocationStatInfo(stat);
9511 
9512  PrintDetailedMap_Begin(
9513  json,
9514  stat.unusedBytes,
9515  stat.allocationCount,
9516  stat.unusedRangeCount);
9517 
9518  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9519 
9520  const VkDeviceSize unusableSize = GetUnusableSize();
9521  if(unusableSize > 0)
9522  {
9523  PrintDetailedMap_UnusedRange(json,
9524  m_UsableSize, // offset
9525  unusableSize); // size
9526  }
9527 
9528  PrintDetailedMap_End(json);
9529 }
9530 
9531 #endif // #if VMA_STATS_STRING_ENABLED
9532 
9533 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9534  uint32_t currentFrameIndex,
9535  uint32_t frameInUseCount,
9536  VkDeviceSize bufferImageGranularity,
9537  VkDeviceSize allocSize,
9538  VkDeviceSize allocAlignment,
9539  bool upperAddress,
9540  VmaSuballocationType allocType,
9541  bool canMakeOtherLost,
9542  uint32_t strategy,
9543  VmaAllocationRequest* pAllocationRequest)
9544 {
9545  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9546 
9547  // Simple way to respect bufferImageGranularity. May be optimized some day.
9548  // Whenever it might be an OPTIMAL image...
9549  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9550  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9551  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9552  {
9553  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9554  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9555  }
9556 
9557  if(allocSize > m_UsableSize)
9558  {
9559  return false;
9560  }
9561 
9562  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9563  for(uint32_t level = targetLevel + 1; level--; )
9564  {
9565  for(Node* freeNode = m_FreeList[level].front;
9566  freeNode != VMA_NULL;
9567  freeNode = freeNode->free.next)
9568  {
9569  if(freeNode->offset % allocAlignment == 0)
9570  {
9571  pAllocationRequest->offset = freeNode->offset;
9572  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9573  pAllocationRequest->sumItemSize = 0;
9574  pAllocationRequest->itemsToMakeLostCount = 0;
9575  pAllocationRequest->customData = (void*)(uintptr_t)level;
9576  return true;
9577  }
9578  }
9579  }
9580 
9581  return false;
9582 }
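// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// The `for(uint32_t level = targetLevel + 1; level--; )` idiom above visits
// level = targetLevel, targetLevel - 1, ..., 0 without underflowing the
// unsigned counter: an exact-fit free node is tried first, then progressively
// larger nodes, which Alloc() will split back down to targetLevel.
// -------------------------------------------------------------------------------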
9583 
9584 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9585  uint32_t currentFrameIndex,
9586  uint32_t frameInUseCount,
9587  VmaAllocationRequest* pAllocationRequest)
9588 {
9589  /*
9590  Lost allocations are not supported in buddy allocator at the moment.
9591  Support might be added in the future.
9592  */
9593  return pAllocationRequest->itemsToMakeLostCount == 0;
9594 }
9595 
9596 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9597 {
9598  /*
9599  Lost allocations are not supported in buddy allocator at the moment.
9600  Support might be added in the future.
9601  */
9602  return 0;
9603 }
9604 
9605 void VmaBlockMetadata_Buddy::Alloc(
9606  const VmaAllocationRequest& request,
9607  VmaSuballocationType type,
9608  VkDeviceSize allocSize,
9609  bool upperAddress,
9610  VmaAllocation hAllocation)
9611 {
9612  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9613  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9614 
9615  Node* currNode = m_FreeList[currLevel].front;
9616  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9617  while(currNode->offset != request.offset)
9618  {
9619  currNode = currNode->free.next;
9620  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9621  }
9622 
9623  // Go down, splitting free nodes.
9624  while(currLevel < targetLevel)
9625  {
9626  // currNode is already first free node at currLevel.
9627  // Remove it from list of free nodes at this currLevel.
9628  RemoveFromFreeList(currLevel, currNode);
9629 
9630  const uint32_t childrenLevel = currLevel + 1;
9631 
9632  // Create two free sub-nodes.
9633  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9634  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9635 
9636  leftChild->offset = currNode->offset;
9637  leftChild->type = Node::TYPE_FREE;
9638  leftChild->parent = currNode;
9639  leftChild->buddy = rightChild;
9640 
9641  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9642  rightChild->type = Node::TYPE_FREE;
9643  rightChild->parent = currNode;
9644  rightChild->buddy = leftChild;
9645 
9646  // Convert current currNode to split type.
9647  currNode->type = Node::TYPE_SPLIT;
9648  currNode->split.leftChild = leftChild;
9649 
9650  // Add child nodes to free list. Order is important!
9651  AddToFreeListFront(childrenLevel, rightChild);
9652  AddToFreeListFront(childrenLevel, leftChild);
9653 
9654  ++m_FreeCount;
9655  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9656  ++currLevel;
9657  currNode = m_FreeList[currLevel].front;
9658 
9659  /*
9660  We can be sure that currNode, as left child of node previously split,
9661  also fulfills the alignment requirement.
9662  */
9663  }
9664 
9665  // Remove from free list.
9666  VMA_ASSERT(currLevel == targetLevel &&
9667  currNode != VMA_NULL &&
9668  currNode->type == Node::TYPE_FREE);
9669  RemoveFromFreeList(currLevel, currNode);
9670 
9671  // Convert to allocation node.
9672  currNode->type = Node::TYPE_ALLOCATION;
9673  currNode->allocation.alloc = hAllocation;
9674 
9675  ++m_AllocationCount;
9676  --m_FreeCount;
9677  m_SumFreeSize -= allocSize;
9678 }
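// --- Editor's note, not part of vk_mem_alloc.h --------------------------------
// A worked run of the split loop above: with a free 256 MiB root and a request
// at the 32 MiB level, the walk splits 256 -> (128,128) -> (64,64) -> (32,32),
// allocates the leftmost 32 MiB leaf, and leaves its 32 MiB buddy plus the
// 64 MiB and 128 MiB buddies of its ancestors on the free lists of their
// levels.
// -------------------------------------------------------------------------------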
9679 
9680 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9681 {
9682  if(node->type == Node::TYPE_SPLIT)
9683  {
9684  DeleteNode(node->split.leftChild->buddy);
9685  DeleteNode(node->split.leftChild);
9686  }
9687 
9688  vma_delete(GetAllocationCallbacks(), node);
9689 }
9690 
9691 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9692 {
9693  VMA_VALIDATE(level < m_LevelCount);
9694  VMA_VALIDATE(curr->parent == parent);
9695  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9696  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9697  switch(curr->type)
9698  {
9699  case Node::TYPE_FREE:
9700  // curr->free.prev, next are validated separately.
9701  ctx.calculatedSumFreeSize += levelNodeSize;
9702  ++ctx.calculatedFreeCount;
9703  break;
9704  case Node::TYPE_ALLOCATION:
9705  ++ctx.calculatedAllocationCount;
9706  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); // Validate the handle before dereferencing it below.
9707  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9708  break;
9709  case Node::TYPE_SPLIT:
9710  {
9711  const uint32_t childrenLevel = level + 1;
9712  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9713  const Node* const leftChild = curr->split.leftChild;
9714  VMA_VALIDATE(leftChild != VMA_NULL);
9715  VMA_VALIDATE(leftChild->offset == curr->offset);
9716  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9717  {
9718  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9719  }
9720  const Node* const rightChild = leftChild->buddy;
9721  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9722  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9723  {
9724  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9725  }
9726  }
9727  break;
9728  default:
9729  return false;
9730  }
9731 
9732  return true;
9733 }
9734 
9735 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9736 {
9737  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9738  uint32_t level = 0;
9739  VkDeviceSize currLevelNodeSize = m_UsableSize;
9740  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9741  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9742  {
9743  ++level;
9744  currLevelNodeSize = nextLevelNodeSize;
9745  nextLevelNodeSize = currLevelNodeSize >> 1;
9746  }
9747  return level;
9748 }
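/*
Example mapping for m_UsableSize == 1024 (illustrative):
    allocSize in (512, 1024] -> level 0 (node size 1024)
    allocSize in (256, 512]  -> level 1 (node size 512)
    allocSize in (128, 256]  -> level 2 (node size 256)
i.e. the deepest level whose node size still fits the allocation.
*/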
9749 
9750 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9751 {
9752  // Find node and level.
9753  Node* node = m_Root;
9754  VkDeviceSize nodeOffset = 0;
9755  uint32_t level = 0;
9756  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9757  while(node->type == Node::TYPE_SPLIT)
9758  {
9759  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9760  if(offset < nodeOffset + nextLevelSize)
9761  {
9762  node = node->split.leftChild;
9763  }
9764  else
9765  {
9766  node = node->split.leftChild->buddy;
9767  nodeOffset += nextLevelSize;
9768  }
9769  ++level;
9770  levelNodeSize = nextLevelSize;
9771  }
9772 
9773  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9774  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9775 
9776  ++m_FreeCount;
9777  --m_AllocationCount;
9778  m_SumFreeSize += alloc->GetSize();
9779 
9780  node->type = Node::TYPE_FREE;
9781 
9782  // Join free nodes if possible.
9783  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9784  {
9785  RemoveFromFreeList(level, node->buddy);
9786  Node* const parent = node->parent;
9787 
9788  vma_delete(GetAllocationCallbacks(), node->buddy);
9789  vma_delete(GetAllocationCallbacks(), node);
9790  parent->type = Node::TYPE_FREE;
9791 
9792  node = parent;
9793  --level;
9794  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9795  --m_FreeCount;
9796  }
9797 
9798  AddToFreeListFront(level, node);
9799 }
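/*
Merging example (illustrative): if the tree from the Alloc() walk-through above
frees its single allocation, node [0..256) and its free buddy [256..512) are
joined into [0..512), which in turn joins with its free buddy [512..1024) back
into the root. The loop above climbs one level per iteration for as long as
the buddy is also free.
*/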
9800 
9801 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9802 {
9803  switch(node->type)
9804  {
9805  case Node::TYPE_FREE:
9806  ++outInfo.unusedRangeCount;
9807  outInfo.unusedBytes += levelNodeSize;
9808  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9809  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9810  break;
9811  case Node::TYPE_ALLOCATION:
9812  {
9813  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9814  ++outInfo.allocationCount;
9815  outInfo.usedBytes += allocSize;
9816  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9817  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9818 
9819  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9820  if(unusedRangeSize > 0)
9821  {
9822  ++outInfo.unusedRangeCount;
9823  outInfo.unusedBytes += unusedRangeSize;
9824  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9825  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9826  }
9827  }
9828  break;
9829  case Node::TYPE_SPLIT:
9830  {
9831  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9832  const Node* const leftChild = node->split.leftChild;
9833  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9834  const Node* const rightChild = leftChild->buddy;
9835  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9836  }
9837  break;
9838  default:
9839  VMA_ASSERT(0);
9840  }
9841 }
9842 
9843 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9844 {
9845  VMA_ASSERT(node->type == Node::TYPE_FREE);
9846 
9847  // List is empty.
9848  Node* const frontNode = m_FreeList[level].front;
9849  if(frontNode == VMA_NULL)
9850  {
9851  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9852  node->free.prev = node->free.next = VMA_NULL;
9853  m_FreeList[level].front = m_FreeList[level].back = node;
9854  }
9855  else
9856  {
9857  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9858  node->free.prev = VMA_NULL;
9859  node->free.next = frontNode;
9860  frontNode->free.prev = node;
9861  m_FreeList[level].front = node;
9862  }
9863 }
9864 
9865 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9866 {
9867  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9868 
9869  // It is at the front.
9870  if(node->free.prev == VMA_NULL)
9871  {
9872  VMA_ASSERT(m_FreeList[level].front == node);
9873  m_FreeList[level].front = node->free.next;
9874  }
9875  else
9876  {
9877  Node* const prevFreeNode = node->free.prev;
9878  VMA_ASSERT(prevFreeNode->free.next == node);
9879  prevFreeNode->free.next = node->free.next;
9880  }
9881 
9882  // It is at the back.
9883  if(node->free.next == VMA_NULL)
9884  {
9885  VMA_ASSERT(m_FreeList[level].back == node);
9886  m_FreeList[level].back = node->free.prev;
9887  }
9888  else
9889  {
9890  Node* const nextFreeNode = node->free.next;
9891  VMA_ASSERT(nextFreeNode->free.prev == node);
9892  nextFreeNode->free.prev = node->free.prev;
9893  }
9894 }
9895 
9896 #if VMA_STATS_STRING_ENABLED
9897 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9898 {
9899  switch(node->type)
9900  {
9901  case Node::TYPE_FREE:
9902  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9903  break;
9904  case Node::TYPE_ALLOCATION:
9905  {
9906  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9907  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9908  if(allocSize < levelNodeSize)
9909  {
9910  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9911  }
9912  }
9913  break;
9914  case Node::TYPE_SPLIT:
9915  {
9916  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9917  const Node* const leftChild = node->split.leftChild;
9918  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9919  const Node* const rightChild = leftChild->buddy;
9920  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9921  }
9922  break;
9923  default:
9924  VMA_ASSERT(0);
9925  }
9926 }
9927 #endif // #if VMA_STATS_STRING_ENABLED
9928 
9929 
9930 ////////////////////////////////////////////////////////////////////////////////
9931 // class VmaDeviceMemoryBlock
9932 
9933 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9934  m_pMetadata(VMA_NULL),
9935  m_MemoryTypeIndex(UINT32_MAX),
9936  m_Id(0),
9937  m_hMemory(VK_NULL_HANDLE),
9938  m_MapCount(0),
9939  m_pMappedData(VMA_NULL)
9940 {
9941 }
9942 
9943 void VmaDeviceMemoryBlock::Init(
9944  VmaAllocator hAllocator,
9945  uint32_t newMemoryTypeIndex,
9946  VkDeviceMemory newMemory,
9947  VkDeviceSize newSize,
9948  uint32_t id,
9949  uint32_t algorithm)
9950 {
9951  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9952 
9953  m_MemoryTypeIndex = newMemoryTypeIndex;
9954  m_Id = id;
9955  m_hMemory = newMemory;
9956 
9957  switch(algorithm)
9958  {
9959  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
9960  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9961  break;
9962  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
9963  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
9964  break;
9965  default:
9966  VMA_ASSERT(0);
9967  // Fall-through.
9968  case 0:
9969  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9970  }
9971  m_pMetadata->Init(newSize);
9972 }
9973 
9974 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9975 {
9976  // This is the most important assert in the entire library.
9977  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9978  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9979 
9980  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9981  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9982  m_hMemory = VK_NULL_HANDLE;
9983 
9984  vma_delete(allocator, m_pMetadata);
9985  m_pMetadata = VMA_NULL;
9986 }
9987 
9988 bool VmaDeviceMemoryBlock::Validate() const
9989 {
9990  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9991  (m_pMetadata->GetSize() != 0));
9992 
9993  return m_pMetadata->Validate();
9994 }
9995 
9996 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9997 {
9998  void* pData = nullptr;
9999  VkResult res = Map(hAllocator, 1, &pData);
10000  if(res != VK_SUCCESS)
10001  {
10002  return res;
10003  }
10004 
10005  res = m_pMetadata->CheckCorruption(pData);
10006 
10007  Unmap(hAllocator, 1);
10008 
10009  return res;
10010 }
10011 
10012 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10013 {
10014  if(count == 0)
10015  {
10016  return VK_SUCCESS;
10017  }
10018 
10019  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10020  if(m_MapCount != 0)
10021  {
10022  m_MapCount += count;
10023  VMA_ASSERT(m_pMappedData != VMA_NULL);
10024  if(ppData != VMA_NULL)
10025  {
10026  *ppData = m_pMappedData;
10027  }
10028  return VK_SUCCESS;
10029  }
10030  else
10031  {
10032  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10033  hAllocator->m_hDevice,
10034  m_hMemory,
10035  0, // offset
10036  VK_WHOLE_SIZE,
10037  0, // flags
10038  &m_pMappedData);
10039  if(result == VK_SUCCESS)
10040  {
10041  if(ppData != VMA_NULL)
10042  {
10043  *ppData = m_pMappedData;
10044  }
10045  m_MapCount = count;
10046  }
10047  return result;
10048  }
10049 }
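/*
Mapping is reference-counted per block. A sketch of the intended usage
(variable names are hypothetical):

    void* p1; block.Map(hAllocator, 1, &p1);  // calls vkMapMemory
    void* p2; block.Map(hAllocator, 1, &p2);  // only bumps m_MapCount, p2 == p1
    block.Unmap(hAllocator, 1);               // m_MapCount 2 -> 1, memory stays mapped
    block.Unmap(hAllocator, 1);               // m_MapCount 1 -> 0, calls vkUnmapMemory

This is why persistently mapped allocations can coexist with temporary maps
on the same VkDeviceMemory.
*/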
10050 
10051 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10052 {
10053  if(count == 0)
10054  {
10055  return;
10056  }
10057 
10058  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10059  if(m_MapCount >= count)
10060  {
10061  m_MapCount -= count;
10062  if(m_MapCount == 0)
10063  {
10064  m_pMappedData = VMA_NULL;
10065  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10066  }
10067  }
10068  else
10069  {
10070  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10071  }
10072 }
10073 
10074 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10075 {
10076  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10077  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10078 
10079  void* pData;
10080  VkResult res = Map(hAllocator, 1, &pData);
10081  if(res != VK_SUCCESS)
10082  {
10083  return res;
10084  }
10085 
10086  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10087  VmaWriteMagicValue(pData, allocOffset + allocSize);
10088 
10089  Unmap(hAllocator, 1);
10090 
10091  return VK_SUCCESS;
10092 }
10093 
10094 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10095 {
10096  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10097  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10098 
10099  void* pData;
10100  VkResult res = Map(hAllocator, 1, &pData);
10101  if(res != VK_SUCCESS)
10102  {
10103  return res;
10104  }
10105 
10106  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10107  {
10108  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10109  }
10110  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10111  {
10112  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10113  }
10114 
10115  Unmap(hAllocator, 1);
10116 
10117  return VK_SUCCESS;
10118 }
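/*
Layout assumed by the two functions above when VMA_DEBUG_MARGIN is enabled
(illustrative):

    ... | margin | allocation data | margin | ...
                 ^allocOffset      ^allocOffset + allocSize

A magic value is written into the margin directly before the allocation and
into the one directly after it; an overwrite of either margin is reported
when the allocation is freed.
*/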
10119 
10120 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10121  const VmaAllocator hAllocator,
10122  const VmaAllocation hAllocation,
10123  VkBuffer hBuffer)
10124 {
10125  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10126  hAllocation->GetBlock() == this);
10127  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10128  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10129  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10130  hAllocator->m_hDevice,
10131  hBuffer,
10132  m_hMemory,
10133  hAllocation->GetOffset());
10134 }
10135 
10136 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10137  const VmaAllocator hAllocator,
10138  const VmaAllocation hAllocation,
10139  VkImage hImage)
10140 {
10141  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10142  hAllocation->GetBlock() == this);
10143  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10145  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10146  hAllocator->m_hDevice,
10147  hImage,
10148  m_hMemory,
10149  hAllocation->GetOffset());
10150 }
10151 
10152 static void InitStatInfo(VmaStatInfo& outInfo)
10153 {
10154  memset(&outInfo, 0, sizeof(outInfo));
10155  outInfo.allocationSizeMin = UINT64_MAX;
10156  outInfo.unusedRangeSizeMin = UINT64_MAX;
10157 }
10158 
10159 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10160 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10161 {
10162  inoutInfo.blockCount += srcInfo.blockCount;
10163  inoutInfo.allocationCount += srcInfo.allocationCount;
10164  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10165  inoutInfo.usedBytes += srcInfo.usedBytes;
10166  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10167  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10168  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10169  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10170  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10171 }
10172 
10173 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10174 {
10175  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10176  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10177  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10178  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10179 }
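/*
Example (illustrative): usedBytes == 10 and allocationCount == 3 give
allocationSizeAvg == VmaRoundDiv(10, 3) == 3. The averages are computed once
here, after all merging, instead of being maintained incrementally in
VmaAddStatInfo.
*/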
10180 
10181 VmaPool_T::VmaPool_T(
10182  VmaAllocator hAllocator,
10183  const VmaPoolCreateInfo& createInfo,
10184  VkDeviceSize preferredBlockSize) :
10185  m_BlockVector(
10186  hAllocator,
10187  createInfo.memoryTypeIndex,
10188  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10189  createInfo.minBlockCount,
10190  createInfo.maxBlockCount,
10191  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10192  createInfo.frameInUseCount,
10193  true, // isCustomPool
10194  createInfo.blockSize != 0, // explicitBlockSize
10195  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10196  m_Id(0)
10197 {
10198 }
10199 
10200 VmaPool_T::~VmaPool_T()
10201 {
10202 }
10203 
10204 #if VMA_STATS_STRING_ENABLED
10205 
10206 #endif // #if VMA_STATS_STRING_ENABLED
10207 
10208 VmaBlockVector::VmaBlockVector(
10209  VmaAllocator hAllocator,
10210  uint32_t memoryTypeIndex,
10211  VkDeviceSize preferredBlockSize,
10212  size_t minBlockCount,
10213  size_t maxBlockCount,
10214  VkDeviceSize bufferImageGranularity,
10215  uint32_t frameInUseCount,
10216  bool isCustomPool,
10217  bool explicitBlockSize,
10218  uint32_t algorithm) :
10219  m_hAllocator(hAllocator),
10220  m_MemoryTypeIndex(memoryTypeIndex),
10221  m_PreferredBlockSize(preferredBlockSize),
10222  m_MinBlockCount(minBlockCount),
10223  m_MaxBlockCount(maxBlockCount),
10224  m_BufferImageGranularity(bufferImageGranularity),
10225  m_FrameInUseCount(frameInUseCount),
10226  m_IsCustomPool(isCustomPool),
10227  m_ExplicitBlockSize(explicitBlockSize),
10228  m_Algorithm(algorithm),
10229  m_HasEmptyBlock(false),
10230  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10231  m_pDefragmentator(VMA_NULL),
10232  m_NextBlockId(0)
10233 {
10234 }
10235 
10236 VmaBlockVector::~VmaBlockVector()
10237 {
10238  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10239 
10240  for(size_t i = m_Blocks.size(); i--; )
10241  {
10242  m_Blocks[i]->Destroy(m_hAllocator);
10243  vma_delete(m_hAllocator, m_Blocks[i]);
10244  }
10245 }
10246 
10247 VkResult VmaBlockVector::CreateMinBlocks()
10248 {
10249  for(size_t i = 0; i < m_MinBlockCount; ++i)
10250  {
10251  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10252  if(res != VK_SUCCESS)
10253  {
10254  return res;
10255  }
10256  }
10257  return VK_SUCCESS;
10258 }
10259 
10260 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10261 {
10262  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10263 
10264  const size_t blockCount = m_Blocks.size();
10265 
10266  pStats->size = 0;
10267  pStats->unusedSize = 0;
10268  pStats->allocationCount = 0;
10269  pStats->unusedRangeCount = 0;
10270  pStats->unusedRangeSizeMax = 0;
10271  pStats->blockCount = blockCount;
10272 
10273  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10274  {
10275  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10276  VMA_ASSERT(pBlock);
10277  VMA_HEAVY_ASSERT(pBlock->Validate());
10278  pBlock->m_pMetadata->AddPoolStats(*pStats);
10279  }
10280 }
10281 
10282 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10283 {
10284  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10285  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10286  (VMA_DEBUG_MARGIN > 0) &&
10287  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10288 }
10289 
10290 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10291 
10292 VkResult VmaBlockVector::Allocate(
10293  VmaPool hCurrentPool,
10294  uint32_t currentFrameIndex,
10295  VkDeviceSize size,
10296  VkDeviceSize alignment,
10297  const VmaAllocationCreateInfo& createInfo,
10298  VmaSuballocationType suballocType,
10299  size_t allocationCount,
10300  VmaAllocation* pAllocations)
10301 {
10302  size_t allocIndex;
10303  VkResult res = VK_SUCCESS;
10304 
10305  {
10306  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10307  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
10308  {
10309  res = AllocatePage(
10310  hCurrentPool,
10311  currentFrameIndex,
10312  size,
10313  alignment,
10314  createInfo,
10315  suballocType,
10316  pAllocations + allocIndex);
10317  if(res != VK_SUCCESS)
10318  {
10319  break;
10320  }
10321  }
10322  }
10323 
10324  if(res != VK_SUCCESS)
10325  {
10326  // Free all already created allocations.
10327  while(allocIndex--)
10328  {
10329  Free(pAllocations[allocIndex]);
10330  }
10331  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
10332  }
10333 
10334  return res;
10335 }
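/*
Note on the rollback above (illustrative): Allocate() is all-or-nothing.
E.g. with allocationCount == 3, if AllocatePage() fails at allocIndex == 2,
the two allocations already created are freed and all three pointers in
pAllocations are zeroed before the error code is returned.
*/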
10336 
10337 VkResult VmaBlockVector::AllocatePage(
10338  VmaPool hCurrentPool,
10339  uint32_t currentFrameIndex,
10340  VkDeviceSize size,
10341  VkDeviceSize alignment,
10342  const VmaAllocationCreateInfo& createInfo,
10343  VmaSuballocationType suballocType,
10344  VmaAllocation* pAllocation)
10345 {
10346  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10347  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10348  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10349  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10350  const bool canCreateNewBlock =
10351  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10352  (m_Blocks.size() < m_MaxBlockCount);
10353  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10354 
10355  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10356  // which in turn is available only when maxBlockCount = 1.
10357  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10358  {
10359  canMakeOtherLost = false;
10360  }
10361 
10362  // Upper address can only be used with linear allocator and within single memory block.
10363  if(isUpperAddress &&
10364  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10365  {
10366  return VK_ERROR_FEATURE_NOT_PRESENT;
10367  }
10368 
10369  // Validate strategy.
10370  switch(strategy)
10371  {
10372  case 0:
10373  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10374  break;
10375  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10376  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10377  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10378  break;
10379  default:
10380  return VK_ERROR_FEATURE_NOT_PRESENT;
10381  }
10382 
10383  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10384  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10385  {
10386  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10387  }
10388 
10389  /*
10390  Under certain conditions, this whole section can be skipped as an optimization, so
10391  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10392  e.g. for custom pools with linear algorithm.
10393  */
10394  if(!canMakeOtherLost || canCreateNewBlock)
10395  {
10396  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10397  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10398  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10399 
10400  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10401  {
10402  // Use only last block.
10403  if(!m_Blocks.empty())
10404  {
10405  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10406  VMA_ASSERT(pCurrBlock);
10407  VkResult res = AllocateFromBlock(
10408  pCurrBlock,
10409  hCurrentPool,
10410  currentFrameIndex,
10411  size,
10412  alignment,
10413  allocFlagsCopy,
10414  createInfo.pUserData,
10415  suballocType,
10416  strategy,
10417  pAllocation);
10418  if(res == VK_SUCCESS)
10419  {
10420  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10421  return VK_SUCCESS;
10422  }
10423  }
10424  }
10425  else
10426  {
10427  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10428  {
10429  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10430  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10431  {
10432  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10433  VMA_ASSERT(pCurrBlock);
10434  VkResult res = AllocateFromBlock(
10435  pCurrBlock,
10436  hCurrentPool,
10437  currentFrameIndex,
10438  size,
10439  alignment,
10440  allocFlagsCopy,
10441  createInfo.pUserData,
10442  suballocType,
10443  strategy,
10444  pAllocation);
10445  if(res == VK_SUCCESS)
10446  {
10447  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10448  return VK_SUCCESS;
10449  }
10450  }
10451  }
10452  else // WORST_FIT, FIRST_FIT
10453  {
10454  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10455  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10456  {
10457  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10458  VMA_ASSERT(pCurrBlock);
10459  VkResult res = AllocateFromBlock(
10460  pCurrBlock,
10461  hCurrentPool,
10462  currentFrameIndex,
10463  size,
10464  alignment,
10465  allocFlagsCopy,
10466  createInfo.pUserData,
10467  suballocType,
10468  strategy,
10469  pAllocation);
10470  if(res == VK_SUCCESS)
10471  {
10472  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10473  return VK_SUCCESS;
10474  }
10475  }
10476  }
10477  }
10478 
10479  // 2. Try to create new block.
10480  if(canCreateNewBlock)
10481  {
10482  // Calculate optimal size for new block.
10483  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10484  uint32_t newBlockSizeShift = 0;
10485  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10486 
10487  if(!m_ExplicitBlockSize)
10488  {
10489  // Allocate 1/8, 1/4, 1/2 as first blocks.
10490  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10491  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10492  {
10493  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10494  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10495  {
10496  newBlockSize = smallerNewBlockSize;
10497  ++newBlockSizeShift;
10498  }
10499  else
10500  {
10501  break;
10502  }
10503  }
10504  }
10505 
10506  size_t newBlockIndex = 0;
10507  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10508  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10509  if(!m_ExplicitBlockSize)
10510  {
10511  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10512  {
10513  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10514  if(smallerNewBlockSize >= size)
10515  {
10516  newBlockSize = smallerNewBlockSize;
10517  ++newBlockSizeShift;
10518  res = CreateBlock(newBlockSize, &newBlockIndex);
10519  }
10520  else
10521  {
10522  break;
10523  }
10524  }
10525  }
10526 
10527  if(res == VK_SUCCESS)
10528  {
10529  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10530  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10531 
10532  res = AllocateFromBlock(
10533  pBlock,
10534  hCurrentPool,
10535  currentFrameIndex,
10536  size,
10537  alignment,
10538  allocFlagsCopy,
10539  createInfo.pUserData,
10540  suballocType,
10541  strategy,
10542  pAllocation);
10543  if(res == VK_SUCCESS)
10544  {
10545  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10546  return VK_SUCCESS;
10547  }
10548  else
10549  {
10550  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10552  }
10553  }
10554  }
10555  }
10556 
10557  // 3. Try to allocate from existing blocks with making other allocations lost.
10558  if(canMakeOtherLost)
10559  {
10560  uint32_t tryIndex = 0;
10561  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10562  {
10563  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10564  VmaAllocationRequest bestRequest = {};
10565  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10566 
10567  // 1. Search existing allocations.
10568  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10569  {
10570  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10571  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10572  {
10573  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10574  VMA_ASSERT(pCurrBlock);
10575  VmaAllocationRequest currRequest = {};
10576  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10577  currentFrameIndex,
10578  m_FrameInUseCount,
10579  m_BufferImageGranularity,
10580  size,
10581  alignment,
10582  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10583  suballocType,
10584  canMakeOtherLost,
10585  strategy,
10586  &currRequest))
10587  {
10588  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10589  if(pBestRequestBlock == VMA_NULL ||
10590  currRequestCost < bestRequestCost)
10591  {
10592  pBestRequestBlock = pCurrBlock;
10593  bestRequest = currRequest;
10594  bestRequestCost = currRequestCost;
10595 
10596  if(bestRequestCost == 0)
10597  {
10598  break;
10599  }
10600  }
10601  }
10602  }
10603  }
10604  else // WORST_FIT, FIRST_FIT
10605  {
10606  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10607  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10608  {
10609  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10610  VMA_ASSERT(pCurrBlock);
10611  VmaAllocationRequest currRequest = {};
10612  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10613  currentFrameIndex,
10614  m_FrameInUseCount,
10615  m_BufferImageGranularity,
10616  size,
10617  alignment,
10618  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10619  suballocType,
10620  canMakeOtherLost,
10621  strategy,
10622  &currRequest))
10623  {
10624  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10625  if(pBestRequestBlock == VMA_NULL ||
10626  currRequestCost < bestRequestCost ||
10627  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10628  {
10629  pBestRequestBlock = pCurrBlock;
10630  bestRequest = currRequest;
10631  bestRequestCost = currRequestCost;
10632 
10633  if(bestRequestCost == 0 ||
10634  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10635  {
10636  break;
10637  }
10638  }
10639  }
10640  }
10641  }
10642 
10643  if(pBestRequestBlock != VMA_NULL)
10644  {
10645  if(mapped)
10646  {
10647  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10648  if(res != VK_SUCCESS)
10649  {
10650  return res;
10651  }
10652  }
10653 
10654  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10655  currentFrameIndex,
10656  m_FrameInUseCount,
10657  &bestRequest))
10658  {
10659  // We no longer have an empty Allocation.
10660  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10661  {
10662  m_HasEmptyBlock = false;
10663  }
10664  // Allocate from this pBlock.
10665  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10666  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10667  (*pAllocation)->InitBlockAllocation(
10668  hCurrentPool,
10669  pBestRequestBlock,
10670  bestRequest.offset,
10671  alignment,
10672  size,
10673  suballocType,
10674  mapped,
10675  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10676  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10677  VMA_DEBUG_LOG(" Returned from existing allocation"); // blockIndex is out of scope here, so it cannot be logged.
10678  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10679  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10680  {
10681  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10682  }
10683  if(IsCorruptionDetectionEnabled())
10684  {
10685  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10686  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10687  }
10688  return VK_SUCCESS;
10689  }
10690  // else: Some allocations must have been touched while we are here. Next try.
10691  }
10692  else
10693  {
10694  // Could not find place in any of the blocks - break outer loop.
10695  break;
10696  }
10697  }
10698  /* Maximum number of tries exceeded - a very unlikely event that happens when many
10699  other threads are simultaneously touching allocations, making it impossible to mark
10700  them as lost while we try to allocate. */
10701  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10702  {
10703  return VK_ERROR_TOO_MANY_OBJECTS;
10704  }
10705  }
10706 
10707  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10708 }
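/*
Block size heuristic used in step 2 above (illustrative): for a default pool
with e.g. m_PreferredBlockSize == 256 MiB and no existing blocks, the first
block is created with 1/8 of that (32 MiB), the next with 1/4, then 1/2, and
only then full size - unless the requested allocation itself needs a bigger
block. If vkAllocateMemory fails, the same 1/2, 1/4, 1/8 ladder is walked
downwards as a fallback, as long as the smaller size still fits the request.
*/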
10709 
10710 void VmaBlockVector::Free(
10711  VmaAllocation hAllocation)
10712 {
10713  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10714 
10715  // Scope for lock.
10716  {
10717  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10718 
10719  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10720 
10721  if(IsCorruptionDetectionEnabled())
10722  {
10723  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10724  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10725  }
10726 
10727  if(hAllocation->IsPersistentMap())
10728  {
10729  pBlock->Unmap(m_hAllocator, 1);
10730  }
10731 
10732  pBlock->m_pMetadata->Free(hAllocation);
10733  VMA_HEAVY_ASSERT(pBlock->Validate());
10734 
10735  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10736 
10737  // pBlock became empty after this deallocation.
10738  if(pBlock->m_pMetadata->IsEmpty())
10739  {
10740  // Already has empty Allocation. We don't want to have two, so delete this one.
10741  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10742  {
10743  pBlockToDelete = pBlock;
10744  Remove(pBlock);
10745  }
10746  // We now have our first empty block.
10747  else
10748  {
10749  m_HasEmptyBlock = true;
10750  }
10751  }
10752  // pBlock didn't become empty, but we have another empty block - find and free that one.
10753  // (This is optional, heuristics.)
10754  else if(m_HasEmptyBlock)
10755  {
10756  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10757  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10758  {
10759  pBlockToDelete = pLastBlock;
10760  m_Blocks.pop_back();
10761  m_HasEmptyBlock = false;
10762  }
10763  }
10764 
10765  IncrementallySortBlocks();
10766  }
10767 
10768  // Destruction of a free Allocation. Deferred until this point, outside of mutex
10769  // lock, for performance reasons.
10770  if(pBlockToDelete != VMA_NULL)
10771  {
10772  VMA_DEBUG_LOG(" Deleted empty allocation");
10773  pBlockToDelete->Destroy(m_hAllocator);
10774  vma_delete(m_hAllocator, pBlockToDelete);
10775  }
10776 }
10777 
10778 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10779 {
10780  VkDeviceSize result = 0;
10781  for(size_t i = m_Blocks.size(); i--; )
10782  {
10783  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10784  if(result >= m_PreferredBlockSize)
10785  {
10786  break;
10787  }
10788  }
10789  return result;
10790 }
10791 
10792 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10793 {
10794  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10795  {
10796  if(m_Blocks[blockIndex] == pBlock)
10797  {
10798  VmaVectorRemove(m_Blocks, blockIndex);
10799  return;
10800  }
10801  }
10802  VMA_ASSERT(0);
10803 }
10804 
10805 void VmaBlockVector::IncrementallySortBlocks()
10806 {
10807  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10808  {
10809  // Bubble sort only until first swap.
10810  for(size_t i = 1; i < m_Blocks.size(); ++i)
10811  {
10812  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10813  {
10814  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10815  return;
10816  }
10817  }
10818  }
10819 }
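/*
The sort above is deliberately incremental (illustrative): at most one swap
per call, so the cost stays O(n) while repeated Allocate()/Free() calls keep
nudging m_Blocks towards ascending order of free space. Full order is not
required for correctness - it only biases which block is tried first.
*/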
10820 
10821 VkResult VmaBlockVector::AllocateFromBlock(
10822  VmaDeviceMemoryBlock* pBlock,
10823  VmaPool hCurrentPool,
10824  uint32_t currentFrameIndex,
10825  VkDeviceSize size,
10826  VkDeviceSize alignment,
10827  VmaAllocationCreateFlags allocFlags,
10828  void* pUserData,
10829  VmaSuballocationType suballocType,
10830  uint32_t strategy,
10831  VmaAllocation* pAllocation)
10832 {
10833  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10834  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10835  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10836  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10837 
10838  VmaAllocationRequest currRequest = {};
10839  if(pBlock->m_pMetadata->CreateAllocationRequest(
10840  currentFrameIndex,
10841  m_FrameInUseCount,
10842  m_BufferImageGranularity,
10843  size,
10844  alignment,
10845  isUpperAddress,
10846  suballocType,
10847  false, // canMakeOtherLost
10848  strategy,
10849  &currRequest))
10850  {
10851  // Allocate from pCurrBlock.
10852  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10853 
10854  if(mapped)
10855  {
10856  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10857  if(res != VK_SUCCESS)
10858  {
10859  return res;
10860  }
10861  }
10862 
10863  // We no longer have an empty Allocation.
10864  if(pBlock->m_pMetadata->IsEmpty())
10865  {
10866  m_HasEmptyBlock = false;
10867  }
10868 
10869  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10870  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10871  (*pAllocation)->InitBlockAllocation(
10872  hCurrentPool,
10873  pBlock,
10874  currRequest.offset,
10875  alignment,
10876  size,
10877  suballocType,
10878  mapped,
10879  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10880  VMA_HEAVY_ASSERT(pBlock->Validate());
10881  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10882  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10883  {
10884  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10885  }
10886  if(IsCorruptionDetectionEnabled())
10887  {
10888  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10889  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10890  }
10891  return VK_SUCCESS;
10892  }
10893  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10894 }
10895 
10896 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10897 {
10898  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10899  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10900  allocInfo.allocationSize = blockSize;
10901  VkDeviceMemory mem = VK_NULL_HANDLE;
10902  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10903  if(res < 0)
10904  {
10905  return res;
10906  }
10907 
10908  // New VkDeviceMemory successfully created.
10909 
10910  // Create new Allocation for it.
10911  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10912  pBlock->Init(
10913  m_hAllocator,
10914  m_MemoryTypeIndex,
10915  mem,
10916  allocInfo.allocationSize,
10917  m_NextBlockId++,
10918  m_Algorithm);
10919 
10920  m_Blocks.push_back(pBlock);
10921  if(pNewBlockIndex != VMA_NULL)
10922  {
10923  *pNewBlockIndex = m_Blocks.size() - 1;
10924  }
10925 
10926  return VK_SUCCESS;
10927 }
10928 
10929 #if VMA_STATS_STRING_ENABLED
10930 
10931 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10932 {
10933  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10934 
10935  json.BeginObject();
10936 
10937  if(m_IsCustomPool)
10938  {
10939  json.WriteString("MemoryTypeIndex");
10940  json.WriteNumber(m_MemoryTypeIndex);
10941 
10942  json.WriteString("BlockSize");
10943  json.WriteNumber(m_PreferredBlockSize);
10944 
10945  json.WriteString("BlockCount");
10946  json.BeginObject(true);
10947  if(m_MinBlockCount > 0)
10948  {
10949  json.WriteString("Min");
10950  json.WriteNumber((uint64_t)m_MinBlockCount);
10951  }
10952  if(m_MaxBlockCount < SIZE_MAX)
10953  {
10954  json.WriteString("Max");
10955  json.WriteNumber((uint64_t)m_MaxBlockCount);
10956  }
10957  json.WriteString("Cur");
10958  json.WriteNumber((uint64_t)m_Blocks.size());
10959  json.EndObject();
10960 
10961  if(m_FrameInUseCount > 0)
10962  {
10963  json.WriteString("FrameInUseCount");
10964  json.WriteNumber(m_FrameInUseCount);
10965  }
10966 
10967  if(m_Algorithm != 0)
10968  {
10969  json.WriteString("Algorithm");
10970  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10971  }
10972  }
10973  else
10974  {
10975  json.WriteString("PreferredBlockSize");
10976  json.WriteNumber(m_PreferredBlockSize);
10977  }
10978 
10979  json.WriteString("Blocks");
10980  json.BeginObject();
10981  for(size_t i = 0; i < m_Blocks.size(); ++i)
10982  {
10983  json.BeginString();
10984  json.ContinueString(m_Blocks[i]->GetId());
10985  json.EndString();
10986 
10987  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
10988  }
10989  json.EndObject();
10990 
10991  json.EndObject();
10992 }
10993 
10994 #endif // #if VMA_STATS_STRING_ENABLED
10995 
10996 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10997  VmaAllocator hAllocator,
10998  uint32_t currentFrameIndex)
10999 {
11000  if(m_pDefragmentator == VMA_NULL)
11001  {
11002  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
11003  hAllocator,
11004  this,
11005  currentFrameIndex);
11006  }
11007 
11008  return m_pDefragmentator;
11009 }
11010 
11011 VkResult VmaBlockVector::Defragment(
11012  VmaDefragmentationStats* pDefragmentationStats,
11013  VkDeviceSize& maxBytesToMove,
11014  uint32_t& maxAllocationsToMove)
11015 {
11016  if(m_pDefragmentator == VMA_NULL)
11017  {
11018  return VK_SUCCESS;
11019  }
11020 
11021  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11022 
11023  // Defragment.
11024  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
11025 
11026  // Accumulate statistics.
11027  if(pDefragmentationStats != VMA_NULL)
11028  {
11029  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
11030  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
11031  pDefragmentationStats->bytesMoved += bytesMoved;
11032  pDefragmentationStats->allocationsMoved += allocationsMoved;
11033  VMA_ASSERT(bytesMoved <= maxBytesToMove);
11034  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
11035  maxBytesToMove -= bytesMoved;
11036  maxAllocationsToMove -= allocationsMoved;
11037  }
11038 
11039  // Free empty blocks.
11040  m_HasEmptyBlock = false;
11041  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11042  {
11043  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11044  if(pBlock->m_pMetadata->IsEmpty())
11045  {
11046  if(m_Blocks.size() > m_MinBlockCount)
11047  {
11048  if(pDefragmentationStats != VMA_NULL)
11049  {
11050  ++pDefragmentationStats->deviceMemoryBlocksFreed;
11051  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
11052  }
11053 
11054  VmaVectorRemove(m_Blocks, blockIndex);
11055  pBlock->Destroy(m_hAllocator);
11056  vma_delete(m_hAllocator, pBlock);
11057  }
11058  else
11059  {
11060  m_HasEmptyBlock = true;
11061  }
11062  }
11063  }
11064 
11065  return result;
11066 }
11067 
11068 void VmaBlockVector::DestroyDefragmentator()
11069 {
11070  if(m_pDefragmentator != VMA_NULL)
11071  {
11072  vma_delete(m_hAllocator, m_pDefragmentator);
11073  m_pDefragmentator = VMA_NULL;
11074  }
11075 }
11076 
11077 void VmaBlockVector::MakePoolAllocationsLost(
11078  uint32_t currentFrameIndex,
11079  size_t* pLostAllocationCount)
11080 {
11081  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11082  size_t lostAllocationCount = 0;
11083  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11084  {
11085  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11086  VMA_ASSERT(pBlock);
11087  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
11088  }
11089  if(pLostAllocationCount != VMA_NULL)
11090  {
11091  *pLostAllocationCount = lostAllocationCount;
11092  }
11093 }
11094 
11095 VkResult VmaBlockVector::CheckCorruption()
11096 {
11097  if(!IsCorruptionDetectionEnabled())
11098  {
11099  return VK_ERROR_FEATURE_NOT_PRESENT;
11100  }
11101 
11102  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11103  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11104  {
11105  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11106  VMA_ASSERT(pBlock);
11107  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11108  if(res != VK_SUCCESS)
11109  {
11110  return res;
11111  }
11112  }
11113  return VK_SUCCESS;
11114 }
11115 
11116 void VmaBlockVector::AddStats(VmaStats* pStats)
11117 {
11118  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11119  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11120 
11121  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11122 
11123  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11124  {
11125  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11126  VMA_ASSERT(pBlock);
11127  VMA_HEAVY_ASSERT(pBlock->Validate());
11128  VmaStatInfo allocationStatInfo;
11129  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11130  VmaAddStatInfo(pStats->total, allocationStatInfo);
11131  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11132  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11133  }
11134 }
11135 
11136 ////////////////////////////////////////////////////////////////////////////////
11137 // VmaDefragmentator members definition
11138 
11139 VmaDefragmentator::VmaDefragmentator(
11140  VmaAllocator hAllocator,
11141  VmaBlockVector* pBlockVector,
11142  uint32_t currentFrameIndex) :
11143  m_hAllocator(hAllocator),
11144  m_pBlockVector(pBlockVector),
11145  m_CurrentFrameIndex(currentFrameIndex),
11146  m_BytesMoved(0),
11147  m_AllocationsMoved(0),
11148  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11149  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11150 {
11151  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11152 }
11153 
11154 VmaDefragmentator::~VmaDefragmentator()
11155 {
11156  for(size_t i = m_Blocks.size(); i--; )
11157  {
11158  vma_delete(m_hAllocator, m_Blocks[i]);
11159  }
11160 }
11161 
11162 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11163 {
11164  AllocationInfo allocInfo;
11165  allocInfo.m_hAllocation = hAlloc;
11166  allocInfo.m_pChanged = pChanged;
11167  m_Allocations.push_back(allocInfo);
11168 }
11169 
11170 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11171 {
11172  // It has already been mapped for defragmentation.
11173  if(m_pMappedDataForDefragmentation)
11174  {
11175  *ppMappedData = m_pMappedDataForDefragmentation;
11176  return VK_SUCCESS;
11177  }
11178 
11179  // It is originally mapped.
11180  if(m_pBlock->GetMappedData())
11181  {
11182  *ppMappedData = m_pBlock->GetMappedData();
11183  return VK_SUCCESS;
11184  }
11185 
11186  // Map on first usage.
11187  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11188  *ppMappedData = m_pMappedDataForDefragmentation;
11189  return res;
11190 }
11191 
11192 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11193 {
11194  if(m_pMappedDataForDefragmentation != VMA_NULL)
11195  {
11196  m_pBlock->Unmap(hAllocator, 1);
11197  }
11198 }
11199 
11200 VkResult VmaDefragmentator::DefragmentRound(
11201  VkDeviceSize maxBytesToMove,
11202  uint32_t maxAllocationsToMove)
11203 {
11204  if(m_Blocks.empty())
11205  {
11206  return VK_SUCCESS;
11207  }
11208 
11209  size_t srcBlockIndex = m_Blocks.size() - 1;
11210  size_t srcAllocIndex = SIZE_MAX;
11211  for(;;)
11212  {
11213  // 1. Find next allocation to move.
11214  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11215  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11216  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11217  {
11218  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11219  {
11220  // Finished: no more allocations to process.
11221  if(srcBlockIndex == 0)
11222  {
11223  return VK_SUCCESS;
11224  }
11225  else
11226  {
11227  --srcBlockIndex;
11228  srcAllocIndex = SIZE_MAX;
11229  }
11230  }
11231  else
11232  {
11233  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11234  }
11235  }
11236 
11237  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11238  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11239 
11240  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11241  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11242  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11243  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11244 
11245  // 2. Try to find new place for this allocation in preceding or current block.
11246  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11247  {
11248  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11249  VmaAllocationRequest dstAllocRequest;
11250  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11251  m_CurrentFrameIndex,
11252  m_pBlockVector->GetFrameInUseCount(),
11253  m_pBlockVector->GetBufferImageGranularity(),
11254  size,
11255  alignment,
11256  false, // upperAddress
11257  suballocType,
11258  false, // canMakeOtherLost
11259  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
11260  &dstAllocRequest) &&
11261  MoveMakesSense(
11262  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11263  {
11264  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11265 
11266  // Reached limit on number of allocations or bytes to move.
11267  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11268  (m_BytesMoved + size > maxBytesToMove))
11269  {
11270  return VK_INCOMPLETE;
11271  }
11272 
11273  void* pDstMappedData = VMA_NULL;
11274  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11275  if(res != VK_SUCCESS)
11276  {
11277  return res;
11278  }
11279 
11280  void* pSrcMappedData = VMA_NULL;
11281  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11282  if(res != VK_SUCCESS)
11283  {
11284  return res;
11285  }
11286 
11287  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11288  memcpy(
11289  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11290  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11291  static_cast<size_t>(size));
11292 
11293  if(VMA_DEBUG_MARGIN > 0)
11294  {
11295  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11296  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11297  }
11298 
11299  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11300  dstAllocRequest,
11301  suballocType,
11302  size,
11303  false, // upperAddress
11304  allocInfo.m_hAllocation);
11305  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11306 
11307  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11308 
11309  if(allocInfo.m_pChanged != VMA_NULL)
11310  {
11311  *allocInfo.m_pChanged = VK_TRUE;
11312  }
11313 
11314  ++m_AllocationsMoved;
11315  m_BytesMoved += size;
11316 
11317  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11318 
11319  break;
11320  }
11321  }
11322 
11323  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11324 
11325  if(srcAllocIndex > 0)
11326  {
11327  --srcAllocIndex;
11328  }
11329  else
11330  {
11331  if(srcBlockIndex > 0)
11332  {
11333  --srcBlockIndex;
11334  srcAllocIndex = SIZE_MAX;
11335  }
11336  else
11337  {
11338  return VK_SUCCESS;
11339  }
11340  }
11341  }
11342 }
11343 
11344 VkResult VmaDefragmentator::Defragment(
11345  VkDeviceSize maxBytesToMove,
11346  uint32_t maxAllocationsToMove)
11347 {
11348  if(m_Allocations.empty())
11349  {
11350  return VK_SUCCESS;
11351  }
11352 
11353  // Create block info for each block.
11354  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11355  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11356  {
11357  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11358  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11359  m_Blocks.push_back(pBlockInfo);
11360  }
11361 
11362  // Sort them by m_pBlock pointer value.
11363  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11364 
11365  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
11366  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
11367  {
11368  AllocationInfo& allocInfo = m_Allocations[blockIndex];
11369  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
11370  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11371  {
11372  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11373  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11374  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11375  {
11376  (*it)->m_Allocations.push_back(allocInfo);
11377  }
11378  else
11379  {
11380  VMA_ASSERT(0);
11381  }
11382  }
11383  }
11384  m_Allocations.clear();
11385 
11386  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11387  {
11388  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11389  pBlockInfo->CalcHasNonMovableAllocations();
11390  pBlockInfo->SortAllocationsBySizeDescecnding();
11391  }
11392 
11393  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11394  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11395 
11396  // Execute defragmentation rounds (the main part).
11397  VkResult result = VK_SUCCESS;
11398  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11399  {
11400  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11401  }
11402 
11403  // Unmap blocks that were mapped for defragmentation.
11404  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11405  {
11406  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11407  }
11408 
11409  return result;
11410 }
11411 
11412 bool VmaDefragmentator::MoveMakesSense(
11413  size_t dstBlockIndex, VkDeviceSize dstOffset,
11414  size_t srcBlockIndex, VkDeviceSize srcOffset)
11415 {
11416  if(dstBlockIndex < srcBlockIndex)
11417  {
11418  return true;
11419  }
11420  if(dstBlockIndex > srcBlockIndex)
11421  {
11422  return false;
11423  }
11424  if(dstOffset < srcOffset)
11425  {
11426  return true;
11427  }
11428  return false;
11429 }
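/*
Examples for the ordering above (illustrative): moving from block 2 to
block 0 -> true; from block 0 to block 2 -> false; within block 1 from
offset 4096 to offset 0 -> true. Data only ever moves towards lower block
indices (more "destination") or lower offsets, which keeps the
defragmentation rounds making monotonic forward progress.
*/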
11430 
11431 ////////////////////////////////////////////////////////////////////////////////
11432 // VmaRecorder
11433 
11434 #if VMA_RECORDING_ENABLED
11435 
11436 VmaRecorder::VmaRecorder() :
11437  m_UseMutex(true),
11438  m_Flags(0),
11439  m_File(VMA_NULL),
11440  m_Freq(INT64_MAX),
11441  m_StartCounter(INT64_MAX)
11442 {
11443 }
11444 
11445 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11446 {
11447  m_UseMutex = useMutex;
11448  m_Flags = settings.flags;
11449 
11450  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11451  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11452 
11453  // Open file for writing.
11454  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11455  if(err != 0)
11456  {
11457  return VK_ERROR_INITIALIZATION_FAILED;
11458  }
11459 
11460  // Write header.
11461  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11462  fprintf(m_File, "%s\n", "1,3");
11463 
11464  return VK_SUCCESS;
11465 }
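/*
Resulting file format (a sketch, derived from the fprintf calls in this
class): a CSV text file starting with the two header lines written above,
followed by one line per recorded call, e.g.:

    Vulkan Memory Allocator,Calls recording
    1,3
    18312,0.002,0,vmaCreateAllocator

where the columns are thread id, time in seconds since Init(), frame index
and function name, plus per-call parameters. The thread id 18312 here is a
made-up example value.
*/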
11466 
11467 VmaRecorder::~VmaRecorder()
11468 {
11469  if(m_File != VMA_NULL)
11470  {
11471  fclose(m_File);
11472  }
11473 }
11474 
11475 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11476 {
11477  CallParams callParams;
11478  GetBasicParams(callParams);
11479 
11480  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11481  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11482  Flush();
11483 }
11484 
11485 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11486 {
11487  CallParams callParams;
11488  GetBasicParams(callParams);
11489 
11490  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11491  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11492  Flush();
11493 }
11494 
11495 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11496 {
11497  CallParams callParams;
11498  GetBasicParams(callParams);
11499 
11500  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11501  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11502  createInfo.memoryTypeIndex,
11503  createInfo.flags,
11504  createInfo.blockSize,
11505  (uint64_t)createInfo.minBlockCount,
11506  (uint64_t)createInfo.maxBlockCount,
11507  createInfo.frameInUseCount,
11508  pool);
11509  Flush();
11510 }
11511 
11512 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11513 {
11514  CallParams callParams;
11515  GetBasicParams(callParams);
11516 
11517  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11518  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11519  pool);
11520  Flush();
11521 }
11522 
11523 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11524  const VkMemoryRequirements& vkMemReq,
11525  const VmaAllocationCreateInfo& createInfo,
11526  VmaAllocation allocation)
11527 {
11528  CallParams callParams;
11529  GetBasicParams(callParams);
11530 
11531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11532  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11533  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11534  vkMemReq.size,
11535  vkMemReq.alignment,
11536  vkMemReq.memoryTypeBits,
11537  createInfo.flags,
11538  createInfo.usage,
11539  createInfo.requiredFlags,
11540  createInfo.preferredFlags,
11541  createInfo.memoryTypeBits,
11542  createInfo.pool,
11543  allocation,
11544  userDataStr.GetString());
11545  Flush();
11546 }
11547 
11548 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11549  const VkMemoryRequirements& vkMemReq,
11550  bool requiresDedicatedAllocation,
11551  bool prefersDedicatedAllocation,
11552  const VmaAllocationCreateInfo& createInfo,
11553  VmaAllocation allocation)
11554 {
11555  CallParams callParams;
11556  GetBasicParams(callParams);
11557 
11558  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11559  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11560  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11561  vkMemReq.size,
11562  vkMemReq.alignment,
11563  vkMemReq.memoryTypeBits,
11564  requiresDedicatedAllocation ? 1 : 0,
11565  prefersDedicatedAllocation ? 1 : 0,
11566  createInfo.flags,
11567  createInfo.usage,
11568  createInfo.requiredFlags,
11569  createInfo.preferredFlags,
11570  createInfo.memoryTypeBits,
11571  createInfo.pool,
11572  allocation,
11573  userDataStr.GetString());
11574  Flush();
11575 }
11576 
11577 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11578  const VkMemoryRequirements& vkMemReq,
11579  bool requiresDedicatedAllocation,
11580  bool prefersDedicatedAllocation,
11581  const VmaAllocationCreateInfo& createInfo,
11582  VmaAllocation allocation)
11583 {
11584  CallParams callParams;
11585  GetBasicParams(callParams);
11586 
11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11588  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11589  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11590  vkMemReq.size,
11591  vkMemReq.alignment,
11592  vkMemReq.memoryTypeBits,
11593  requiresDedicatedAllocation ? 1 : 0,
11594  prefersDedicatedAllocation ? 1 : 0,
11595  createInfo.flags,
11596  createInfo.usage,
11597  createInfo.requiredFlags,
11598  createInfo.preferredFlags,
11599  createInfo.memoryTypeBits,
11600  createInfo.pool,
11601  allocation,
11602  userDataStr.GetString());
11603  Flush();
11604 }
11605 
11606 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11607  VmaAllocation allocation)
11608 {
11609  CallParams callParams;
11610  GetBasicParams(callParams);
11611 
11612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11613  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11614  allocation);
11615  Flush();
11616 }
11617 
11618 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11619  VmaAllocation allocation,
11620  const void* pUserData)
11621 {
11622  CallParams callParams;
11623  GetBasicParams(callParams);
11624 
11625  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11626  UserDataString userDataStr(
11627  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11628  pUserData);
11629  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11630  allocation,
11631  userDataStr.GetString());
11632  Flush();
11633 }
11634 
11635 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11636  VmaAllocation allocation)
11637 {
11638  CallParams callParams;
11639  GetBasicParams(callParams);
11640 
11641  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11642  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11643  allocation);
11644  Flush();
11645 }
11646 
11647 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11648  VmaAllocation allocation)
11649 {
11650  CallParams callParams;
11651  GetBasicParams(callParams);
11652 
11653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11654  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11655  allocation);
11656  Flush();
11657 }
11658 
11659 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11660  VmaAllocation allocation)
11661 {
11662  CallParams callParams;
11663  GetBasicParams(callParams);
11664 
11665  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11666  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11667  allocation);
11668  Flush();
11669 }
11670 
11671 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11672  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11673 {
11674  CallParams callParams;
11675  GetBasicParams(callParams);
11676 
11677  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11678  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11679  allocation,
11680  offset,
11681  size);
11682  Flush();
11683 }
11684 
11685 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11686  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11687 {
11688  CallParams callParams;
11689  GetBasicParams(callParams);
11690 
11691  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11692  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11693  allocation,
11694  offset,
11695  size);
11696  Flush();
11697 }
11698 
11699 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11700  const VkBufferCreateInfo& bufCreateInfo,
11701  const VmaAllocationCreateInfo& allocCreateInfo,
11702  VmaAllocation allocation)
11703 {
11704  CallParams callParams;
11705  GetBasicParams(callParams);
11706 
11707  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11708  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11709  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11710  bufCreateInfo.flags,
11711  bufCreateInfo.size,
11712  bufCreateInfo.usage,
11713  bufCreateInfo.sharingMode,
11714  allocCreateInfo.flags,
11715  allocCreateInfo.usage,
11716  allocCreateInfo.requiredFlags,
11717  allocCreateInfo.preferredFlags,
11718  allocCreateInfo.memoryTypeBits,
11719  allocCreateInfo.pool,
11720  allocation,
11721  userDataStr.GetString());
11722  Flush();
11723 }
11724 
11725 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11726  const VkImageCreateInfo& imageCreateInfo,
11727  const VmaAllocationCreateInfo& allocCreateInfo,
11728  VmaAllocation allocation)
11729 {
11730  CallParams callParams;
11731  GetBasicParams(callParams);
11732 
11733  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11734  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11735  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11736  imageCreateInfo.flags,
11737  imageCreateInfo.imageType,
11738  imageCreateInfo.format,
11739  imageCreateInfo.extent.width,
11740  imageCreateInfo.extent.height,
11741  imageCreateInfo.extent.depth,
11742  imageCreateInfo.mipLevels,
11743  imageCreateInfo.arrayLayers,
11744  imageCreateInfo.samples,
11745  imageCreateInfo.tiling,
11746  imageCreateInfo.usage,
11747  imageCreateInfo.sharingMode,
11748  imageCreateInfo.initialLayout,
11749  allocCreateInfo.flags,
11750  allocCreateInfo.usage,
11751  allocCreateInfo.requiredFlags,
11752  allocCreateInfo.preferredFlags,
11753  allocCreateInfo.memoryTypeBits,
11754  allocCreateInfo.pool,
11755  allocation,
11756  userDataStr.GetString());
11757  Flush();
11758 }
11759 
11760 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11761  VmaAllocation allocation)
11762 {
11763  CallParams callParams;
11764  GetBasicParams(callParams);
11765 
11766  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11767  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11768  allocation);
11769  Flush();
11770 }
11771 
11772 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11773  VmaAllocation allocation)
11774 {
11775  CallParams callParams;
11776  GetBasicParams(callParams);
11777 
11778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11779  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11780  allocation);
11781  Flush();
11782 }
11783 
11784 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11785  VmaAllocation allocation)
11786 {
11787  CallParams callParams;
11788  GetBasicParams(callParams);
11789 
11790  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11791  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11792  allocation);
11793  Flush();
11794 }
11795 
11796 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11797  VmaAllocation allocation)
11798 {
11799  CallParams callParams;
11800  GetBasicParams(callParams);
11801 
11802  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11803  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11804  allocation);
11805  Flush();
11806 }
11807 
11808 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11809  VmaPool pool)
11810 {
11811  CallParams callParams;
11812  GetBasicParams(callParams);
11813 
11814  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11815  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11816  pool);
11817  Flush();
11818 }
11819 
11820 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11821 {
11822  if(pUserData != VMA_NULL)
11823  {
11824  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11825  {
11826  m_Str = (const char*)pUserData;
11827  }
11828  else
11829  {
11830  sprintf_s(m_PtrStr, "%p", pUserData);
11831  m_Str = m_PtrStr;
11832  }
11833  }
11834  else
11835  {
11836  m_Str = "";
11837  }
11838 }
11839 
11840 void VmaRecorder::WriteConfiguration(
11841  const VkPhysicalDeviceProperties& devProps,
11842  const VkPhysicalDeviceMemoryProperties& memProps,
11843  bool dedicatedAllocationExtensionEnabled)
11844 {
11845  fprintf(m_File, "Config,Begin\n");
11846 
11847  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11848  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11849  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11850  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11851  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11852  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11853 
11854  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11855  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11856  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11857 
11858  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11859  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11860  {
11861  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11862  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11863  }
11864  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11865  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11866  {
11867  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11868  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11869  }
11870 
11871  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11872 
11873  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11874  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11875  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11876  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11877  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11878  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11879  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11880  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11881  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11882 
11883  fprintf(m_File, "Config,End\n");
11884 }
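/*
The resulting "Config" section of the recording file is plain CSV written by
the fprintf calls above, e.g. (values illustrative):

    Config,Begin
    PhysicalDevice,apiVersion,4198400
    PhysicalDevice,deviceName,Example GPU
    PhysicalDeviceMemory,HeapCount,2
    PhysicalDeviceMemory,Heap,0,size,8589934592
    Extension,VK_KHR_dedicated_allocation,1
    Config,End
*/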
11885 
11886 void VmaRecorder::GetBasicParams(CallParams& outParams)
11887 {
11888  outParams.threadId = GetCurrentThreadId();
11889 
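 // Time is reported in seconds elapsed since Init(): QueryPerformanceCounter
 // ticks relative to m_StartCounter, divided by the frequency m_Freq.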
11890  LARGE_INTEGER counter;
11891  QueryPerformanceCounter(&counter);
11892  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11893 }
11894 
11895 void VmaRecorder::Flush()
11896 {
11897  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11898  {
11899  fflush(m_File);
11900  }
11901 }
11902 
11903 #endif // #if VMA_RECORDING_ENABLED
11904 
11905 ////////////////////////////////////////////////////////////////////////////////
11906 // VmaAllocator_T
11907 
11908 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11909  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11910  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11911  m_hDevice(pCreateInfo->device),
11912  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11913  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11914  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11915  m_PreferredLargeHeapBlockSize(0),
11916  m_PhysicalDevice(pCreateInfo->physicalDevice),
11917  m_CurrentFrameIndex(0),
11918  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11919  m_NextPoolId(0)
11920 #if VMA_RECORDING_ENABLED
11921  ,m_pRecorder(VMA_NULL)
11922 #endif
11923 {
11924  if(VMA_DEBUG_DETECT_CORRUPTION)
11925  {
11926  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11927  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11928  }
11929 
11930  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11931 
11932 #if !(VMA_DEDICATED_ALLOCATION)
11933  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11934  {
11935  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11936  }
11937 #endif
11938 
11939  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
11940  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11941  memset(&m_MemProps, 0, sizeof(m_MemProps));
11942 
11943  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11944  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11945 
11946  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11947  {
11948  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11949  }
11950 
11951  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11952  {
11953  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11954  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11955  }
11956 
11957  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11958 
11959  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11960  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11961 
11962  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11963  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11964  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11965  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11966 
11967  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11968  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11969 
11970  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11971  {
11972  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11973  {
11974  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11975  if(limit != VK_WHOLE_SIZE)
11976  {
11977  m_HeapSizeLimit[heapIndex] = limit;
11978  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11979  {
11980  m_MemProps.memoryHeaps[heapIndex].size = limit;
11981  }
11982  }
11983  }
11984  }
11985 
11986  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11987  {
11988  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11989 
11990  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11991  this,
11992  memTypeIndex,
11993  preferredBlockSize,
11994  0,
11995  SIZE_MAX,
11996  GetBufferImageGranularity(),
11997  pCreateInfo->frameInUseCount,
11998  false, // isCustomPool
11999  false, // explicitBlockSize
12000  false); // linearAlgorithm
12001  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
12002  // because minBlockCount is 0.
12003  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
12004 
12005  }
12006 }
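/*
Illustrative sketch of capping a heap with VmaAllocatorCreateInfo::pHeapSizeLimit,
as consumed by the constructor above. The array must cover memoryHeapCount
entries; VK_WHOLE_SIZE means "no limit". Heap index 0 as DEVICE_LOCAL is an
assumption for the example.

    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimit[i] = VK_WHOLE_SIZE;
    heapSizeLimit[0] = 512ull * 1024 * 1024; // pretend heap 0 has only 512 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapSizeLimit;
*/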
12007 
12008 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
12009 {
12010  VkResult res = VK_SUCCESS;
12011 
12012  if(pCreateInfo->pRecordSettings != VMA_NULL &&
12013  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
12014  {
12015 #if VMA_RECORDING_ENABLED
12016  m_pRecorder = vma_new(this, VmaRecorder)();
12017  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
12018  if(res != VK_SUCCESS)
12019  {
12020  return res;
12021  }
12022  m_pRecorder->WriteConfiguration(
12023  m_PhysicalDeviceProperties,
12024  m_MemProps,
12025  m_UseKhrDedicatedAllocation);
12026  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
12027 #else
12028  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
12029  return VK_ERROR_FEATURE_NOT_PRESENT;
12030 #endif
12031  }
12032 
12033  return res;
12034 }
12035 
12036 VmaAllocator_T::~VmaAllocator_T()
12037 {
12038 #if VMA_RECORDING_ENABLED
12039  if(m_pRecorder != VMA_NULL)
12040  {
12041  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
12042  vma_delete(this, m_pRecorder);
12043  }
12044 #endif
12045 
12046  VMA_ASSERT(m_Pools.empty());
12047 
12048  for(size_t i = GetMemoryTypeCount(); i--; )
12049  {
12050  vma_delete(this, m_pDedicatedAllocations[i]);
12051  vma_delete(this, m_pBlockVectors[i]);
12052  }
12053 }
12054 
12055 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
12056 {
12057 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12058  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
12059  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
12060  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
12061  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
12062  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
12063  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
12064  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
12065  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
12066  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
12067  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
12068  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
12069  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
12070  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
12071  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
12072  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
12073  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
12074 #if VMA_DEDICATED_ALLOCATION
12075  if(m_UseKhrDedicatedAllocation)
12076  {
12077  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
12078  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
12079  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
12080  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
12081  }
12082 #endif // #if VMA_DEDICATED_ALLOCATION
12083 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
12084 
12085 #define VMA_COPY_IF_NOT_NULL(funcName) \
12086  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
12087 
12088  if(pVulkanFunctions != VMA_NULL)
12089  {
12090  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
12091  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
12092  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
12093  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
12094  VMA_COPY_IF_NOT_NULL(vkMapMemory);
12095  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
12096  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
12097  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
12098  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
12099  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
12100  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
12101  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
12102  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
12103  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
12104  VMA_COPY_IF_NOT_NULL(vkCreateImage);
12105  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
12106 #if VMA_DEDICATED_ALLOCATION
12107  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
12108  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
12109 #endif
12110  }
12111 
12112 #undef VMA_COPY_IF_NOT_NULL
12113 
12114  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
12115  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
12116  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
12117  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
12118  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
12119  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12120  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12121  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12122  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12123  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12124  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12125  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12126  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12127  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12128  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12129  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12130  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12131  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12132 #if VMA_DEDICATED_ALLOCATION
12133  if(m_UseKhrDedicatedAllocation)
12134  {
12135  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12136  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12137  }
12138 #endif
12139 }
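/*
Illustrative sketch of supplying function pointers manually, as needed when
VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 (e.g. when the application loads
Vulkan dynamically). Only a few members are shown; every member must be set
or the asserts above will fire.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ...and so on for each remaining member of VmaVulkanFunctions.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/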
12140 
12141 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12142 {
12143  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12144  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12145  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12146  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12147 }
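// Worked example, assuming the default limits: with VMA_SMALL_HEAP_MAX_SIZE
// at 1 GiB, a 256 MiB heap counts as "small" and gets 32 MiB blocks
// (heapSize / 8), while an 8 GiB heap uses m_PreferredLargeHeapBlockSize
// (256 MiB unless overridden via preferredLargeHeapBlockSize).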
12148 
12149 VkResult VmaAllocator_T::AllocateMemoryOfType(
12150  VkDeviceSize size,
12151  VkDeviceSize alignment,
12152  bool dedicatedAllocation,
12153  VkBuffer dedicatedBuffer,
12154  VkImage dedicatedImage,
12155  const VmaAllocationCreateInfo& createInfo,
12156  uint32_t memTypeIndex,
12157  VmaSuballocationType suballocType,
12158  size_t allocationCount,
12159  VmaAllocation* pAllocations)
12160 {
12161  VMA_ASSERT(pAllocations != VMA_NULL);
12162  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
12163 
12164  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12165 
12166  // If memory type is not HOST_VISIBLE, disable MAPPED.
12167  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12168  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12169  {
12170  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12171  }
12172 
12173  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12174  VMA_ASSERT(blockVector);
12175 
12176  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12177  bool preferDedicatedMemory =
12178  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12179  dedicatedAllocation ||
12180  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
12181  size > preferredBlockSize / 2;
12182 
12183  if(preferDedicatedMemory &&
12184  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12185  finalCreateInfo.pool == VK_NULL_HANDLE)
12186  {
12187  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12188  }
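 // Worked example: with a 256 MiB preferred block size, any request larger
 // than 128 MiB prefers a dedicated VkDeviceMemory allocation instead of
 // suballocating from a block.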
12189 
12190  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12191  {
12192  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12193  {
12194  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12195  }
12196  else
12197  {
12198  return AllocateDedicatedMemory(
12199  size,
12200  suballocType,
12201  memTypeIndex,
12202  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12203  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12204  finalCreateInfo.pUserData,
12205  dedicatedBuffer,
12206  dedicatedImage,
12207  allocationCount,
12208  pAllocations);
12209  }
12210  }
12211  else
12212  {
12213  VkResult res = blockVector->Allocate(
12214  VK_NULL_HANDLE, // hCurrentPool
12215  m_CurrentFrameIndex.load(),
12216  size,
12217  alignment,
12218  finalCreateInfo,
12219  suballocType,
12220  allocationCount,
12221  pAllocations);
12222  if(res == VK_SUCCESS)
12223  {
12224  return res;
12225  }
12226 
12227  // Block allocation failed: try dedicated memory as a fallback.
12228  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12229  {
12230  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12231  }
12232  else
12233  {
12234  res = AllocateDedicatedMemory(
12235  size,
12236  suballocType,
12237  memTypeIndex,
12238  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12239  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12240  finalCreateInfo.pUserData,
12241  dedicatedBuffer,
12242  dedicatedImage,
12243  allocationCount,
12244  pAllocations);
12245  if(res == VK_SUCCESS)
12246  {
12247  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
12248  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12249  return VK_SUCCESS;
12250  }
12251  else
12252  {
12253  // Everything failed: Return error code.
12254  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12255  return res;
12256  }
12257  }
12258  }
12259 }
12260 
12261 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12262  VkDeviceSize size,
12263  VmaSuballocationType suballocType,
12264  uint32_t memTypeIndex,
12265  bool map,
12266  bool isUserDataString,
12267  void* pUserData,
12268  VkBuffer dedicatedBuffer,
12269  VkImage dedicatedImage,
12270  size_t allocationCount,
12271  VmaAllocation* pAllocations)
12272 {
12273  VMA_ASSERT(allocationCount > 0 && pAllocations);
12274 
12275  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12276  allocInfo.memoryTypeIndex = memTypeIndex;
12277  allocInfo.allocationSize = size;
12278 
12279 #if VMA_DEDICATED_ALLOCATION
12280  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12281  if(m_UseKhrDedicatedAllocation)
12282  {
12283  if(dedicatedBuffer != VK_NULL_HANDLE)
12284  {
12285  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12286  dedicatedAllocInfo.buffer = dedicatedBuffer;
12287  allocInfo.pNext = &dedicatedAllocInfo;
12288  }
12289  else if(dedicatedImage != VK_NULL_HANDLE)
12290  {
12291  dedicatedAllocInfo.image = dedicatedImage;
12292  allocInfo.pNext = &dedicatedAllocInfo;
12293  }
12294  }
12295 #endif // #if VMA_DEDICATED_ALLOCATION
12296 
12297  size_t allocIndex;
12298  VkResult res = VK_SUCCESS;
12299  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12300  {
12301  res = AllocateDedicatedMemoryPage(
12302  size,
12303  suballocType,
12304  memTypeIndex,
12305  allocInfo,
12306  map,
12307  isUserDataString,
12308  pUserData,
12309  pAllocations + allocIndex);
12310  if(res != VK_SUCCESS)
12311  {
12312  break;
12313  }
12314  }
12315 
12316  if(res == VK_SUCCESS)
12317  {
12318  // Register them in m_pDedicatedAllocations.
12319  {
12320  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12321  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12322  VMA_ASSERT(pDedicatedAllocations);
12323  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12324  {
12325  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
12326  }
12327  }
12328 
12329  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
12330  }
12331  else
12332  {
12333  // Free all already created allocations.
12334  while(allocIndex--)
12335  {
12336  VmaAllocation currAlloc = pAllocations[allocIndex];
12337  VkDeviceMemory hMemory = currAlloc->GetMemory();
12338 
12339  /*
12340  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
12341  before vkFreeMemory.
12342 
12343  if(currAlloc->GetMappedData() != VMA_NULL)
12344  {
12345  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
12346  }
12347  */
12348 
12349  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
12350 
12351  currAlloc->SetUserData(this, VMA_NULL);
12352  vma_delete(this, currAlloc);
12353  }
12354 
12355  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12356  }
12357 
12358  return res;
12359 }
12360 
12361 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
12362  VkDeviceSize size,
12363  VmaSuballocationType suballocType,
12364  uint32_t memTypeIndex,
12365  const VkMemoryAllocateInfo& allocInfo,
12366  bool map,
12367  bool isUserDataString,
12368  void* pUserData,
12369  VmaAllocation* pAllocation)
12370 {
12371  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12372  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12373  if(res < 0)
12374  {
12375  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12376  return res;
12377  }
12378 
12379  void* pMappedData = VMA_NULL;
12380  if(map)
12381  {
12382  res = (*m_VulkanFunctions.vkMapMemory)(
12383  m_hDevice,
12384  hMemory,
12385  0,
12386  VK_WHOLE_SIZE,
12387  0,
12388  &pMappedData);
12389  if(res < 0)
12390  {
12391  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12392  FreeVulkanMemory(memTypeIndex, size, hMemory);
12393  return res;
12394  }
12395  }
12396 
12397  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12398  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12399  (*pAllocation)->SetUserData(this, pUserData);
12400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12401  {
12402  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12403  }
12404 
12405  return VK_SUCCESS;
12406 }
12407 
12408 void VmaAllocator_T::GetBufferMemoryRequirements(
12409  VkBuffer hBuffer,
12410  VkMemoryRequirements& memReq,
12411  bool& requiresDedicatedAllocation,
12412  bool& prefersDedicatedAllocation) const
12413 {
12414 #if VMA_DEDICATED_ALLOCATION
12415  if(m_UseKhrDedicatedAllocation)
12416  {
12417  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12418  memReqInfo.buffer = hBuffer;
12419 
12420  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12421 
12422  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12423  memReq2.pNext = &memDedicatedReq;
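 // Chaining VkMemoryDedicatedRequirementsKHR through pNext lets a single
 // vkGetBufferMemoryRequirements2KHR call return both the standard memory
 // requirements and the driver's dedicated-allocation preference.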
12424 
12425  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12426 
12427  memReq = memReq2.memoryRequirements;
12428  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12429  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12430  }
12431  else
12432 #endif // #if VMA_DEDICATED_ALLOCATION
12433  {
12434  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12435  requiresDedicatedAllocation = false;
12436  prefersDedicatedAllocation = false;
12437  }
12438 }
12439 
12440 void VmaAllocator_T::GetImageMemoryRequirements(
12441  VkImage hImage,
12442  VkMemoryRequirements& memReq,
12443  bool& requiresDedicatedAllocation,
12444  bool& prefersDedicatedAllocation) const
12445 {
12446 #if VMA_DEDICATED_ALLOCATION
12447  if(m_UseKhrDedicatedAllocation)
12448  {
12449  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12450  memReqInfo.image = hImage;
12451 
12452  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12453 
12454  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12455  memReq2.pNext = &memDedicatedReq;
12456 
12457  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12458 
12459  memReq = memReq2.memoryRequirements;
12460  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12461  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12462  }
12463  else
12464 #endif // #if VMA_DEDICATED_ALLOCATION
12465  {
12466  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12467  requiresDedicatedAllocation = false;
12468  prefersDedicatedAllocation = false;
12469  }
12470 }
12471 
12472 VkResult VmaAllocator_T::AllocateMemory(
12473  const VkMemoryRequirements& vkMemReq,
12474  bool requiresDedicatedAllocation,
12475  bool prefersDedicatedAllocation,
12476  VkBuffer dedicatedBuffer,
12477  VkImage dedicatedImage,
12478  const VmaAllocationCreateInfo& createInfo,
12479  VmaSuballocationType suballocType,
12480  size_t allocationCount,
12481  VmaAllocation* pAllocations)
12482 {
12483  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12484 
12485  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12486 
12487  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12488  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12489  {
12490  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12491  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12492  }
12493  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12494  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12495  {
12496  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12497  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12498  }
12499  if(requiresDedicatedAllocation)
12500  {
12501  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12502  {
12503  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12504  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12505  }
12506  if(createInfo.pool != VK_NULL_HANDLE)
12507  {
12508  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12509  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12510  }
12511  }
12512  if((createInfo.pool != VK_NULL_HANDLE) &&
12513  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12514  {
12515  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12516  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12517  }
12518 
12519  if(createInfo.pool != VK_NULL_HANDLE)
12520  {
12521  const VkDeviceSize alignmentForPool = VMA_MAX(
12522  vkMemReq.alignment,
12523  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12524  return createInfo.pool->m_BlockVector.Allocate(
12525  createInfo.pool,
12526  m_CurrentFrameIndex.load(),
12527  vkMemReq.size,
12528  alignmentForPool,
12529  createInfo,
12530  suballocType,
12531  allocationCount,
12532  pAllocations);
12533  }
12534  else
12535  {
12536  // Bit mask of Vulkan memory types acceptable for this allocation.
12537  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12538  uint32_t memTypeIndex = UINT32_MAX;
12539  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12540  if(res == VK_SUCCESS)
12541  {
12542  VkDeviceSize alignmentForMemType = VMA_MAX(
12543  vkMemReq.alignment,
12544  GetMemoryTypeMinAlignment(memTypeIndex));
12545 
12546  res = AllocateMemoryOfType(
12547  vkMemReq.size,
12548  alignmentForMemType,
12549  requiresDedicatedAllocation || prefersDedicatedAllocation,
12550  dedicatedBuffer,
12551  dedicatedImage,
12552  createInfo,
12553  memTypeIndex,
12554  suballocType,
12555  allocationCount,
12556  pAllocations);
12557  // Succeeded on first try.
12558  if(res == VK_SUCCESS)
12559  {
12560  return res;
12561  }
12562  // Allocation from this memory type failed. Try other compatible memory types.
12563  else
12564  {
12565  for(;;)
12566  {
12567  // Remove old memTypeIndex from list of possibilities.
12568  memoryTypeBits &= ~(1u << memTypeIndex);
12569  // Find alternative memTypeIndex.
12570  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12571  if(res == VK_SUCCESS)
12572  {
12573  alignmentForMemType = VMA_MAX(
12574  vkMemReq.alignment,
12575  GetMemoryTypeMinAlignment(memTypeIndex));
12576 
12577  res = AllocateMemoryOfType(
12578  vkMemReq.size,
12579  alignmentForMemType,
12580  requiresDedicatedAllocation || prefersDedicatedAllocation,
12581  dedicatedBuffer,
12582  dedicatedImage,
12583  createInfo,
12584  memTypeIndex,
12585  suballocType,
12586  allocationCount,
12587  pAllocations);
12588  // Allocation from this alternative memory type succeeded.
12589  if(res == VK_SUCCESS)
12590  {
12591  return res;
12592  }
12593  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12594  }
12595  // No other matching memory type index could be found.
12596  else
12597  {
12598  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12599  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12600  }
12601  }
12602  }
12603  }
12604  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12605  else
12606  return res;
12607  }
12608 }
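/*
Minimal sketch of the public entry point that lands in AllocateMemory above,
assuming an already-created allocator, device, and buffer; names are
illustrative.

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, nullptr);
    // If the first matching memory type fails, AllocateMemory clears that bit
    // from memoryTypeBits and retries with the next compatible type.
*/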
12609 
12610 void VmaAllocator_T::FreeMemory(
12611  size_t allocationCount,
12612  const VmaAllocation* pAllocations)
12613 {
12614  VMA_ASSERT(pAllocations);
12615 
12616  for(size_t allocIndex = allocationCount; allocIndex--; )
12617  {
12618  VmaAllocation allocation = pAllocations[allocIndex];
12619 
12620  if(allocation != VK_NULL_HANDLE)
12621  {
12622  if(TouchAllocation(allocation))
12623  {
12624  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12625  {
12626  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12627  }
12628 
12629  switch(allocation->GetType())
12630  {
12631  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12632  {
12633  VmaBlockVector* pBlockVector = VMA_NULL;
12634  VmaPool hPool = allocation->GetPool();
12635  if(hPool != VK_NULL_HANDLE)
12636  {
12637  pBlockVector = &hPool->m_BlockVector;
12638  }
12639  else
12640  {
12641  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12642  pBlockVector = m_pBlockVectors[memTypeIndex];
12643  }
12644  pBlockVector->Free(allocation);
12645  }
12646  break;
12647  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12648  FreeDedicatedMemory(allocation);
12649  break;
12650  default:
12651  VMA_ASSERT(0);
12652  }
12653  }
12654 
12655  allocation->SetUserData(this, VMA_NULL);
12656  vma_delete(this, allocation);
12657  }
12658  }
12659 }
12660 
12661 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12662 {
12663  // Initialize.
12664  InitStatInfo(pStats->total);
12665  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12666  InitStatInfo(pStats->memoryType[i]);
12667  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12668  InitStatInfo(pStats->memoryHeap[i]);
12669 
12670  // Process default pools.
12671  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12672  {
12673  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12674  VMA_ASSERT(pBlockVector);
12675  pBlockVector->AddStats(pStats);
12676  }
12677 
12678  // Process custom pools.
12679  {
12680  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12681  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12682  {
12683  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12684  }
12685  }
12686 
12687  // Process dedicated allocations.
12688  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12689  {
12690  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12691  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12692  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12693  VMA_ASSERT(pDedicatedAllocVector);
12694  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12695  {
12696  VmaStatInfo allocationStatInfo;
12697  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12698  VmaAddStatInfo(pStats->total, allocationStatInfo);
12699  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12700  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12701  }
12702  }
12703 
12704  // Postprocess.
12705  VmaPostprocessCalcStatInfo(pStats->total);
12706  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12707  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12708  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12709  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12710 }
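/*
User-facing sketch: vmaCalculateStats() fills a VmaStats struct with the
totals plus the per-memory-type and per-heap breakdowns aggregated above.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);
*/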
12711 
12712 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12713 
12714 VkResult VmaAllocator_T::Defragment(
12715  VmaAllocation* pAllocations,
12716  size_t allocationCount,
12717  VkBool32* pAllocationsChanged,
12718  const VmaDefragmentationInfo* pDefragmentationInfo,
12719  VmaDefragmentationStats* pDefragmentationStats)
12720 {
12721  if(pAllocationsChanged != VMA_NULL)
12722  {
12723  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged) * allocationCount);
12724  }
12725  if(pDefragmentationStats != VMA_NULL)
12726  {
12727  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12728  }
12729 
12730  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12731 
12732  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12733 
12734  const size_t poolCount = m_Pools.size();
12735 
12736  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12737  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12738  {
12739  VmaAllocation hAlloc = pAllocations[allocIndex];
12740  VMA_ASSERT(hAlloc);
12741  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12742  // DedicatedAlloc cannot be defragmented.
12743  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12744  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12745  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12746  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12747  // Lost allocation cannot be defragmented.
12748  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12749  {
12750  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12751 
12752  const VmaPool hAllocPool = hAlloc->GetPool();
12753  // This allocation belongs to a custom pool.
12754  if(hAllocPool != VK_NULL_HANDLE)
12755  {
12756  // Pools with linear or buddy algorithm are not defragmented.
12757  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12758  {
12759  pAllocBlockVector = &hAllocPool->m_BlockVector;
12760  }
12761  }
12762  // This allocation belongs to the general pool.
12763  else
12764  {
12765  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12766  }
12767 
12768  if(pAllocBlockVector != VMA_NULL)
12769  {
12770  VmaDefragmentator* const pDefragmentator =
12771  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12772  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12773  &pAllocationsChanged[allocIndex] : VMA_NULL;
12774  pDefragmentator->AddAllocation(hAlloc, pChanged);
12775  }
12776  }
12777  }
12778 
12779  VkResult result = VK_SUCCESS;
12780 
12781  // ======== Main processing.
12782 
12783  VkDeviceSize maxBytesToMove = SIZE_MAX;
12784  uint32_t maxAllocationsToMove = UINT32_MAX;
12785  if(pDefragmentationInfo != VMA_NULL)
12786  {
12787  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12788  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12789  }
12790 
12791  // Process standard memory.
12792  for(uint32_t memTypeIndex = 0;
12793  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12794  ++memTypeIndex)
12795  {
12796  // Only HOST_VISIBLE memory types can be defragmented.
12797  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12798  {
12799  result = m_pBlockVectors[memTypeIndex]->Defragment(
12800  pDefragmentationStats,
12801  maxBytesToMove,
12802  maxAllocationsToMove);
12803  }
12804  }
12805 
12806  // Process custom pools.
12807  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12808  {
12809  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12810  pDefragmentationStats,
12811  maxBytesToMove,
12812  maxAllocationsToMove);
12813  }
12814 
12815  // ======== Destroy defragmentators.
12816 
12817  // Process custom pools.
12818  for(size_t poolIndex = poolCount; poolIndex--; )
12819  {
12820  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12821  }
12822 
12823  // Process standard memory.
12824  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12825  {
12826  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12827  {
12828  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12829  }
12830  }
12831 
12832  return result;
12833 }
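/*
Illustrative sketch of driving this from the public API. Only block
allocations in HOST_VISIBLE | HOST_COHERENT memory that are not lost can be
moved; names and budget values are illustrative.

    std::vector<VmaAllocation> allocations; // gathered elsewhere by the application
    std::vector<VkBool32> changed(allocations.size());

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // optional budget
    defragInfo.maxAllocationsToMove = 100;

    VmaDefragmentationStats stats = {};
    vmaDefragment(allocator, allocations.data(), allocations.size(),
        changed.data(), &defragInfo, &stats);
    // For each changed[i] == VK_TRUE, re-query vmaGetAllocationInfo() and
    // recreate or rebind any buffers/images using that allocation.
*/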
12834 
12835 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12836 {
12837  if(hAllocation->CanBecomeLost())
12838  {
12839  /*
12840  Warning: This is a carefully designed algorithm.
12841  Do not modify unless you really know what you're doing :)
12842  */
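 // The loop below publishes "used in frame localCurrFrameIndex" with a
 // compare-exchange: if another thread changed the last-use frame index in
 // the meantime (including marking the allocation lost), the exchange fails,
 // localLastUseFrameIndex is reloaded with the observed value, and the
 // decision is retried.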
12843  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12844  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12845  for(;;)
12846  {
12847  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12848  {
12849  pAllocationInfo->memoryType = UINT32_MAX;
12850  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12851  pAllocationInfo->offset = 0;
12852  pAllocationInfo->size = hAllocation->GetSize();
12853  pAllocationInfo->pMappedData = VMA_NULL;
12854  pAllocationInfo->pUserData = hAllocation->GetUserData();
12855  return;
12856  }
12857  else if(localLastUseFrameIndex == localCurrFrameIndex)
12858  {
12859  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12860  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12861  pAllocationInfo->offset = hAllocation->GetOffset();
12862  pAllocationInfo->size = hAllocation->GetSize();
12863  pAllocationInfo->pMappedData = VMA_NULL;
12864  pAllocationInfo->pUserData = hAllocation->GetUserData();
12865  return;
12866  }
12867  else // Last use time earlier than current time.
12868  {
12869  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12870  {
12871  localLastUseFrameIndex = localCurrFrameIndex;
12872  }
12873  }
12874  }
12875  }
12876  else
12877  {
12878 #if VMA_STATS_STRING_ENABLED
12879  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12880  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12881  for(;;)
12882  {
12883  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12884  if(localLastUseFrameIndex == localCurrFrameIndex)
12885  {
12886  break;
12887  }
12888  else // Last use time earlier than current time.
12889  {
12890  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12891  {
12892  localLastUseFrameIndex = localCurrFrameIndex;
12893  }
12894  }
12895  }
12896 #endif
12897 
12898  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12899  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12900  pAllocationInfo->offset = hAllocation->GetOffset();
12901  pAllocationInfo->size = hAllocation->GetSize();
12902  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12903  pAllocationInfo->pUserData = hAllocation->GetUserData();
12904  }
12905 }
12906 
12907 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12908 {
12909  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12910  if(hAllocation->CanBecomeLost())
12911  {
12912  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12913  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12914  for(;;)
12915  {
12916  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12917  {
12918  return false;
12919  }
12920  else if(localLastUseFrameIndex == localCurrFrameIndex)
12921  {
12922  return true;
12923  }
12924  else // Last use time earlier than current time.
12925  {
12926  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12927  {
12928  localLastUseFrameIndex = localCurrFrameIndex;
12929  }
12930  }
12931  }
12932  }
12933  else
12934  {
12935 #if VMA_STATS_STRING_ENABLED
12936  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12937  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12938  for(;;)
12939  {
12940  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12941  if(localLastUseFrameIndex == localCurrFrameIndex)
12942  {
12943  break;
12944  }
12945  else // Last use time earlier than current time.
12946  {
12947  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12948  {
12949  localLastUseFrameIndex = localCurrFrameIndex;
12950  }
12951  }
12952  }
12953 #endif
12954 
12955  return true;
12956  }
12957 }
12958 
12959 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12960 {
12961  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12962 
12963  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12964 
12965  if(newCreateInfo.maxBlockCount == 0)
12966  {
12967  newCreateInfo.maxBlockCount = SIZE_MAX;
12968  }
12969  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12970  {
12971  return VK_ERROR_INITIALIZATION_FAILED;
12972  }
12973 
12974  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12975 
12976  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12977 
12978  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12979  if(res != VK_SUCCESS)
12980  {
12981  vma_delete(this, *pPool);
12982  *pPool = VMA_NULL;
12983  return res;
12984  }
12985 
12986  // Add to m_Pools.
12987  {
12988  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12989  (*pPool)->SetId(m_NextPoolId++);
12990  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12991  }
12992 
12993  return VK_SUCCESS;
12994 }
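/*
Illustrative sketch of creating a custom pool via the public API; the memory
type index would normally come from vmaFindMemoryTypeIndex() and is
hypothetical here.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // hypothetical, from vmaFindMemoryTypeIndex()
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 0 would mean "use preferred size"
    poolCreateInfo.minBlockCount = 1;
    poolCreateInfo.maxBlockCount = 0; // translated to SIZE_MAX above, i.e. no limit

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ...use the pool via VmaAllocationCreateInfo::pool, then:
    vmaDestroyPool(allocator, pool);
*/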
12995 
12996 void VmaAllocator_T::DestroyPool(VmaPool pool)
12997 {
12998  // Remove from m_Pools.
12999  {
13000  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13001  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
13002  VMA_ASSERT(success && "Pool not found in Allocator.");
13003  }
13004 
13005  vma_delete(this, pool);
13006 }
13007 
13008 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
13009 {
13010  pool->m_BlockVector.GetPoolStats(pPoolStats);
13011 }
13012 
13013 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
13014 {
13015  m_CurrentFrameIndex.store(frameIndex);
13016 }
13017 
13018 void VmaAllocator_T::MakePoolAllocationsLost(
13019  VmaPool hPool,
13020  size_t* pLostAllocationCount)
13021 {
13022  hPool->m_BlockVector.MakePoolAllocationsLost(
13023  m_CurrentFrameIndex.load(),
13024  pLostAllocationCount);
13025 }
13026 
13027 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
13028 {
13029  return hPool->m_BlockVector.CheckCorruption();
13030 }
13031 
13032 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
13033 {
13034  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
13035 
13036  // Process default pools.
13037  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13038  {
13039  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
13040  {
13041  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
13042  VMA_ASSERT(pBlockVector);
13043  VkResult localRes = pBlockVector->CheckCorruption();
13044  switch(localRes)
13045  {
13046  case VK_ERROR_FEATURE_NOT_PRESENT:
13047  break;
13048  case VK_SUCCESS:
13049  finalRes = VK_SUCCESS;
13050  break;
13051  default:
13052  return localRes;
13053  }
13054  }
13055  }
13056 
13057  // Process custom pools.
13058  {
13059  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13060  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
13061  {
13062  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
13063  {
13064  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
13065  switch(localRes)
13066  {
13067  case VK_ERROR_FEATURE_NOT_PRESENT:
13068  break;
13069  case VK_SUCCESS:
13070  finalRes = VK_SUCCESS;
13071  break;
13072  default:
13073  return localRes;
13074  }
13075  }
13076  }
13077  }
13078 
13079  return finalRes;
13080 }
13081 
13082 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
13083 {
13084  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
13085  (*pAllocation)->InitLost();
13086 }
13087 
13088 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
13089 {
13090  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
13091 
13092  VkResult res;
13093  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13094  {
13095  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13096  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
13097  {
13098  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13099  if(res == VK_SUCCESS)
13100  {
13101  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
13102  }
13103  }
13104  else
13105  {
13106  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
13107  }
13108  }
13109  else
13110  {
13111  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
13112  }
13113 
13114  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
13115  {
13116  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
13117  }
13118 
13119  return res;
13120 }
13121 
13122 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
13123 {
13124  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
13125  {
13126  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
13127  }
13128 
13129  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
13130 
13131  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
13132  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
13133  {
13134  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
13135  m_HeapSizeLimit[heapIndex] += size;
13136  }
13137 }
13138 
13139 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
13140 {
13141  if(hAllocation->CanBecomeLost())
13142  {
13143  return VK_ERROR_MEMORY_MAP_FAILED;
13144  }
13145 
13146  switch(hAllocation->GetType())
13147  {
13148  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13149  {
13150  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13151  char *pBytes = VMA_NULL;
13152  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
13153  if(res == VK_SUCCESS)
13154  {
13155  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
13156  hAllocation->BlockAllocMap();
13157  }
13158  return res;
13159  }
13160  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13161  return hAllocation->DedicatedAllocMap(this, ppData);
13162  default:
13163  VMA_ASSERT(0);
13164  return VK_ERROR_MEMORY_MAP_FAILED;
13165  }
13166 }
13167 
13168 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
13169 {
13170  switch(hAllocation->GetType())
13171  {
13172  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13173  {
13174  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
13175  hAllocation->BlockAllocUnmap();
13176  pBlock->Unmap(this, 1);
13177  }
13178  break;
13179  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13180  hAllocation->DedicatedAllocUnmap(this);
13181  break;
13182  default:
13183  VMA_ASSERT(0);
13184  }
13185 }
13186 
13187 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13188 {
13189  VkResult res = VK_SUCCESS;
13190  switch(hAllocation->GetType())
13191  {
13192  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13193  res = GetVulkanFunctions().vkBindBufferMemory(
13194  m_hDevice,
13195  hBuffer,
13196  hAllocation->GetMemory(),
13197  0); //memoryOffset
13198  break;
13199  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13200  {
13201  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13202  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13203  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13204  break;
13205  }
13206  default:
13207  VMA_ASSERT(0);
13208  }
13209  return res;
13210 }
13211 
13212 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13213 {
13214  VkResult res = VK_SUCCESS;
13215  switch(hAllocation->GetType())
13216  {
13217  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13218  res = GetVulkanFunctions().vkBindImageMemory(
13219  m_hDevice,
13220  hImage,
13221  hAllocation->GetMemory(),
13222  0); //memoryOffset
13223  break;
13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13225  {
13226  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13227  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13228  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13229  break;
13230  }
13231  default:
13232  VMA_ASSERT(0);
13233  }
13234  return res;
13235 }
13236 
13237 void VmaAllocator_T::FlushOrInvalidateAllocation(
13238  VmaAllocation hAllocation,
13239  VkDeviceSize offset, VkDeviceSize size,
13240  VMA_CACHE_OPERATION op)
13241 {
13242  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13243  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13244  {
13245  const VkDeviceSize allocationSize = hAllocation->GetSize();
13246  VMA_ASSERT(offset <= allocationSize);
13247 
13248  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13249 
13250  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13251  memRange.memory = hAllocation->GetMemory();
13252 
13253  switch(hAllocation->GetType())
13254  {
13255  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13256  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13257  if(size == VK_WHOLE_SIZE)
13258  {
13259  memRange.size = allocationSize - memRange.offset;
13260  }
13261  else
13262  {
13263  VMA_ASSERT(offset + size <= allocationSize);
13264  memRange.size = VMA_MIN(
13265  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13266  allocationSize - memRange.offset);
13267  }
13268  break;
13269 
13270  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13271  {
13272  // 1. Still within this allocation.
13273  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13274  if(size == VK_WHOLE_SIZE)
13275  {
13276  size = allocationSize - offset;
13277  }
13278  else
13279  {
13280  VMA_ASSERT(offset + size <= allocationSize);
13281  }
13282  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13283 
13284  // 2. Adjust to whole block.
13285  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13286  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13287  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13288  memRange.offset += allocationOffset;
13289  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13290 
13291  break;
13292  }
13293 
13294  default:
13295  VMA_ASSERT(0);
13296  }
13297 
13298  switch(op)
13299  {
13300  case VMA_CACHE_FLUSH:
13301  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13302  break;
13303  case VMA_CACHE_INVALIDATE:
13304  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13305  break;
13306  default:
13307  VMA_ASSERT(0);
13308  }
13309  }
13310  // else: Just ignore this call.
13311 }
13312 
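// Worked example of the alignment math above (illustrative numbers): with
// nonCoherentAtomSize = 64, offset = 100, size = 200 on a 512-byte dedicated
// allocation:
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VMA_MIN(VmaAlignUp(200 + (100 - 64), 64), 512 - 64) = 256
// The flushed range [64, 320) covers the requested [100, 300) and respects the
// nonCoherentAtomSize alignment that the Vulkan spec requires.
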
13313 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13314 {
13315  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13316 
13317  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13318  {
13319  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13320  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13321  VMA_ASSERT(pDedicatedAllocations);
13322  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13323  VMA_ASSERT(success);
13324  }
13325 
13326  VkDeviceMemory hMemory = allocation->GetMemory();
13327 
13328  /*
13329  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
13330  before vkFreeMemory.
13331 
13332  if(allocation->GetMappedData() != VMA_NULL)
13333  {
13334  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13335  }
13336  */
13337 
13338  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13339 
13340  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13341 }
13342 
13343 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13344 {
13345  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13346  !hAllocation->CanBecomeLost() &&
13347  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13348  {
13349  void* pData = VMA_NULL;
13350  VkResult res = Map(hAllocation, &pData);
13351  if(res == VK_SUCCESS)
13352  {
13353  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13354  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13355  Unmap(hAllocation);
13356  }
13357  else
13358  {
13359  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13360  }
13361  }
13362 }
13363 
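// Note: FillAllocation is active only when the implementation is compiled with
// VMA_DEBUG_INITIALIZE_ALLOCATIONS enabled, e.g. (illustrative):
/*
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
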
13364 #if VMA_STATS_STRING_ENABLED
13365 
13366 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13367 {
13368  bool dedicatedAllocationsStarted = false;
13369  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13370  {
13371  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13372  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13373  VMA_ASSERT(pDedicatedAllocVector);
13374  if(pDedicatedAllocVector->empty() == false)
13375  {
13376  if(dedicatedAllocationsStarted == false)
13377  {
13378  dedicatedAllocationsStarted = true;
13379  json.WriteString("DedicatedAllocations");
13380  json.BeginObject();
13381  }
13382 
13383  json.BeginString("Type ");
13384  json.ContinueString(memTypeIndex);
13385  json.EndString();
13386 
13387  json.BeginArray();
13388 
13389  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13390  {
13391  json.BeginObject(true);
13392  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13393  hAlloc->PrintParameters(json);
13394  json.EndObject();
13395  }
13396 
13397  json.EndArray();
13398  }
13399  }
13400  if(dedicatedAllocationsStarted)
13401  {
13402  json.EndObject();
13403  }
13404 
13405  {
13406  bool allocationsStarted = false;
13407  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13408  {
13409  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13410  {
13411  if(allocationsStarted == false)
13412  {
13413  allocationsStarted = true;
13414  json.WriteString("DefaultPools");
13415  json.BeginObject();
13416  }
13417 
13418  json.BeginString("Type ");
13419  json.ContinueString(memTypeIndex);
13420  json.EndString();
13421 
13422  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13423  }
13424  }
13425  if(allocationsStarted)
13426  {
13427  json.EndObject();
13428  }
13429  }
13430 
13431  // Custom pools
13432  {
13433  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13434  const size_t poolCount = m_Pools.size();
13435  if(poolCount > 0)
13436  {
13437  json.WriteString("Pools");
13438  json.BeginObject();
13439  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13440  {
13441  json.BeginString();
13442  json.ContinueString(m_Pools[poolIndex]->GetId());
13443  json.EndString();
13444 
13445  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13446  }
13447  json.EndObject();
13448  }
13449  }
13450 }
13451 
13452 #endif // #if VMA_STATS_STRING_ENABLED
13453 
13454 ////////////////////////////////////////////////////////////////////////////////
13455 // Public interface
13456 
13457 VkResult vmaCreateAllocator(
13458  const VmaAllocatorCreateInfo* pCreateInfo,
13459  VmaAllocator* pAllocator)
13460 {
13461  VMA_ASSERT(pCreateInfo && pAllocator);
13462  VMA_DEBUG_LOG("vmaCreateAllocator");
13463  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13464  return (*pAllocator)->Init(pCreateInfo);
13465 }
13466 
13467 void vmaDestroyAllocator(
13468  VmaAllocator allocator)
13469 {
13470  if(allocator != VK_NULL_HANDLE)
13471  {
13472  VMA_DEBUG_LOG("vmaDestroyAllocator");
13473  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13474  vma_delete(&allocationCallbacks, allocator);
13475  }
13476 }
13477 
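// Usage sketch (illustrative, not part of the original source): creating and
// destroying an allocator. `physicalDevice` and `device` are assumed to be
// valid handles created by the application.
/*
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
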
13478 void vmaGetPhysicalDeviceProperties(
13479  VmaAllocator allocator,
13480  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13481 {
13482  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13483  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13484 }
13485 
13486 void vmaGetMemoryProperties(
13487  VmaAllocator allocator,
13488  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13489 {
13490  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13491  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13492 }
13493 
13494 void vmaGetMemoryTypeProperties(
13495  VmaAllocator allocator,
13496  uint32_t memoryTypeIndex,
13497  VkMemoryPropertyFlags* pFlags)
13498 {
13499  VMA_ASSERT(allocator && pFlags);
13500  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13501  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13502 }
13503 
13504 void vmaSetCurrentFrameIndex(
13505  VmaAllocator allocator,
13506  uint32_t frameIndex)
13507 {
13508  VMA_ASSERT(allocator);
13509  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13510 
13511  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13512 
13513  allocator->SetCurrentFrameIndex(frameIndex);
13514 }
13515 
13516 void vmaCalculateStats(
13517  VmaAllocator allocator,
13518  VmaStats* pStats)
13519 {
13520  VMA_ASSERT(allocator && pStats);
13521  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13522  allocator->CalculateStats(pStats);
13523 }
13524 
13525 #if VMA_STATS_STRING_ENABLED
13526 
13527 void vmaBuildStatsString(
13528  VmaAllocator allocator,
13529  char** ppStatsString,
13530  VkBool32 detailedMap)
13531 {
13532  VMA_ASSERT(allocator && ppStatsString);
13533  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13534 
13535  VmaStringBuilder sb(allocator);
13536  {
13537  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13538  json.BeginObject();
13539 
13540  VmaStats stats;
13541  allocator->CalculateStats(&stats);
13542 
13543  json.WriteString("Total");
13544  VmaPrintStatInfo(json, stats.total);
13545 
13546  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13547  {
13548  json.BeginString("Heap ");
13549  json.ContinueString(heapIndex);
13550  json.EndString();
13551  json.BeginObject();
13552 
13553  json.WriteString("Size");
13554  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13555 
13556  json.WriteString("Flags");
13557  json.BeginArray(true);
13558  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13559  {
13560  json.WriteString("DEVICE_LOCAL");
13561  }
13562  json.EndArray();
13563 
13564  if(stats.memoryHeap[heapIndex].blockCount > 0)
13565  {
13566  json.WriteString("Stats");
13567  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13568  }
13569 
13570  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13571  {
13572  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13573  {
13574  json.BeginString("Type ");
13575  json.ContinueString(typeIndex);
13576  json.EndString();
13577 
13578  json.BeginObject();
13579 
13580  json.WriteString("Flags");
13581  json.BeginArray(true);
13582  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13583  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13584  {
13585  json.WriteString("DEVICE_LOCAL");
13586  }
13587  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13588  {
13589  json.WriteString("HOST_VISIBLE");
13590  }
13591  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13592  {
13593  json.WriteString("HOST_COHERENT");
13594  }
13595  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13596  {
13597  json.WriteString("HOST_CACHED");
13598  }
13599  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13600  {
13601  json.WriteString("LAZILY_ALLOCATED");
13602  }
13603  json.EndArray();
13604 
13605  if(stats.memoryType[typeIndex].blockCount > 0)
13606  {
13607  json.WriteString("Stats");
13608  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13609  }
13610 
13611  json.EndObject();
13612  }
13613  }
13614 
13615  json.EndObject();
13616  }
13617  if(detailedMap == VK_TRUE)
13618  {
13619  allocator->PrintDetailedMap(json);
13620  }
13621 
13622  json.EndObject();
13623  }
13624 
13625  const size_t len = sb.GetLength();
13626  char* const pChars = vma_new_array(allocator, char, len + 1);
13627  if(len > 0)
13628  {
13629  memcpy(pChars, sb.GetData(), len);
13630  }
13631  pChars[len] = '\0';
13632  *ppStatsString = pChars;
13633 }
13634 
13635 void vmaFreeStatsString(
13636  VmaAllocator allocator,
13637  char* pStatsString)
13638 {
13639  if(pStatsString != VMA_NULL)
13640  {
13641  VMA_ASSERT(allocator);
13642  size_t len = strlen(pStatsString);
13643  vma_delete_array(allocator, pStatsString, len + 1);
13644  }
13645 }
13646 
13647 #endif // #if VMA_STATS_STRING_ENABLED
13648 
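// Usage sketch (illustrative): dumping allocator statistics as a JSON string.
/*
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
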
13649 /*
13650 This function is not protected by any mutex because it just reads immutable data.
13651 */
13652 VkResult vmaFindMemoryTypeIndex(
13653  VmaAllocator allocator,
13654  uint32_t memoryTypeBits,
13655  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13656  uint32_t* pMemoryTypeIndex)
13657 {
13658  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13659  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13660  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13661 
13662  if(pAllocationCreateInfo->memoryTypeBits != 0)
13663  {
13664  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13665  }
13666 
13667  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13668  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13669 
13670  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13671  if(mapped)
13672  {
13673  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13674  }
13675 
13676  // Convert usage to requiredFlags and preferredFlags.
13677  switch(pAllocationCreateInfo->usage)
13678  {
13679  case VMA_MEMORY_USAGE_UNKNOWN:
13680  break;
13681  case VMA_MEMORY_USAGE_GPU_ONLY:
13682  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13683  {
13684  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13685  }
13686  break;
13687  case VMA_MEMORY_USAGE_CPU_ONLY:
13688  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13689  break;
13690  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13691  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13692  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13693  {
13694  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13695  }
13696  break;
13697  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13698  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13699  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13700  break;
13701  default:
13702  break;
13703  }
13704 
13705  *pMemoryTypeIndex = UINT32_MAX;
13706  uint32_t minCost = UINT32_MAX;
13707  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13708  memTypeIndex < allocator->GetMemoryTypeCount();
13709  ++memTypeIndex, memTypeBit <<= 1)
13710  {
13711  // This memory type is acceptable according to memoryTypeBits bitmask.
13712  if((memTypeBit & memoryTypeBits) != 0)
13713  {
13714  const VkMemoryPropertyFlags currFlags =
13715  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13716  // This memory type contains requiredFlags.
13717  if((requiredFlags & ~currFlags) == 0)
13718  {
13719  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13720  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13721  // Remember memory type with lowest cost.
13722  if(currCost < minCost)
13723  {
13724  *pMemoryTypeIndex = memTypeIndex;
13725  if(currCost == 0)
13726  {
13727  return VK_SUCCESS;
13728  }
13729  minCost = currCost;
13730  }
13731  }
13732  }
13733  }
13734  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13735 }
13736 
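// Usage sketch (illustrative): finding a memory type for an upload (staging)
// allocation. Passing UINT32_MAX as memoryTypeBits accepts every memory type.
/*
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // res == VK_ERROR_FEATURE_NOT_PRESENT if no memory type satisfies requiredFlags.
*/
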
13737 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13738  VmaAllocator allocator,
13739  const VkBufferCreateInfo* pBufferCreateInfo,
13740  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13741  uint32_t* pMemoryTypeIndex)
13742 {
13743  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13744  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13745  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13746  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13747 
13748  const VkDevice hDev = allocator->m_hDevice;
13749  VkBuffer hBuffer = VK_NULL_HANDLE;
13750  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13751  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13752  if(res == VK_SUCCESS)
13753  {
13754  VkMemoryRequirements memReq = {};
13755  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13756  hDev, hBuffer, &memReq);
13757 
13758  res = vmaFindMemoryTypeIndex(
13759  allocator,
13760  memReq.memoryTypeBits,
13761  pAllocationCreateInfo,
13762  pMemoryTypeIndex);
13763 
13764  allocator->GetVulkanFunctions().vkDestroyBuffer(
13765  hDev, hBuffer, allocator->GetAllocationCallbacks());
13766  }
13767  return res;
13768 }
13769 
13770 VkResult vmaFindMemoryTypeIndexForImageInfo(
13771  VmaAllocator allocator,
13772  const VkImageCreateInfo* pImageCreateInfo,
13773  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13774  uint32_t* pMemoryTypeIndex)
13775 {
13776  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13777  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13778  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13779  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13780 
13781  const VkDevice hDev = allocator->m_hDevice;
13782  VkImage hImage = VK_NULL_HANDLE;
13783  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13784  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13785  if(res == VK_SUCCESS)
13786  {
13787  VkMemoryRequirements memReq = {};
13788  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13789  hDev, hImage, &memReq);
13790 
13791  res = vmaFindMemoryTypeIndex(
13792  allocator,
13793  memReq.memoryTypeBits,
13794  pAllocationCreateInfo,
13795  pMemoryTypeIndex);
13796 
13797  allocator->GetVulkanFunctions().vkDestroyImage(
13798  hDev, hImage, allocator->GetAllocationCallbacks());
13799  }
13800  return res;
13801 }
13802 
13803 VkResult vmaCreatePool(
13804  VmaAllocator allocator,
13805  const VmaPoolCreateInfo* pCreateInfo,
13806  VmaPool* pPool)
13807 {
13808  VMA_ASSERT(allocator && pCreateInfo && pPool);
13809 
13810  VMA_DEBUG_LOG("vmaCreatePool");
13811 
13812  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13813 
13814  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13815 
13816 #if VMA_RECORDING_ENABLED
13817  if(allocator->GetRecorder() != VMA_NULL)
13818  {
13819  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13820  }
13821 #endif
13822 
13823  return res;
13824 }
13825 
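// Usage sketch (illustrative): creating a custom pool from a memory type index
// found with one of the vmaFindMemoryTypeIndex* functions.
/*
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
    poolCreateInfo.maxBlockCount = 2;               // at most 128 MiB total

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
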
13826 void vmaDestroyPool(
13827  VmaAllocator allocator,
13828  VmaPool pool)
13829 {
13830  VMA_ASSERT(allocator);
13831 
13832  if(pool == VK_NULL_HANDLE)
13833  {
13834  return;
13835  }
13836 
13837  VMA_DEBUG_LOG("vmaDestroyPool");
13838 
13839  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13840 
13841 #if VMA_RECORDING_ENABLED
13842  if(allocator->GetRecorder() != VMA_NULL)
13843  {
13844  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13845  }
13846 #endif
13847 
13848  allocator->DestroyPool(pool);
13849 }
13850 
13851 void vmaGetPoolStats(
13852  VmaAllocator allocator,
13853  VmaPool pool,
13854  VmaPoolStats* pPoolStats)
13855 {
13856  VMA_ASSERT(allocator && pool && pPoolStats);
13857 
13858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13859 
13860  allocator->GetPoolStats(pool, pPoolStats);
13861 }
13862 
13863 void vmaMakePoolAllocationsLost(
13864  VmaAllocator allocator,
13865  VmaPool pool,
13866  size_t* pLostAllocationCount)
13867 {
13868  VMA_ASSERT(allocator && pool);
13869 
13870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13871 
13872 #if VMA_RECORDING_ENABLED
13873  if(allocator->GetRecorder() != VMA_NULL)
13874  {
13875  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13876  }
13877 #endif
13878 
13879  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13880 }
13881 
13882 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13883 {
13884  VMA_ASSERT(allocator && pool);
13885 
13886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13887 
13888  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13889 
13890  return allocator->CheckPoolCorruption(pool);
13891 }
13892 
13893 VkResult vmaAllocateMemory(
13894  VmaAllocator allocator,
13895  const VkMemoryRequirements* pVkMemoryRequirements,
13896  const VmaAllocationCreateInfo* pCreateInfo,
13897  VmaAllocation* pAllocation,
13898  VmaAllocationInfo* pAllocationInfo)
13899 {
13900  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13901 
13902  VMA_DEBUG_LOG("vmaAllocateMemory");
13903 
13904  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13905 
13906  VkResult result = allocator->AllocateMemory(
13907  *pVkMemoryRequirements,
13908  false, // requiresDedicatedAllocation
13909  false, // prefersDedicatedAllocation
13910  VK_NULL_HANDLE, // dedicatedBuffer
13911  VK_NULL_HANDLE, // dedicatedImage
13912  *pCreateInfo,
13913  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13914  1, // allocationCount
13915  pAllocation);
13916 
13917 #if VMA_RECORDING_ENABLED
13918  if(allocator->GetRecorder() != VMA_NULL)
13919  {
13920  allocator->GetRecorder()->RecordAllocateMemory(
13921  allocator->GetCurrentFrameIndex(),
13922  *pVkMemoryRequirements,
13923  *pCreateInfo,
13924  *pAllocation);
13925  }
13926 #endif
13927 
13928  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13929  {
13930  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13931  }
13932 
13933  return result;
13934 }
13935 
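// Usage sketch (illustrative): allocating memory for manually queried
// requirements, then binding it. `device` and `buffer` are assumed to exist.
/*
    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &allocation, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, allocation, buffer);
    }
*/
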
13936 VkResult vmaAllocateMemoryPages(
13937  VmaAllocator allocator,
13938  const VkMemoryRequirements* pVkMemoryRequirements,
13939  const VmaAllocationCreateInfo* pCreateInfo,
13940  size_t allocationCount,
13941  VmaAllocation* pAllocations,
13942  VmaAllocationInfo* pAllocationInfo)
13943 {
13944  if(allocationCount == 0)
13945  {
13946  return VK_SUCCESS;
13947  }
13948 
13949  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
13950 
13951  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
13952 
13953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13954 
13955  VkResult result = allocator->AllocateMemory(
13956  *pVkMemoryRequirements,
13957  false, // requiresDedicatedAllocation
13958  false, // prefersDedicatedAllocation
13959  VK_NULL_HANDLE, // dedicatedBuffer
13960  VK_NULL_HANDLE, // dedicatedImage
13961  *pCreateInfo,
13962  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13963  allocationCount,
13964  pAllocations);
13965 
13966 #if VMA_RECORDING_ENABLED
13967  if(allocator->GetRecorder() != VMA_NULL)
13968  {
13969  // TODO: Extend recording format with this function.
13970  /*
13971  allocator->GetRecorder()->RecordAllocateMemoryPages(
13972  allocator->GetCurrentFrameIndex(),
13973  *pVkMemoryRequirements,
13974  *pCreateInfo,
13975  *pAllocation);
13976  */
13977  }
13978 #endif
13979 
13980  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13981  {
13982  for(size_t i = 0; i < allocationCount; ++i)
13983  {
13984  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
13985  }
13986  }
13987 
13988  return result;
13989 }
13990 
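// Usage sketch (illustrative): allocating several allocations with identical
// requirements in one call, reusing `memReq`/`allocCreateInfo` from the sketch above.
/*
    VmaAllocation allocations[3] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, 3, allocations, VMA_NULL);
    // ...
    vmaFreeMemoryPages(allocator, 3, allocations);
*/
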
13991 VkResult vmaAllocateMemoryForBuffer(
13992  VmaAllocator allocator,
13993  VkBuffer buffer,
13994  const VmaAllocationCreateInfo* pCreateInfo,
13995  VmaAllocation* pAllocation,
13996  VmaAllocationInfo* pAllocationInfo)
13997 {
13998  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13999 
14000  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
14001 
14002  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14003 
14004  VkMemoryRequirements vkMemReq = {};
14005  bool requiresDedicatedAllocation = false;
14006  bool prefersDedicatedAllocation = false;
14007  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
14008  requiresDedicatedAllocation,
14009  prefersDedicatedAllocation);
14010 
14011  VkResult result = allocator->AllocateMemory(
14012  vkMemReq,
14013  requiresDedicatedAllocation,
14014  prefersDedicatedAllocation,
14015  buffer, // dedicatedBuffer
14016  VK_NULL_HANDLE, // dedicatedImage
14017  *pCreateInfo,
14018  VMA_SUBALLOCATION_TYPE_BUFFER,
14019  1, // allocationCount
14020  pAllocation);
14021 
14022 #if VMA_RECORDING_ENABLED
14023  if(allocator->GetRecorder() != VMA_NULL)
14024  {
14025  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
14026  allocator->GetCurrentFrameIndex(),
14027  vkMemReq,
14028  requiresDedicatedAllocation,
14029  prefersDedicatedAllocation,
14030  *pCreateInfo,
14031  *pAllocation);
14032  }
14033 #endif
14034 
14035  if(pAllocationInfo && result == VK_SUCCESS)
14036  {
14037  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14038  }
14039 
14040  return result;
14041 }
14042 
14043 VkResult vmaAllocateMemoryForImage(
14044  VmaAllocator allocator,
14045  VkImage image,
14046  const VmaAllocationCreateInfo* pCreateInfo,
14047  VmaAllocation* pAllocation,
14048  VmaAllocationInfo* pAllocationInfo)
14049 {
14050  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
14051 
14052  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
14053 
14054  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14055 
14056  VkMemoryRequirements vkMemReq = {};
14057  bool requiresDedicatedAllocation = false;
14058  bool prefersDedicatedAllocation = false;
14059  allocator->GetImageMemoryRequirements(image, vkMemReq,
14060  requiresDedicatedAllocation, prefersDedicatedAllocation);
14061 
14062  VkResult result = allocator->AllocateMemory(
14063  vkMemReq,
14064  requiresDedicatedAllocation,
14065  prefersDedicatedAllocation,
14066  VK_NULL_HANDLE, // dedicatedBuffer
14067  image, // dedicatedImage
14068  *pCreateInfo,
14069  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
14070  1, // allocationCount
14071  pAllocation);
14072 
14073 #if VMA_RECORDING_ENABLED
14074  if(allocator->GetRecorder() != VMA_NULL)
14075  {
14076  allocator->GetRecorder()->RecordAllocateMemoryForImage(
14077  allocator->GetCurrentFrameIndex(),
14078  vkMemReq,
14079  requiresDedicatedAllocation,
14080  prefersDedicatedAllocation,
14081  *pCreateInfo,
14082  *pAllocation);
14083  }
14084 #endif
14085 
14086  if(pAllocationInfo && result == VK_SUCCESS)
14087  {
14088  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14089  }
14090 
14091  return result;
14092 }
14093 
14094 void vmaFreeMemory(
14095  VmaAllocator allocator,
14096  VmaAllocation allocation)
14097 {
14098  VMA_ASSERT(allocator);
14099 
14100  if(allocation == VK_NULL_HANDLE)
14101  {
14102  return;
14103  }
14104 
14105  VMA_DEBUG_LOG("vmaFreeMemory");
14106 
14107  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14108 
14109 #if VMA_RECORDING_ENABLED
14110  if(allocator->GetRecorder() != VMA_NULL)
14111  {
14112  allocator->GetRecorder()->RecordFreeMemory(
14113  allocator->GetCurrentFrameIndex(),
14114  allocation);
14115  }
14116 #endif
14117 
14118  allocator->FreeMemory(
14119  1, // allocationCount
14120  &allocation);
14121 }
14122 
14123 void vmaFreeMemoryPages(
14124  VmaAllocator allocator,
14125  size_t allocationCount,
14126  VmaAllocation* pAllocations)
14127 {
14128  if(allocationCount == 0)
14129  {
14130  return;
14131  }
14132 
14133  VMA_ASSERT(allocator);
14134 
14135  VMA_DEBUG_LOG("vmaFreeMemoryPages");
14136 
14137  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14138 
14139 #if VMA_RECORDING_ENABLED
14140  // TODO: Add this to the recording file format.
14141  /*
14142  if(allocator->GetRecorder() != VMA_NULL)
14143  {
14144  allocator->GetRecorder()->RecordFreeMemoryPages(
14145  allocator->GetCurrentFrameIndex(),
14146  allocation);
14147  }
14148  */
14149 #endif
14150 
14151  allocator->FreeMemory(allocationCount, pAllocations);
14152 }
14153 
14154 void vmaGetAllocationInfo(
14155  VmaAllocator allocator,
14156  VmaAllocation allocation,
14157  VmaAllocationInfo* pAllocationInfo)
14158 {
14159  VMA_ASSERT(allocator && allocation && pAllocationInfo);
14160 
14161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14162 
14163 #if VMA_RECORDING_ENABLED
14164  if(allocator->GetRecorder() != VMA_NULL)
14165  {
14166  allocator->GetRecorder()->RecordGetAllocationInfo(
14167  allocator->GetCurrentFrameIndex(),
14168  allocation);
14169  }
14170 #endif
14171 
14172  allocator->GetAllocationInfo(allocation, pAllocationInfo);
14173 }
14174 
14175 VkBool32 vmaTouchAllocation(
14176  VmaAllocator allocator,
14177  VmaAllocation allocation)
14178 {
14179  VMA_ASSERT(allocator && allocation);
14180 
14181  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14182 
14183 #if VMA_RECORDING_ENABLED
14184  if(allocator->GetRecorder() != VMA_NULL)
14185  {
14186  allocator->GetRecorder()->RecordTouchAllocation(
14187  allocator->GetCurrentFrameIndex(),
14188  allocation);
14189  }
14190 #endif
14191 
14192  return allocator->TouchAllocation(allocation);
14193 }
14194 
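// Usage sketch (illustrative): the per-frame pattern for allocations created
// with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
/*
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
    {
        // The allocation is lost: free it and recreate the resource.
    }
*/
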
14195 void vmaSetAllocationUserData(
14196  VmaAllocator allocator,
14197  VmaAllocation allocation,
14198  void* pUserData)
14199 {
14200  VMA_ASSERT(allocator && allocation);
14201 
14202  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14203 
14204  allocation->SetUserData(allocator, pUserData);
14205 
14206 #if VMA_RECORDING_ENABLED
14207  if(allocator->GetRecorder() != VMA_NULL)
14208  {
14209  allocator->GetRecorder()->RecordSetAllocationUserData(
14210  allocator->GetCurrentFrameIndex(),
14211  allocation,
14212  pUserData);
14213  }
14214 #endif
14215 }
14216 
14217 void vmaCreateLostAllocation(
14218  VmaAllocator allocator,
14219  VmaAllocation* pAllocation)
14220 {
14221  VMA_ASSERT(allocator && pAllocation);
14222 
14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14224 
14225  allocator->CreateLostAllocation(pAllocation);
14226 
14227 #if VMA_RECORDING_ENABLED
14228  if(allocator->GetRecorder() != VMA_NULL)
14229  {
14230  allocator->GetRecorder()->RecordCreateLostAllocation(
14231  allocator->GetCurrentFrameIndex(),
14232  *pAllocation);
14233  }
14234 #endif
14235 }
14236 
14237 VkResult vmaMapMemory(
14238  VmaAllocator allocator,
14239  VmaAllocation allocation,
14240  void** ppData)
14241 {
14242  VMA_ASSERT(allocator && allocation && ppData);
14243 
14244  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14245 
14246  VkResult res = allocator->Map(allocation, ppData);
14247 
14248 #if VMA_RECORDING_ENABLED
14249  if(allocator->GetRecorder() != VMA_NULL)
14250  {
14251  allocator->GetRecorder()->RecordMapMemory(
14252  allocator->GetCurrentFrameIndex(),
14253  allocation);
14254  }
14255 #endif
14256 
14257  return res;
14258 }
14259 
14260 void vmaUnmapMemory(
14261  VmaAllocator allocator,
14262  VmaAllocation allocation)
14263 {
14264  VMA_ASSERT(allocator && allocation);
14265 
14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14267 
14268 #if VMA_RECORDING_ENABLED
14269  if(allocator->GetRecorder() != VMA_NULL)
14270  {
14271  allocator->GetRecorder()->RecordUnmapMemory(
14272  allocator->GetCurrentFrameIndex(),
14273  allocation);
14274  }
14275 #endif
14276 
14277  allocator->Unmap(allocation);
14278 }
14279 
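// Usage sketch (illustrative): writing to a host-visible allocation.
// `srcData` and `dataSize` are assumed to be provided by the application.
/*
    void* mappedData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
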
14280 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14281 {
14282  VMA_ASSERT(allocator && allocation);
14283 
14284  VMA_DEBUG_LOG("vmaFlushAllocation");
14285 
14286  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14287 
14288  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14289 
14290 #if VMA_RECORDING_ENABLED
14291  if(allocator->GetRecorder() != VMA_NULL)
14292  {
14293  allocator->GetRecorder()->RecordFlushAllocation(
14294  allocator->GetCurrentFrameIndex(),
14295  allocation, offset, size);
14296  }
14297 #endif
14298 }
14299 
14300 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14301 {
14302  VMA_ASSERT(allocator && allocation);
14303 
14304  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14305 
14306  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14307 
14308  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14309 
14310 #if VMA_RECORDING_ENABLED
14311  if(allocator->GetRecorder() != VMA_NULL)
14312  {
14313  allocator->GetRecorder()->RecordInvalidateAllocation(
14314  allocator->GetCurrentFrameIndex(),
14315  allocation, offset, size);
14316  }
14317 #endif
14318 }
14319 
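// Usage sketch (illustrative): after writing through a persistently mapped
// pointer into HOST_VISIBLE but not HOST_COHERENT memory, flush the written
// range. Both functions are no-ops on coherent memory types (see
// FlushOrInvalidateAllocation above).
/*
    memcpy(mappedData, srcData, dataSize);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/
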
14320 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14321 {
14322  VMA_ASSERT(allocator);
14323 
14324  VMA_DEBUG_LOG("vmaCheckCorruption");
14325 
14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14327 
14328  return allocator->CheckCorruption(memoryTypeBits);
14329 }
14330 
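// Usage sketch (illustrative): corruption checking only has an effect when the
// implementation is compiled with VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION; otherwise it returns VK_ERROR_FEATURE_NOT_PRESENT.
/*
    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
*/
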
14331 VkResult vmaDefragment(
14332  VmaAllocator allocator,
14333  VmaAllocation* pAllocations,
14334  size_t allocationCount,
14335  VkBool32* pAllocationsChanged,
14336  const VmaDefragmentationInfo *pDefragmentationInfo,
14337  VmaDefragmentationStats* pDefragmentationStats)
14338 {
14339  VMA_ASSERT(allocator && pAllocations);
14340 
14341  VMA_DEBUG_LOG("vmaDefragment");
14342 
14343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14344 
14345  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14346 }
14347 
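// Usage sketch (illustrative): compacting existing allocations. `allocations`
// and ALLOC_COUNT are assumed to be provided by the application; in this
// version only allocations in HOST_VISIBLE memory can be moved.
/*
    VkBool32 allocationsChanged[ALLOC_COUNT] = {};
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOC_COUNT, allocationsChanged, VMA_NULL, &stats);
    // Where allocationsChanged[i] == VK_TRUE, the buffer or image bound to
    // allocations[i] must be destroyed, recreated, and bound again.
*/
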
14348 VkResult vmaBindBufferMemory(
14349  VmaAllocator allocator,
14350  VmaAllocation allocation,
14351  VkBuffer buffer)
14352 {
14353  VMA_ASSERT(allocator && allocation && buffer);
14354 
14355  VMA_DEBUG_LOG("vmaBindBufferMemory");
14356 
14357  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14358 
14359  return allocator->BindBufferMemory(allocation, buffer);
14360 }
14361 
14362 VkResult vmaBindImageMemory(
14363  VmaAllocator allocator,
14364  VmaAllocation allocation,
14365  VkImage image)
14366 {
14367  VMA_ASSERT(allocator && allocation && image);
14368 
14369  VMA_DEBUG_LOG("vmaBindImageMemory");
14370 
14371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14372 
14373  return allocator->BindImageMemory(allocation, image);
14374 }
14375 
14376 VkResult vmaCreateBuffer(
14377  VmaAllocator allocator,
14378  const VkBufferCreateInfo* pBufferCreateInfo,
14379  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14380  VkBuffer* pBuffer,
14381  VmaAllocation* pAllocation,
14382  VmaAllocationInfo* pAllocationInfo)
14383 {
14384  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14385 
14386  VMA_DEBUG_LOG("vmaCreateBuffer");
14387 
14388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14389 
14390  *pBuffer = VK_NULL_HANDLE;
14391  *pAllocation = VK_NULL_HANDLE;
14392 
14393  // 1. Create VkBuffer.
14394  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14395  allocator->m_hDevice,
14396  pBufferCreateInfo,
14397  allocator->GetAllocationCallbacks(),
14398  pBuffer);
14399  if(res >= 0)
14400  {
14401  // 2. vkGetBufferMemoryRequirements.
14402  VkMemoryRequirements vkMemReq = {};
14403  bool requiresDedicatedAllocation = false;
14404  bool prefersDedicatedAllocation = false;
14405  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14406  requiresDedicatedAllocation, prefersDedicatedAllocation);
14407 
14408  // Make sure alignment requirements for specific buffer usages reported
14409  // in Physical Device Properties are satisfied by the alignment reported by memory requirements.
14410  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14411  {
14412  VMA_ASSERT(vkMemReq.alignment %
14413  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14414  }
14415  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14416  {
14417  VMA_ASSERT(vkMemReq.alignment %
14418  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14419  }
14420  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14421  {
14422  VMA_ASSERT(vkMemReq.alignment %
14423  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14424  }
14425 
14426  // 3. Allocate memory using allocator.
14427  res = allocator->AllocateMemory(
14428  vkMemReq,
14429  requiresDedicatedAllocation,
14430  prefersDedicatedAllocation,
14431  *pBuffer, // dedicatedBuffer
14432  VK_NULL_HANDLE, // dedicatedImage
14433  *pAllocationCreateInfo,
14434  VMA_SUBALLOCATION_TYPE_BUFFER,
14435  1, // allocationCount
14436  pAllocation);
14437 
14438 #if VMA_RECORDING_ENABLED
14439  if(allocator->GetRecorder() != VMA_NULL)
14440  {
14441  allocator->GetRecorder()->RecordCreateBuffer(
14442  allocator->GetCurrentFrameIndex(),
14443  *pBufferCreateInfo,
14444  *pAllocationCreateInfo,
14445  *pAllocation);
14446  }
14447 #endif
14448 
14449  if(res >= 0)
14450  {
14451  // 4. Bind buffer with memory.
14452  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14453  if(res >= 0)
14454  {
14455  // All steps succeeded.
14456  #if VMA_STATS_STRING_ENABLED
14457  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14458  #endif
14459  if(pAllocationInfo != VMA_NULL)
14460  {
14461  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14462  }
14463 
14464  return VK_SUCCESS;
14465  }
14466  allocator->FreeMemory(
14467  1, // allocationCount
14468  pAllocation);
14469  *pAllocation = VK_NULL_HANDLE;
14470  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14471  *pBuffer = VK_NULL_HANDLE;
14472  return res;
14473  }
14474  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14475  *pBuffer = VK_NULL_HANDLE;
14476  return res;
14477  }
14478  return res;
14479 }
14480 
14481 void vmaDestroyBuffer(
14482  VmaAllocator allocator,
14483  VkBuffer buffer,
14484  VmaAllocation allocation)
14485 {
14486  VMA_ASSERT(allocator);
14487 
14488  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14489  {
14490  return;
14491  }
14492 
14493  VMA_DEBUG_LOG("vmaDestroyBuffer");
14494 
14495  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14496 
14497 #if VMA_RECORDING_ENABLED
14498  if(allocator->GetRecorder() != VMA_NULL)
14499  {
14500  allocator->GetRecorder()->RecordDestroyBuffer(
14501  allocator->GetCurrentFrameIndex(),
14502  allocation);
14503  }
14504 #endif
14505 
14506  if(buffer != VK_NULL_HANDLE)
14507  {
14508  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14509  }
14510 
14511  if(allocation != VK_NULL_HANDLE)
14512  {
14513  allocator->FreeMemory(
14514  1, // allocationCount
14515  &allocation);
14516  }
14517 }
14518 
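// Usage sketch (illustrative): the common one-call path that creates a buffer,
// allocates memory for it, and binds the two together.
/*
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
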
14519 VkResult vmaCreateImage(
14520  VmaAllocator allocator,
14521  const VkImageCreateInfo* pImageCreateInfo,
14522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14523  VkImage* pImage,
14524  VmaAllocation* pAllocation,
14525  VmaAllocationInfo* pAllocationInfo)
14526 {
14527  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14528 
14529  VMA_DEBUG_LOG("vmaCreateImage");
14530 
14531  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14532 
14533  *pImage = VK_NULL_HANDLE;
14534  *pAllocation = VK_NULL_HANDLE;
14535 
14536  // 1. Create VkImage.
14537  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14538  allocator->m_hDevice,
14539  pImageCreateInfo,
14540  allocator->GetAllocationCallbacks(),
14541  pImage);
14542  if(res >= 0)
14543  {
14544  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14545  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14546  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14547 
14548  // 2. Allocate memory using allocator.
14549  VkMemoryRequirements vkMemReq = {};
14550  bool requiresDedicatedAllocation = false;
14551  bool prefersDedicatedAllocation = false;
14552  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14553  requiresDedicatedAllocation, prefersDedicatedAllocation);
14554 
14555  res = allocator->AllocateMemory(
14556  vkMemReq,
14557  requiresDedicatedAllocation,
14558  prefersDedicatedAllocation,
14559  VK_NULL_HANDLE, // dedicatedBuffer
14560  *pImage, // dedicatedImage
14561  *pAllocationCreateInfo,
14562  suballocType,
14563  1, // allocationCount
14564  pAllocation);
14565 
14566 #if VMA_RECORDING_ENABLED
14567  if(allocator->GetRecorder() != VMA_NULL)
14568  {
14569  allocator->GetRecorder()->RecordCreateImage(
14570  allocator->GetCurrentFrameIndex(),
14571  *pImageCreateInfo,
14572  *pAllocationCreateInfo,
14573  *pAllocation);
14574  }
14575 #endif
14576 
14577  if(res >= 0)
14578  {
14579  // 3. Bind image with memory.
14580  res = allocator->BindImageMemory(*pAllocation, *pImage);
14581  if(res >= 0)
14582  {
14583  // All steps succeeded.
14584  #if VMA_STATS_STRING_ENABLED
14585  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14586  #endif
14587  if(pAllocationInfo != VMA_NULL)
14588  {
14589  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14590  }
14591 
14592  return VK_SUCCESS;
14593  }
14594  allocator->FreeMemory(
14595  1, // allocationCount
14596  pAllocation);
14597  *pAllocation = VK_NULL_HANDLE;
14598  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14599  *pImage = VK_NULL_HANDLE;
14600  return res;
14601  }
14602  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14603  *pImage = VK_NULL_HANDLE;
14604  return res;
14605  }
14606  return res;
14607 }
14608 
14609 void vmaDestroyImage(
14610  VmaAllocator allocator,
14611  VkImage image,
14612  VmaAllocation allocation)
14613 {
14614  VMA_ASSERT(allocator);
14615 
14616  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14617  {
14618  return;
14619  }
14620 
14621  VMA_DEBUG_LOG("vmaDestroyImage");
14622 
14623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14624 
14625 #if VMA_RECORDING_ENABLED
14626  if(allocator->GetRecorder() != VMA_NULL)
14627  {
14628  allocator->GetRecorder()->RecordDestroyImage(
14629  allocator->GetCurrentFrameIndex(),
14630  allocation);
14631  }
14632 #endif
14633 
14634  if(image != VK_NULL_HANDLE)
14635  {
14636  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14637  }
14638  if(allocation != VK_NULL_HANDLE)
14639  {
14640  allocator->FreeMemory(
14641  1, // allocationCount
14642  &allocation);
14643  }
14644 }
14645 
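// Usage sketch (illustrative): vmaCreateImage follows the same pattern as
// vmaCreateBuffer; the tiling selects the suballocation type chosen in
// vmaCreateImage above.
/*
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 1024;
    imgCreateInfo.extent.height = 1024;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(
        allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/
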
14646 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1567
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1868
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1624
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1598
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2190
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1579
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1825
Definition: vk_mem_alloc.h:1928
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1571
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2290
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1621
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2586
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2079
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1468
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2171
Definition: vk_mem_alloc.h:1905
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1560
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1978
Definition: vk_mem_alloc.h:1852
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1633
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2107
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1686
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1618
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1856
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1758
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1576
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1757
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2590
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1650
VmaStatInfo total
Definition: vk_mem_alloc.h:1767
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2598
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1962
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2581
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1577
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1502
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1627
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2121
Definition: vk_mem_alloc.h:2115
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1693
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2300
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1572
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1596
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1999
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2141
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2177
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1558
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2124
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1803
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2576
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2594
Definition: vk_mem_alloc.h:1842
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1986
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1575
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1763
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1508
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1529
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1600
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1534
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2596
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1973
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region available for a new allocation.
Definition: vk_mem_alloc.h:2187
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
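A minimal sketch; the returned string must be released with the matching vmaFreeStatsString():

char* statsJson = nullptr;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE = detailed map
// ... log statsJson or write it to a file ...
vmaFreeStatsString(allocator, statsJson);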
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1568
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1746
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2136
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1521
Definition: vk_mem_alloc.h:2111
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
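vmaCreateBuffer creates the VkBuffer, allocates memory for it, and binds them together. A sketch, assuming a GPU-only vertex buffer:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                               &buffer, &allocation, nullptr); // pAllocationInfo is optional
// Later: vmaDestroyBuffer(allocator, buffer, allocation);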
Definition: vk_mem_alloc.h:1912
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1759
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1525
Definition: vk_mem_alloc.h:1936
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2127
Definition: vk_mem_alloc.h:1851
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1574
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1968
Definition: vk_mem_alloc.h:1959
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1749
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1570
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2149
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1636
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2180
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1957
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1992
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
Definition: vk_mem_alloc.h:1674
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1765
Set this flag to use a memory that will be persistently mapped, and retrieve a pointer to it.
Definition: vk_mem_alloc.h:1892
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1758
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
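A sketch of probing the memory type for a buffer description without creating the buffer itself:

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 4096;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex = 0;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);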
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1581
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1606
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1523
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1580
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
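A sketch of a map/write/unmap cycle; mapping is reference-counted in VMA, so this also works on allocations created with the MAPPED flag. srcData and srcSize are caller-provided:

void* mappedData = nullptr;
if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize); // <cstring> assumed included
    vmaUnmapMemory(allocator, allocation);
}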
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2163
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1573
Definition: vk_mem_alloc.h:1923
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1614
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:2314
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
Definition: vk_mem_alloc.h:1630
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1758
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
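A sketch of allocating several allocations with identical requirements in one call; the VkMemoryRequirements values below are placeholders (they would normally come from vkGetBufferMemoryRequirements or similar):

VkMemoryRequirements memReq = {};
memReq.size = 1024 * 1024;
memReq.alignment = 256;
memReq.memoryTypeBits = UINT32_MAX; // placeholder: any memory type

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocations[4] = {};
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo,
                                      4, allocations, nullptr);
// Later: vmaFreeMemoryPages(allocator, 4, allocations);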
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1755
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:2168
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
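A sketch of a periodic sanity check; this requires corruption detection to be compiled in (VMA_DEBUG_MARGIN / VMA_DEBUG_DETECT_CORRUPTION), otherwise the call reports the feature as absent:

VkResult res = vmaCheckPoolCorruption(allocator, pool);
// VK_SUCCESS: no corruption found in the pool's margins.
// VK_ERROR_FEATURE_NOT_PRESENT: corruption detection not enabled.
// Other VK_ERROR_* codes indicate detected corruption.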
Definition: vk_mem_alloc.h:1932
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2295
Definition: vk_mem_alloc.h:1943
Definition: vk_mem_alloc.h:1955
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:2592
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1566
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
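A minimal sketch; physicalDevice and device are assumed to be valid Vulkan handles created by the application:

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// Later: vmaDestroyAllocator(allocator);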
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1753
Definition: vk_mem_alloc.h:1808
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2117
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
Definition: vk_mem_alloc.h:1603
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:1751
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1578
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1582
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:1879
Definition: vk_mem_alloc.h:1950
Definition: vk_mem_alloc.h:1835
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2309
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1556
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1569
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2096
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2276
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
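A sketch of the intended per-frame call, so lost-allocation bookkeeping can compare against VmaPoolCreateInfo::frameInUseCount:

// Called once at the start of each rendered frame.
static uint32_t frameIndex = 0;
vmaSetCurrentFrameIndex(allocator, ++frameIndex);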
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1940
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2061
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1759
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
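A sketch of guarding use of an allocation created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:

if (vmaTouchAllocation(allocator, allocation) == VK_TRUE)
{
    // Allocation is alive and now marked as used in the current frame.
}
else
{
    // Allocation is lost; the resource must be recreated.
}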
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1590
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1766
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2174
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1759
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2281