Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1397 #include <vulkan/vulkan.h>
1398 
1399 #if !defined(VMA_DEDICATED_ALLOCATION)
1400  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1401  #define VMA_DEDICATED_ALLOCATION 1
1402  #else
1403  #define VMA_DEDICATED_ALLOCATION 0
1404  #endif
1405 #endif
1406 
1416 VK_DEFINE_HANDLE(VmaAllocator)
1417 
1418 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1420  VmaAllocator allocator,
1421  uint32_t memoryType,
1422  VkDeviceMemory memory,
1423  VkDeviceSize size);
1425 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1426  VmaAllocator allocator,
1427  uint32_t memoryType,
1428  VkDeviceMemory memory,
1429  VkDeviceSize size);
1430 
1437 typedef struct VmaDeviceMemoryCallbacks {
1441  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
1443  PFN_vmaFreeDeviceMemoryFunction pfnFree;
1444 } VmaDeviceMemoryCallbacks;
1445 
1446 typedef enum VmaAllocatorCreateFlagBits {
1452  VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
1470  VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
1473  VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1474 } VmaAllocatorCreateFlagBits;
1477 typedef VkFlags VmaAllocatorCreateFlags;
1478 
1483 typedef struct VmaVulkanFunctions {
1484  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1485  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1486  PFN_vkAllocateMemory vkAllocateMemory;
1487  PFN_vkFreeMemory vkFreeMemory;
1488  PFN_vkMapMemory vkMapMemory;
1489  PFN_vkUnmapMemory vkUnmapMemory;
1490  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1491  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1492  PFN_vkBindBufferMemory vkBindBufferMemory;
1493  PFN_vkBindImageMemory vkBindImageMemory;
1494  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1495  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1496  PFN_vkCreateBuffer vkCreateBuffer;
1497  PFN_vkDestroyBuffer vkDestroyBuffer;
1498  PFN_vkCreateImage vkCreateImage;
1499  PFN_vkDestroyImage vkDestroyImage;
1500 #if VMA_DEDICATED_ALLOCATION
1501  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1502  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1503 #endif
1504 } VmaVulkanFunctions;
1505 
1507 typedef enum VmaRecordFlagBits {
1510  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
1513  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1514 } VmaRecordFlagBits;
1517 typedef VkFlags VmaRecordFlags;
1518 
1519 /*
1520 Define this macro to 0/1 to disable/enable support for recording functionality,
1521 available through VmaAllocatorCreateInfo::pRecordSettings.
1522 */
1523 #ifndef VMA_RECORDING_ENABLED
1524  #ifdef _WIN32
1525  #define VMA_RECORDING_ENABLED 1
1526  #else
1527  #define VMA_RECORDING_ENABLED 0
1528  #endif
1529 #endif
1530 
1532 typedef struct VmaRecordSettings
1533 {
1537  VmaRecordFlags flags;
1543  const char* pFilePath;
1544 } VmaRecordSettings;
1545 
1547 typedef struct VmaAllocatorCreateInfo
1548 {
1550  VmaAllocatorCreateFlags flags;
1552 
1553  VkPhysicalDevice physicalDevice;
1555 
1556  VkDevice device;
1558 
1560  VkDeviceSize preferredLargeHeapBlockSize;
1561 
1562  const VkAllocationCallbacks* pAllocationCallbacks;
1564 
1566  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
1584  uint32_t frameInUseCount;
1603  const VkDeviceSize* pHeapSizeLimit;
1615  const VmaVulkanFunctions* pVulkanFunctions;
1622  const VmaRecordSettings* pRecordSettings;
1623 } VmaAllocatorCreateInfo;
1624 
1626 VkResult vmaCreateAllocator(
1627  const VmaAllocatorCreateInfo* pCreateInfo,
1628  VmaAllocator* pAllocator);
1629 
1631 void vmaDestroyAllocator(
1632  VmaAllocator allocator);
1633 
1638 void vmaGetPhysicalDeviceProperties(
1639  VmaAllocator allocator,
1640  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1641 
1646 void vmaGetMemoryProperties(
1647  VmaAllocator allocator,
1648  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1649 
1656 void vmaGetMemoryTypeProperties(
1657  VmaAllocator allocator,
1658  uint32_t memoryTypeIndex,
1659  VkMemoryPropertyFlags* pFlags);
1660 
1669 void vmaSetCurrentFrameIndex(
1670  VmaAllocator allocator,
1671  uint32_t frameIndex);
1672 
1675 typedef struct VmaStatInfo
1676 {
1678  uint32_t blockCount;
1680  uint32_t allocationCount;
1682  uint32_t unusedRangeCount;
1684  VkDeviceSize usedBytes;
1686  VkDeviceSize unusedBytes;
1687  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1688  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1689 } VmaStatInfo;
1690 
1692 typedef struct VmaStats
1693 {
1694  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1695  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1696  VmaStatInfo total;
1697 } VmaStats;
1698 
1700 void vmaCalculateStats(
1701  VmaAllocator allocator,
1702  VmaStats* pStats);
1703 
1704 #define VMA_STATS_STRING_ENABLED 1
1705 
1706 #if VMA_STATS_STRING_ENABLED
1707 
1709 
1711 void vmaBuildStatsString(
1712  VmaAllocator allocator,
1713  char** ppStatsString,
1714  VkBool32 detailedMap);
1715 
1716 void vmaFreeStatsString(
1717  VmaAllocator allocator,
1718  char* pStatsString);
1719 
1720 #endif // #if VMA_STATS_STRING_ENABLED
1721 
1730 VK_DEFINE_HANDLE(VmaPool)
1731 
1732 typedef enum VmaMemoryUsage
1733 {
1735  VMA_MEMORY_USAGE_UNKNOWN = 0,
1741  VMA_MEMORY_USAGE_GPU_ONLY = 1,
1749  VMA_MEMORY_USAGE_CPU_ONLY = 2,
1755  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
1760  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
1781  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
1782 } VmaMemoryUsage;
1783 
1798 
1853 
1866 
1876 
1883 
1886 typedef VkFlags VmaAllocationCreateFlags;
1887 
1888 typedef struct VmaAllocationCreateInfo
1889 {
1892  VmaAllocationCreateFlags flags;
1895  VmaMemoryUsage usage;
1902  VkMemoryPropertyFlags requiredFlags;
1907  VkMemoryPropertyFlags preferredFlags;
1915  uint32_t memoryTypeBits;
1921  VmaPool pool;
1928  void* pUserData;
1929 } VmaAllocationCreateInfo;
1930 
1947 VkResult vmaFindMemoryTypeIndex(
1948  VmaAllocator allocator,
1949  uint32_t memoryTypeBits,
1950  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1951  uint32_t* pMemoryTypeIndex);
1952 
1965 VkResult vmaFindMemoryTypeIndexForBufferInfo(
1966  VmaAllocator allocator,
1967  const VkBufferCreateInfo* pBufferCreateInfo,
1968  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1969  uint32_t* pMemoryTypeIndex);
1970 
1983 VkResult vmaFindMemoryTypeIndexForImageInfo(
1984  VmaAllocator allocator,
1985  const VkImageCreateInfo* pImageCreateInfo,
1986  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1987  uint32_t* pMemoryTypeIndex);
1988 
2008 typedef enum VmaPoolCreateFlagBits {
2025  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2036  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
2042  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
2044  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2045 } VmaPoolCreateFlagBits;
2046 typedef VkFlags VmaPoolCreateFlags;
2047 
2050 typedef struct VmaPoolCreateInfo {
2053  uint32_t memoryTypeIndex;
2056  VmaPoolCreateFlags flags;
2065  VkDeviceSize blockSize;
2072  size_t minBlockCount;
2080  size_t maxBlockCount;
2092  uint32_t frameInUseCount;
2093 } VmaPoolCreateInfo;
2094 
2097 typedef struct VmaPoolStats {
2100  VkDeviceSize size;
2103  VkDeviceSize unusedSize;
2106  size_t allocationCount;
2109  size_t unusedRangeCount;
2116  VkDeviceSize unusedRangeSizeMax;
2119  size_t blockCount;
2120 } VmaPoolStats;
2121 
2128 VkResult vmaCreatePool(
2129  VmaAllocator allocator,
2130  const VmaPoolCreateInfo* pCreateInfo,
2131  VmaPool* pPool);
2132 
2135 void vmaDestroyPool(
2136  VmaAllocator allocator,
2137  VmaPool pool);
2138 
2145 void vmaGetPoolStats(
2146  VmaAllocator allocator,
2147  VmaPool pool,
2148  VmaPoolStats* pPoolStats);
2149 
2156 void vmaMakePoolAllocationsLost(
2157  VmaAllocator allocator,
2158  VmaPool pool,
2159  size_t* pLostAllocationCount);
2160 
2175 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2176 
2201 VK_DEFINE_HANDLE(VmaAllocation)
2202 
2203 
2205 typedef struct VmaAllocationInfo {
2210  uint32_t memoryType;
2219  VkDeviceMemory deviceMemory;
2224  VkDeviceSize offset;
2229  VkDeviceSize size;
2236  void* pMappedData;
2243  void* pUserData;
2244 } VmaAllocationInfo;
2245 
2256 VkResult vmaAllocateMemory(
2257  VmaAllocator allocator,
2258  const VkMemoryRequirements* pVkMemoryRequirements,
2259  const VmaAllocationCreateInfo* pCreateInfo,
2260  VmaAllocation* pAllocation,
2261  VmaAllocationInfo* pAllocationInfo);
2262 
2269 VkResult vmaAllocateMemoryForBuffer(
2270  VmaAllocator allocator,
2271  VkBuffer buffer,
2272  const VmaAllocationCreateInfo* pCreateInfo,
2273  VmaAllocation* pAllocation,
2274  VmaAllocationInfo* pAllocationInfo);
2275 
2277 VkResult vmaAllocateMemoryForImage(
2278  VmaAllocator allocator,
2279  VkImage image,
2280  const VmaAllocationCreateInfo* pCreateInfo,
2281  VmaAllocation* pAllocation,
2282  VmaAllocationInfo* pAllocationInfo);
2283 
2285 void vmaFreeMemory(
2286  VmaAllocator allocator,
2287  VmaAllocation allocation);
2288 
2305 void vmaGetAllocationInfo(
2306  VmaAllocator allocator,
2307  VmaAllocation allocation,
2308  VmaAllocationInfo* pAllocationInfo);
2309 
2324 VkBool32 vmaTouchAllocation(
2325  VmaAllocator allocator,
2326  VmaAllocation allocation);
2327 
2341 void vmaSetAllocationUserData(
2342  VmaAllocator allocator,
2343  VmaAllocation allocation,
2344  void* pUserData);
2345 
2356 void vmaCreateLostAllocation(
2357  VmaAllocator allocator,
2358  VmaAllocation* pAllocation);
2359 
2394 VkResult vmaMapMemory(
2395  VmaAllocator allocator,
2396  VmaAllocation allocation,
2397  void** ppData);
2398 
2403 void vmaUnmapMemory(
2404  VmaAllocator allocator,
2405  VmaAllocation allocation);
2406 
2419 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2420 
2433 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2434 
2451 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2452 
2454 typedef struct VmaDefragmentationInfo {
2459  VkDeviceSize maxBytesToMove;
2464  uint32_t maxAllocationsToMove;
2465 } VmaDefragmentationInfo;
2466 
2468 typedef struct VmaDefragmentationStats {
2470  VkDeviceSize bytesMoved;
2472  VkDeviceSize bytesFreed;
2474  uint32_t allocationsMoved;
2476  uint32_t deviceMemoryBlocksFreed;
2477 } VmaDefragmentationStats;
2478 
2565 VkResult vmaDefragment(
2566  VmaAllocator allocator,
2567  VmaAllocation* pAllocations,
2568  size_t allocationCount,
2569  VkBool32* pAllocationsChanged,
2570  const VmaDefragmentationInfo *pDefragmentationInfo,
2571  VmaDefragmentationStats* pDefragmentationStats);
2572 
2585 VkResult vmaBindBufferMemory(
2586  VmaAllocator allocator,
2587  VmaAllocation allocation,
2588  VkBuffer buffer);
2589 
2602 VkResult vmaBindImageMemory(
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  VkImage image);
2606 
2633 VkResult vmaCreateBuffer(
2634  VmaAllocator allocator,
2635  const VkBufferCreateInfo* pBufferCreateInfo,
2636  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2637  VkBuffer* pBuffer,
2638  VmaAllocation* pAllocation,
2639  VmaAllocationInfo* pAllocationInfo);
2640 
2652 void vmaDestroyBuffer(
2653  VmaAllocator allocator,
2654  VkBuffer buffer,
2655  VmaAllocation allocation);
2656 
2658 VkResult vmaCreateImage(
2659  VmaAllocator allocator,
2660  const VkImageCreateInfo* pImageCreateInfo,
2661  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2662  VkImage* pImage,
2663  VmaAllocation* pAllocation,
2664  VmaAllocationInfo* pAllocationInfo);
2665 
2677 void vmaDestroyImage(
2678  VmaAllocator allocator,
2679  VkImage image,
2680  VmaAllocation allocation);
2681 
2682 #ifdef __cplusplus
2683 }
2684 #endif
2685 
2686 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2687 
2688 // For Visual Studio IntelliSense.
2689 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2690 #define VMA_IMPLEMENTATION
2691 #endif
2692 
2693 #ifdef VMA_IMPLEMENTATION
2694 #undef VMA_IMPLEMENTATION
2695 
2696 #include <cstdint>
2697 #include <cstdlib>
2698 #include <cstring>
2699 #include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers
2700 /*******************************************************************************
2701 CONFIGURATION SECTION
2702 
2703 Define some of these macros before each #include of this header or change them
2704 here if you need behavior other than the default for your environment.
2705 */
2706 
2707 /*
2708 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2709 internally, like:
2710 
2711  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2712 
2713 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2714 VmaAllocatorCreateInfo::pVulkanFunctions.
2715 */
2716 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2717 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2718 #endif
2719 
2720 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2721 //#define VMA_USE_STL_CONTAINERS 1
2722 
2723 /* Set this macro to 1 to make the library include and use STL containers:
2724 std::pair, std::vector, std::list, std::unordered_map.
2725 
2726 Set it to 0 or leave it undefined to make the library use its own implementation of
2727 the containers.
2728 */
2729 #if VMA_USE_STL_CONTAINERS
2730  #define VMA_USE_STL_VECTOR 1
2731  #define VMA_USE_STL_UNORDERED_MAP 1
2732  #define VMA_USE_STL_LIST 1
2733 #endif
2734 
2735 #if VMA_USE_STL_VECTOR
2736  #include <vector>
2737 #endif
2738 
2739 #if VMA_USE_STL_UNORDERED_MAP
2740  #include <unordered_map>
2741 #endif
2742 
2743 #if VMA_USE_STL_LIST
2744  #include <list>
2745 #endif
2746 
2747 /*
2748 The following headers are used only in this CONFIGURATION section, so feel free
2749 to remove them if they are not needed.
2750 */
2751 #include <cassert> // for assert
2752 #include <algorithm> // for min, max
2753 #include <mutex> // for std::mutex
2754 #include <atomic> // for std::atomic
2755 
2756 #ifndef VMA_NULL
2757  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2758  #define VMA_NULL nullptr
2759 #endif
2760 
2761 #if defined(__APPLE__) || defined(__ANDROID__)
2762 #include <cstdlib>
2763 void *aligned_alloc(size_t alignment, size_t size)
2764 {
2765  // alignment must be >= sizeof(void*)
2766  if(alignment < sizeof(void*))
2767  {
2768  alignment = sizeof(void*);
2769  }
2770 
2771  void *pointer;
2772  if(posix_memalign(&pointer, alignment, size) == 0)
2773  return pointer;
2774  return VMA_NULL;
2775 }
2776 #endif
2777 
2778 // If your compiler is not compatible with C++11 and the definition of the
2779 // aligned_alloc() function is missing, uncommenting the following line may help:
2780 
2781 //#include <malloc.h>
2782 
2783 // Normal assert to check for programmer's errors, especially in Debug configuration.
2784 #ifndef VMA_ASSERT
2785  #ifdef _DEBUG
2786  #define VMA_ASSERT(expr) assert(expr)
2787  #else
2788  #define VMA_ASSERT(expr)
2789  #endif
2790 #endif
2791 
2792 // Assert that will be called very often, like inside data structures e.g. operator[].
2793 // Making it non-empty can slow the program down.
2794 #ifndef VMA_HEAVY_ASSERT
2795  #ifdef _DEBUG
2796  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2797  #else
2798  #define VMA_HEAVY_ASSERT(expr)
2799  #endif
2800 #endif
2801 
2802 #ifndef VMA_ALIGN_OF
2803  #define VMA_ALIGN_OF(type) (__alignof(type))
2804 #endif
2805 
2806 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2807  #if defined(_WIN32)
2808  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2809  #else
2810  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2811  #endif
2812 #endif
2813 
2814 #ifndef VMA_SYSTEM_FREE
2815  #if defined(_WIN32)
2816  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2817  #else
2818  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2819  #endif
2820 #endif
2821 
2822 #ifndef VMA_MIN
2823  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2824 #endif
2825 
2826 #ifndef VMA_MAX
2827  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2828 #endif
2829 
2830 #ifndef VMA_SWAP
2831  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2832 #endif
2833 
2834 #ifndef VMA_SORT
2835  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2836 #endif
2837 
2838 #ifndef VMA_DEBUG_LOG
2839  #define VMA_DEBUG_LOG(format, ...)
2840  /*
2841  #define VMA_DEBUG_LOG(format, ...) do { \
2842  printf(format, __VA_ARGS__); \
2843  printf("\n"); \
2844  } while(false)
2845  */
2846 #endif
2847 
2848 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2849 #if VMA_STATS_STRING_ENABLED
2850  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2851  {
2852  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2853  }
2854  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2855  {
2856  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2857  }
2858  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2859  {
2860  snprintf(outStr, strLen, "%p", ptr);
2861  }
2862 #endif
2863 
2864 #ifndef VMA_MUTEX
2865  class VmaMutex
2866  {
2867  public:
2868  VmaMutex() { }
2869  ~VmaMutex() { }
2870  void Lock() { m_Mutex.lock(); }
2871  void Unlock() { m_Mutex.unlock(); }
2872  private:
2873  std::mutex m_Mutex;
2874  };
2875  #define VMA_MUTEX VmaMutex
2876 #endif
2877 
2878 /*
2879 If providing your own implementation, you need to implement a subset of std::atomic:
2880 
2881 - Constructor(uint32_t desired)
2882 - uint32_t load() const
2883 - void store(uint32_t desired)
2884 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2885 */
2886 #ifndef VMA_ATOMIC_UINT32
2887  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2888 #endif
2889 
2890 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2891 
2895  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2896 #endif
2897 
2898 #ifndef VMA_DEBUG_ALIGNMENT
2899 
2903  #define VMA_DEBUG_ALIGNMENT (1)
2904 #endif
2905 
2906 #ifndef VMA_DEBUG_MARGIN
2907 
2911  #define VMA_DEBUG_MARGIN (0)
2912 #endif
2913 
2914 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2915 
2919  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2920 #endif
2921 
2922 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2923 
2928  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2929 #endif
2930 
2931 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2932 
2936  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2937 #endif
2938 
2939 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2940 
2944  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2945 #endif
2946 
2947 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2948  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2950 #endif
2951 
2952 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2953  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2955 #endif
2956 
2957 #ifndef VMA_CLASS_NO_COPY
2958  #define VMA_CLASS_NO_COPY(className) \
2959  private: \
2960  className(const className&) = delete; \
2961  className& operator=(const className&) = delete;
2962 #endif
2963 
2964 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2965 
2966 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2967 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2968 
2969 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
2970 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2971 
2972 /*******************************************************************************
2973 END OF CONFIGURATION
2974 */
2975 
2976 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2977  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2978 
2979 // Returns number of bits set to 1 in (v).
2980 static inline uint32_t VmaCountBitsSet(uint32_t v)
2981 {
2982  uint32_t c = v - ((v >> 1) & 0x55555555);
2983  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2984  c = ((c >> 4) + c) & 0x0F0F0F0F;
2985  c = ((c >> 8) + c) & 0x00FF00FF;
2986  c = ((c >> 16) + c) & 0x0000FFFF;
2987  return c;
2988 }
2989 
2990 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
2991 // Use types like uint32_t, uint64_t as T.
2992 template <typename T>
2993 static inline T VmaAlignUp(T val, T align)
2994 {
2995  return (val + align - 1) / align * align;
2996 }
2997 // Aligns given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
2998 // Use types like uint32_t, uint64_t as T.
2999 template <typename T>
3000 static inline T VmaAlignDown(T val, T align)
3001 {
3002  return val / align * align;
3003 }
3004 
3005 // Division with mathematical rounding to nearest number.
3006 template <typename T>
3007 static inline T VmaRoundDiv(T x, T y)
3008 {
3009  return (x + (y / (T)2)) / y;
3010 }
3011 
3012 /*
3013 Returns true if given number is a power of two.
3014 T must be an unsigned integer type, or a signed integer whose value is always nonnegative.
3015 Returns true for 0.
3016 */
3017 template <typename T>
3018 inline bool VmaIsPow2(T x)
3019 {
3020  return (x & (x-1)) == 0;
3021 }
3022 
3023 // Returns smallest power of 2 greater or equal to v.
3024 static inline uint32_t VmaNextPow2(uint32_t v)
3025 {
3026  v--;
3027  v |= v >> 1;
3028  v |= v >> 2;
3029  v |= v >> 4;
3030  v |= v >> 8;
3031  v |= v >> 16;
3032  v++;
3033  return v;
3034 }
3035 static inline uint64_t VmaNextPow2(uint64_t v)
3036 {
3037  v--;
3038  v |= v >> 1;
3039  v |= v >> 2;
3040  v |= v >> 4;
3041  v |= v >> 8;
3042  v |= v >> 16;
3043  v |= v >> 32;
3044  v++;
3045  return v;
3046 }
3047 
3048 // Returns largest power of 2 less or equal to v.
3049 static inline uint32_t VmaPrevPow2(uint32_t v)
3050 {
3051  v |= v >> 1;
3052  v |= v >> 2;
3053  v |= v >> 4;
3054  v |= v >> 8;
3055  v |= v >> 16;
3056  v = v ^ (v >> 1);
3057  return v;
3058 }
3059 static inline uint64_t VmaPrevPow2(uint64_t v)
3060 {
3061  v |= v >> 1;
3062  v |= v >> 2;
3063  v |= v >> 4;
3064  v |= v >> 8;
3065  v |= v >> 16;
3066  v |= v >> 32;
3067  v = v ^ (v >> 1);
3068  return v;
3069 }
3070 
3071 static inline bool VmaStrIsEmpty(const char* pStr)
3072 {
3073  return pStr == VMA_NULL || *pStr == '\0';
3074 }
3075 
3076 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3077 {
3078  switch(algorithm)
3079  {
3081  return "Linear";
3083  return "Buddy";
3084  case 0:
3085  return "Default";
3086  default:
3087  VMA_ASSERT(0);
3088  return "";
3089  }
3090 }
3091 
3092 #ifndef VMA_SORT
3093 
3094 template<typename Iterator, typename Compare>
3095 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
3096 {
3097  Iterator centerValue = end; --centerValue;
3098  Iterator insertIndex = beg;
3099  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
3100  {
3101  if(cmp(*memTypeIndex, *centerValue))
3102  {
3103  if(insertIndex != memTypeIndex)
3104  {
3105  VMA_SWAP(*memTypeIndex, *insertIndex);
3106  }
3107  ++insertIndex;
3108  }
3109  }
3110  if(insertIndex != centerValue)
3111  {
3112  VMA_SWAP(*insertIndex, *centerValue);
3113  }
3114  return insertIndex;
3115 }
3116 
3117 template<typename Iterator, typename Compare>
3118 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3119 {
3120  if(beg < end)
3121  {
3122  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3123  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3124  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3125  }
3126 }
3127 
3128 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3129 
3130 #endif // #ifndef VMA_SORT
3131 
3132 /*
3133 Returns true if two memory blocks occupy overlapping pages.
3134 ResourceA must be at a lower memory offset than ResourceB.
3135 
3136 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3137 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3138 */
3139 static inline bool VmaBlocksOnSamePage(
3140  VkDeviceSize resourceAOffset,
3141  VkDeviceSize resourceASize,
3142  VkDeviceSize resourceBOffset,
3143  VkDeviceSize pageSize)
3144 {
3145  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3146  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3147  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3148  VkDeviceSize resourceBStart = resourceBOffset;
3149  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3150  return resourceAEndPage == resourceBStartPage;
3151 }
3152 
3153 enum VmaSuballocationType
3154 {
3155  VMA_SUBALLOCATION_TYPE_FREE = 0,
3156  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3157  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3158  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3159  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3160  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3161  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3162 };
3163 
3164 /*
3165 Returns true if given suballocation types could conflict and must respect
3166 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3167 or linear image and another one is optimal image. If type is unknown, behave
3168 conservatively.
3169 */
3170 static inline bool VmaIsBufferImageGranularityConflict(
3171  VmaSuballocationType suballocType1,
3172  VmaSuballocationType suballocType2)
3173 {
3174  if(suballocType1 > suballocType2)
3175  {
3176  VMA_SWAP(suballocType1, suballocType2);
3177  }
3178 
3179  switch(suballocType1)
3180  {
3181  case VMA_SUBALLOCATION_TYPE_FREE:
3182  return false;
3183  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3184  return true;
3185  case VMA_SUBALLOCATION_TYPE_BUFFER:
3186  return
3187  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3188  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3189  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3190  return
3191  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3192  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3193  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3194  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3195  return
3196  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3197  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3198  return false;
3199  default:
3200  VMA_ASSERT(0);
3201  return true;
3202  }
3203 }
3204 
3205 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3206 {
3207  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3208  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3209  // This condition is to silence clang compiler error: "comparison of unsigned expression < 0 is always false"
3210  if(numberCount > 0)
3211  {
3212  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3213  {
3214  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3215  }
3216  }
3217 }
3218 
3219 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3220 {
3221  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3222  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3223  // This condition is to silence clang compiler error: "comparison of unsigned expression < 0 is always false"
3224  if(numberCount > 0)
3225  {
3226  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3227  {
3228  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3229  {
3230  return false;
3231  }
3232  }
3233  }
3234  return true;
3235 }
3236 
3237 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3238 struct VmaMutexLock
3239 {
3240  VMA_CLASS_NO_COPY(VmaMutexLock)
3241 public:
3242  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3243  m_pMutex(useMutex ? &mutex : VMA_NULL)
3244  {
3245  if(m_pMutex)
3246  {
3247  m_pMutex->Lock();
3248  }
3249  }
3250 
3251  ~VmaMutexLock()
3252  {
3253  if(m_pMutex)
3254  {
3255  m_pMutex->Unlock();
3256  }
3257  }
3258 
3259 private:
3260  VMA_MUTEX* m_pMutex;
3261 };
3262 
3263 #if VMA_DEBUG_GLOBAL_MUTEX
3264  static VMA_MUTEX gDebugGlobalMutex;
3265  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3266 #else
3267  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3268 #endif
3269 
3270 // Minimum size of a free suballocation to register it in the free suballocation collection.
3271 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3272 
3273 /*
3274 Performs binary search and returns iterator to first element that is greater or
3275 equal to (key), according to comparison (cmp).
3276 
3277 Cmp should return true if first argument is less than second argument.
3278 
3279 The returned iterator points to the found element if it is present in the
3280 collection, or to the position where a new element with value (key) should be inserted.
3281 */
3282 template <typename CmpLess, typename IterT, typename KeyT>
3283 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
3284 {
3285  size_t down = 0, up = (end - beg);
3286  while(down < up)
3287  {
3288  const size_t mid = (down + up) / 2;
3289  if(cmp(*(beg+mid), key))
3290  {
3291  down = mid + 1;
3292  }
3293  else
3294  {
3295  up = mid;
3296  }
3297  }
3298  return beg + down;
3299 }
3300 
3302 // Memory allocation
3303 
3304 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3305 {
3306  if((pAllocationCallbacks != VMA_NULL) &&
3307  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3308  {
3309  return (*pAllocationCallbacks->pfnAllocation)(
3310  pAllocationCallbacks->pUserData,
3311  size,
3312  alignment,
3313  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3314  }
3315  else
3316  {
3317  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3318  }
3319 }
3320 
3321 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3322 {
3323  if((pAllocationCallbacks != VMA_NULL) &&
3324  (pAllocationCallbacks->pfnFree != VMA_NULL))
3325  {
3326  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3327  }
3328  else
3329  {
3330  VMA_SYSTEM_FREE(ptr);
3331  }
3332 }
3333 
3334 template<typename T>
3335 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3336 {
3337  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3338 }
3339 
3340 template<typename T>
3341 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3342 {
3343  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3344 }
3345 
3346 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3347 
3348 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3349 
3350 template<typename T>
3351 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3352 {
3353  ptr->~T();
3354  VmaFree(pAllocationCallbacks, ptr);
3355 }
3356 
3357 template<typename T>
3358 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3359 {
3360  if(ptr != VMA_NULL)
3361  {
3362  for(size_t i = count; i--; )
3363  {
3364  ptr[i].~T();
3365  }
3366  VmaFree(pAllocationCallbacks, ptr);
3367  }
3368 }
3369 
3370 // STL-compatible allocator.
3371 template<typename T>
3372 class VmaStlAllocator
3373 {
3374 public:
3375  const VkAllocationCallbacks* const m_pCallbacks;
3376  typedef T value_type;
3377 
3378  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3379  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3380 
3381  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3382  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3383 
3384  template<typename U>
3385  bool operator==(const VmaStlAllocator<U>& rhs) const
3386  {
3387  return m_pCallbacks == rhs.m_pCallbacks;
3388  }
3389  template<typename U>
3390  bool operator!=(const VmaStlAllocator<U>& rhs) const
3391  {
3392  return m_pCallbacks != rhs.m_pCallbacks;
3393  }
3394 
3395  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3396 };
3397 
3398 #if VMA_USE_STL_VECTOR
3399 
3400 #define VmaVector std::vector
3401 
3402 template<typename T, typename allocatorT>
3403 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
3404 {
3405  vec.insert(vec.begin() + index, item);
3406 }
3407 
3408 template<typename T, typename allocatorT>
3409 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
3410 {
3411  vec.erase(vec.begin() + index);
3412 }
3413 
3414 #else // #if VMA_USE_STL_VECTOR
3415 
3416 /* Class with interface compatible with subset of std::vector.
3417 T must be POD because constructors and destructors are not called and memcpy is
3418 used for these objects. */
3419 template<typename T, typename AllocatorT>
3420 class VmaVector
3421 {
3422 public:
3423  typedef T value_type;
3424 
3425  VmaVector(const AllocatorT& allocator) :
3426  m_Allocator(allocator),
3427  m_pArray(VMA_NULL),
3428  m_Count(0),
3429  m_Capacity(0)
3430  {
3431  }
3432 
3433  VmaVector(size_t count, const AllocatorT& allocator) :
3434  m_Allocator(allocator),
3435  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3436  m_Count(count),
3437  m_Capacity(count)
3438  {
3439  }
3440 
3441  VmaVector(const VmaVector<T, AllocatorT>& src) :
3442  m_Allocator(src.m_Allocator),
3443  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3444  m_Count(src.m_Count),
3445  m_Capacity(src.m_Count)
3446  {
3447  if(m_Count != 0)
3448  {
3449  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3450  }
3451  }
3452 
3453  ~VmaVector()
3454  {
3455  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3456  }
3457 
3458  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3459  {
3460  if(&rhs != this)
3461  {
3462  resize(rhs.m_Count);
3463  if(m_Count != 0)
3464  {
3465  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3466  }
3467  }
3468  return *this;
3469  }
3470 
3471  bool empty() const { return m_Count == 0; }
3472  size_t size() const { return m_Count; }
3473  T* data() { return m_pArray; }
3474  const T* data() const { return m_pArray; }
3475 
3476  T& operator[](size_t index)
3477  {
3478  VMA_HEAVY_ASSERT(index < m_Count);
3479  return m_pArray[index];
3480  }
3481  const T& operator[](size_t index) const
3482  {
3483  VMA_HEAVY_ASSERT(index < m_Count);
3484  return m_pArray[index];
3485  }
3486 
3487  T& front()
3488  {
3489  VMA_HEAVY_ASSERT(m_Count > 0);
3490  return m_pArray[0];
3491  }
3492  const T& front() const
3493  {
3494  VMA_HEAVY_ASSERT(m_Count > 0);
3495  return m_pArray[0];
3496  }
3497  T& back()
3498  {
3499  VMA_HEAVY_ASSERT(m_Count > 0);
3500  return m_pArray[m_Count - 1];
3501  }
3502  const T& back() const
3503  {
3504  VMA_HEAVY_ASSERT(m_Count > 0);
3505  return m_pArray[m_Count - 1];
3506  }
3507 
3508  void reserve(size_t newCapacity, bool freeMemory = false)
3509  {
3510  newCapacity = VMA_MAX(newCapacity, m_Count);
3511 
3512  if((newCapacity < m_Capacity) && !freeMemory)
3513  {
3514  newCapacity = m_Capacity;
3515  }
3516 
3517  if(newCapacity != m_Capacity)
3518  {
3519  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3520  if(m_Count != 0)
3521  {
3522  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3523  }
3524  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3525  m_Capacity = newCapacity;
3526  m_pArray = newArray;
3527  }
3528  }
3529 
3530  void resize(size_t newCount, bool freeMemory = false)
3531  {
3532  size_t newCapacity = m_Capacity;
3533  if(newCount > m_Capacity)
3534  {
3535  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3536  }
3537  else if(freeMemory)
3538  {
3539  newCapacity = newCount;
3540  }
3541 
3542  if(newCapacity != m_Capacity)
3543  {
3544  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3545  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3546  if(elementsToCopy != 0)
3547  {
3548  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3549  }
3550  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3551  m_Capacity = newCapacity;
3552  m_pArray = newArray;
3553  }
3554 
3555  m_Count = newCount;
3556  }
3557 
3558  void clear(bool freeMemory = false)
3559  {
3560  resize(0, freeMemory);
3561  }
3562 
3563  void insert(size_t index, const T& src)
3564  {
3565  VMA_HEAVY_ASSERT(index <= m_Count);
3566  const size_t oldCount = size();
3567  resize(oldCount + 1);
3568  if(index < oldCount)
3569  {
3570  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3571  }
3572  m_pArray[index] = src;
3573  }
3574 
3575  void remove(size_t index)
3576  {
3577  VMA_HEAVY_ASSERT(index < m_Count);
3578  const size_t oldCount = size();
3579  if(index < oldCount - 1)
3580  {
3581  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3582  }
3583  resize(oldCount - 1);
3584  }
3585 
3586  void push_back(const T& src)
3587  {
3588  const size_t newIndex = size();
3589  resize(newIndex + 1);
3590  m_pArray[newIndex] = src;
3591  }
3592 
3593  void pop_back()
3594  {
3595  VMA_HEAVY_ASSERT(m_Count > 0);
3596  resize(size() - 1);
3597  }
3598 
3599  void push_front(const T& src)
3600  {
3601  insert(0, src);
3602  }
3603 
3604  void pop_front()
3605  {
3606  VMA_HEAVY_ASSERT(m_Count > 0);
3607  remove(0);
3608  }
3609 
3610  typedef T* iterator;
3611 
3612  iterator begin() { return m_pArray; }
3613  iterator end() { return m_pArray + m_Count; }
3614 
3615 private:
3616  AllocatorT m_Allocator;
3617  T* m_pArray;
3618  size_t m_Count;
3619  size_t m_Capacity;
3620 };
3621 
3622 template<typename T, typename allocatorT>
3623 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
3624 {
3625  vec.insert(index, item);
3626 }
3627 
3628 template<typename T, typename allocatorT>
3629 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
3630 {
3631  vec.remove(index);
3632 }
3633 
3634 #endif // #if VMA_USE_STL_VECTOR
3635 
3636 template<typename CmpLess, typename VectorT>
3637 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
3638 {
3639  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3640  vector.data(),
3641  vector.data() + vector.size(),
3642  value,
3643  CmpLess()) - vector.data();
3644  VmaVectorInsert(vector, indexToInsert, value);
3645  return indexToInsert;
3646 }
3647 
3648 template<typename CmpLess, typename VectorT>
3649 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
3650 {
3651  CmpLess comparator;
3652  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
3653  vector.begin(),
3654  vector.end(),
3655  value,
3656  comparator);
3657  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
3658  {
3659  size_t indexToRemove = it - vector.begin();
3660  VmaVectorRemove(vector, indexToRemove);
3661  return true;
3662  }
3663  return false;
3664 }
3665 
3666 template<typename CmpLess, typename IterT, typename KeyT>
3667 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3668 {
3669  CmpLess comparator;
3670  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3671  beg, end, value, comparator);
3672  if(it == end ||
3673  (!comparator(*it, value) && !comparator(value, *it)))
3674  {
3675  return it;
3676  }
3677  return end;
3678 }
3679 
3681 // class VmaPoolAllocator
3682 
3683 /*
3684 Allocator for objects of type T using a list of arrays (pools) to speed up
3685 allocation. The number of elements that can be allocated is not bounded, because
3686 the allocator can create multiple blocks.
3687 */
3688 template<typename T>
3689 class VmaPoolAllocator
3690 {
3691  VMA_CLASS_NO_COPY(VmaPoolAllocator)
3692 public:
3693  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
3694  ~VmaPoolAllocator();
3695  void Clear();
3696  T* Alloc();
3697  void Free(T* ptr);
3698 
3699 private:
3700  union Item
3701  {
3702  uint32_t NextFreeIndex;
3703  T Value;
3704  };
3705 
3706  struct ItemBlock
3707  {
3708  Item* pItems;
3709  uint32_t FirstFreeIndex;
3710  };
3711 
3712  const VkAllocationCallbacks* m_pAllocationCallbacks;
3713  size_t m_ItemsPerBlock;
3714  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
3715 
3716  ItemBlock& CreateNewBlock();
3717 };
3718 
3719 template<typename T>
3720 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
3721  m_pAllocationCallbacks(pAllocationCallbacks),
3722  m_ItemsPerBlock(itemsPerBlock),
3723  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
3724 {
3725  VMA_ASSERT(itemsPerBlock > 0);
3726 }
3727 
3728 template<typename T>
3729 VmaPoolAllocator<T>::~VmaPoolAllocator()
3730 {
3731  Clear();
3732 }
3733 
3734 template<typename T>
3735 void VmaPoolAllocator<T>::Clear()
3736 {
3737  for(size_t i = m_ItemBlocks.size(); i--; )
3738  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3739  m_ItemBlocks.clear();
3740 }
3741 
3742 template<typename T>
3743 T* VmaPoolAllocator<T>::Alloc()
3744 {
3745  for(size_t i = m_ItemBlocks.size(); i--; )
3746  {
3747  ItemBlock& block = m_ItemBlocks[i];
3748  // This block has some free items: Use first one.
3749  if(block.FirstFreeIndex != UINT32_MAX)
3750  {
3751  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3752  block.FirstFreeIndex = pItem->NextFreeIndex;
3753  return &pItem->Value;
3754  }
3755  }
3756 
3757  // No block has free item: Create new one and use it.
3758  ItemBlock& newBlock = CreateNewBlock();
3759  Item* const pItem = &newBlock.pItems[0];
3760  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3761  return &pItem->Value;
3762 }
3763 
3764 template<typename T>
3765 void VmaPoolAllocator<T>::Free(T* ptr)
3766 {
3767  // Search all memory blocks to find ptr.
3768  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3769  {
3770  ItemBlock& block = m_ItemBlocks[i];
3771 
3772  // Casting to union.
3773  Item* pItemPtr;
3774  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3775 
3776  // Check if pItemPtr is in address range of this block.
3777  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3778  {
3779  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3780  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3781  block.FirstFreeIndex = index;
3782  return;
3783  }
3784  }
3785  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3786 }
3787 
3788 template<typename T>
3789 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3790 {
3791  ItemBlock newBlock = {
3792  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3793 
3794  m_ItemBlocks.push_back(newBlock);
3795 
3796  // Setup singly-linked list of all free items in this block.
3797  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3798  newBlock.pItems[i].NextFreeIndex = i + 1;
3799  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3800  return m_ItemBlocks.back();
3801 }
3802 
3804 // class VmaRawList, VmaList
3805 
3806 #if VMA_USE_STL_LIST
3807 
3808 #define VmaList std::list
3809 
3810 #else // #if VMA_USE_STL_LIST
3811 
3812 template<typename T>
3813 struct VmaListItem
3814 {
3815  VmaListItem* pPrev;
3816  VmaListItem* pNext;
3817  T Value;
3818 };
3819 
3820 // Doubly linked list.
3821 template<typename T>
3822 class VmaRawList
3823 {
3824  VMA_CLASS_NO_COPY(VmaRawList)
3825 public:
3826  typedef VmaListItem<T> ItemType;
3827 
3828  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
3829  ~VmaRawList();
3830  void Clear();
3831 
3832  size_t GetCount() const { return m_Count; }
3833  bool IsEmpty() const { return m_Count == 0; }
3834 
3835  ItemType* Front() { return m_pFront; }
3836  const ItemType* Front() const { return m_pFront; }
3837  ItemType* Back() { return m_pBack; }
3838  const ItemType* Back() const { return m_pBack; }
3839 
3840  ItemType* PushBack();
3841  ItemType* PushFront();
3842  ItemType* PushBack(const T& value);
3843  ItemType* PushFront(const T& value);
3844  void PopBack();
3845  void PopFront();
3846 
3847  // Item can be null - it means PushBack.
3848  ItemType* InsertBefore(ItemType* pItem);
3849  // Item can be null - it means PushFront.
3850  ItemType* InsertAfter(ItemType* pItem);
3851 
3852  ItemType* InsertBefore(ItemType* pItem, const T& value);
3853  ItemType* InsertAfter(ItemType* pItem, const T& value);
3854 
3855  void Remove(ItemType* pItem);
3856 
3857 private:
3858  const VkAllocationCallbacks* const m_pAllocationCallbacks;
3859  VmaPoolAllocator<ItemType> m_ItemAllocator;
3860  ItemType* m_pFront;
3861  ItemType* m_pBack;
3862  size_t m_Count;
3863 };
3864 
3865 template<typename T>
3866 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
3867  m_pAllocationCallbacks(pAllocationCallbacks),
3868  m_ItemAllocator(pAllocationCallbacks, 128),
3869  m_pFront(VMA_NULL),
3870  m_pBack(VMA_NULL),
3871  m_Count(0)
3872 {
3873 }
3874 
3875 template<typename T>
3876 VmaRawList<T>::~VmaRawList()
3877 {
3878  // Intentionally not calling Clear, because that would waste computation
3879  // returning all items to m_ItemAllocator as free.
3880 }
3881 
3882 template<typename T>
3883 void VmaRawList<T>::Clear()
3884 {
3885  if(IsEmpty() == false)
3886  {
3887  ItemType* pItem = m_pBack;
3888  while(pItem != VMA_NULL)
3889  {
3890  ItemType* const pPrevItem = pItem->pPrev;
3891  m_ItemAllocator.Free(pItem);
3892  pItem = pPrevItem;
3893  }
3894  m_pFront = VMA_NULL;
3895  m_pBack = VMA_NULL;
3896  m_Count = 0;
3897  }
3898 }
3899 
3900 template<typename T>
3901 VmaListItem<T>* VmaRawList<T>::PushBack()
3902 {
3903  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3904  pNewItem->pNext = VMA_NULL;
3905  if(IsEmpty())
3906  {
3907  pNewItem->pPrev = VMA_NULL;
3908  m_pFront = pNewItem;
3909  m_pBack = pNewItem;
3910  m_Count = 1;
3911  }
3912  else
3913  {
3914  pNewItem->pPrev = m_pBack;
3915  m_pBack->pNext = pNewItem;
3916  m_pBack = pNewItem;
3917  ++m_Count;
3918  }
3919  return pNewItem;
3920 }
3921 
3922 template<typename T>
3923 VmaListItem<T>* VmaRawList<T>::PushFront()
3924 {
3925  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3926  pNewItem->pPrev = VMA_NULL;
3927  if(IsEmpty())
3928  {
3929  pNewItem->pNext = VMA_NULL;
3930  m_pFront = pNewItem;
3931  m_pBack = pNewItem;
3932  m_Count = 1;
3933  }
3934  else
3935  {
3936  pNewItem->pNext = m_pFront;
3937  m_pFront->pPrev = pNewItem;
3938  m_pFront = pNewItem;
3939  ++m_Count;
3940  }
3941  return pNewItem;
3942 }
3943 
3944 template<typename T>
3945 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3946 {
3947  ItemType* const pNewItem = PushBack();
3948  pNewItem->Value = value;
3949  return pNewItem;
3950 }
3951 
3952 template<typename T>
3953 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3954 {
3955  ItemType* const pNewItem = PushFront();
3956  pNewItem->Value = value;
3957  return pNewItem;
3958 }
3959 
3960 template<typename T>
3961 void VmaRawList<T>::PopBack()
3962 {
3963  VMA_HEAVY_ASSERT(m_Count > 0);
3964  ItemType* const pBackItem = m_pBack;
3965  ItemType* const pPrevItem = pBackItem->pPrev;
3966  if(pPrevItem != VMA_NULL)
3967  {
3968  pPrevItem->pNext = VMA_NULL;
3969  }
3970  m_pBack = pPrevItem;
3971  m_ItemAllocator.Free(pBackItem);
3972  --m_Count;
3973 }
3974 
3975 template<typename T>
3976 void VmaRawList<T>::PopFront()
3977 {
3978  VMA_HEAVY_ASSERT(m_Count > 0);
3979  ItemType* const pFrontItem = m_pFront;
3980  ItemType* const pNextItem = pFrontItem->pNext;
3981  if(pNextItem != VMA_NULL)
3982  {
3983  pNextItem->pPrev = VMA_NULL;
3984  }
3985  m_pFront = pNextItem;
3986  m_ItemAllocator.Free(pFrontItem);
3987  --m_Count;
3988 }
3989 
3990 template<typename T>
3991 void VmaRawList<T>::Remove(ItemType* pItem)
3992 {
3993  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3994  VMA_HEAVY_ASSERT(m_Count > 0);
3995 
3996  if(pItem->pPrev != VMA_NULL)
3997  {
3998  pItem->pPrev->pNext = pItem->pNext;
3999  }
4000  else
4001  {
4002  VMA_HEAVY_ASSERT(m_pFront == pItem);
4003  m_pFront = pItem->pNext;
4004  }
4005 
4006  if(pItem->pNext != VMA_NULL)
4007  {
4008  pItem->pNext->pPrev = pItem->pPrev;
4009  }
4010  else
4011  {
4012  VMA_HEAVY_ASSERT(m_pBack == pItem);
4013  m_pBack = pItem->pPrev;
4014  }
4015 
4016  m_ItemAllocator.Free(pItem);
4017  --m_Count;
4018 }
4019 
4020 template<typename T>
4021 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4022 {
4023  if(pItem != VMA_NULL)
4024  {
4025  ItemType* const prevItem = pItem->pPrev;
4026  ItemType* const newItem = m_ItemAllocator.Alloc();
4027  newItem->pPrev = prevItem;
4028  newItem->pNext = pItem;
4029  pItem->pPrev = newItem;
4030  if(prevItem != VMA_NULL)
4031  {
4032  prevItem->pNext = newItem;
4033  }
4034  else
4035  {
4036  VMA_HEAVY_ASSERT(m_pFront == pItem);
4037  m_pFront = newItem;
4038  }
4039  ++m_Count;
4040  return newItem;
4041  }
4042  else
4043  return PushBack();
4044 }
4045 
4046 template<typename T>
4047 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4048 {
4049  if(pItem != VMA_NULL)
4050  {
4051  ItemType* const nextItem = pItem->pNext;
4052  ItemType* const newItem = m_ItemAllocator.Alloc();
4053  newItem->pNext = nextItem;
4054  newItem->pPrev = pItem;
4055  pItem->pNext = newItem;
4056  if(nextItem != VMA_NULL)
4057  {
4058  nextItem->pPrev = newItem;
4059  }
4060  else
4061  {
4062  VMA_HEAVY_ASSERT(m_pBack == pItem);
4063  m_pBack = newItem;
4064  }
4065  ++m_Count;
4066  return newItem;
4067  }
4068  else
4069  return PushFront();
4070 }
4071 
4072 template<typename T>
4073 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4074 {
4075  ItemType* const newItem = InsertBefore(pItem);
4076  newItem->Value = value;
4077  return newItem;
4078 }
4079 
4080 template<typename T>
4081 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4082 {
4083  ItemType* const newItem = InsertAfter(pItem);
4084  newItem->Value = value;
4085  return newItem;
4086 }
4087 
4088 template<typename T, typename AllocatorT>
4089 class VmaList
4090 {
4091  VMA_CLASS_NO_COPY(VmaList)
4092 public:
4093  class iterator
4094  {
4095  public:
4096  iterator() :
4097  m_pList(VMA_NULL),
4098  m_pItem(VMA_NULL)
4099  {
4100  }
4101 
4102  T& operator*() const
4103  {
4104  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4105  return m_pItem->Value;
4106  }
4107  T* operator->() const
4108  {
4109  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4110  return &m_pItem->Value;
4111  }
4112 
4113  iterator& operator++()
4114  {
4115  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4116  m_pItem = m_pItem->pNext;
4117  return *this;
4118  }
4119  iterator& operator--()
4120  {
4121  if(m_pItem != VMA_NULL)
4122  {
4123  m_pItem = m_pItem->pPrev;
4124  }
4125  else
4126  {
4127  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4128  m_pItem = m_pList->Back();
4129  }
4130  return *this;
4131  }
4132 
4133  iterator operator++(int)
4134  {
4135  iterator result = *this;
4136  ++*this;
4137  return result;
4138  }
4139  iterator operator--(int)
4140  {
4141  iterator result = *this;
4142  --*this;
4143  return result;
4144  }
4145 
4146  bool operator==(const iterator& rhs) const
4147  {
4148  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4149  return m_pItem == rhs.m_pItem;
4150  }
4151  bool operator!=(const iterator& rhs) const
4152  {
4153  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4154  return m_pItem != rhs.m_pItem;
4155  }
4156 
4157  private:
4158  VmaRawList<T>* m_pList;
4159  VmaListItem<T>* m_pItem;
4160 
4161  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
4162  m_pList(pList),
4163  m_pItem(pItem)
4164  {
4165  }
4166 
4167  friend class VmaList<T, AllocatorT>;
4168  };
4169 
4170  class const_iterator
4171  {
4172  public:
4173  const_iterator() :
4174  m_pList(VMA_NULL),
4175  m_pItem(VMA_NULL)
4176  {
4177  }
4178 
4179  const_iterator(const iterator& src) :
4180  m_pList(src.m_pList),
4181  m_pItem(src.m_pItem)
4182  {
4183  }
4184 
4185  const T& operator*() const
4186  {
4187  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4188  return m_pItem->Value;
4189  }
4190  const T* operator->() const
4191  {
4192  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4193  return &m_pItem->Value;
4194  }
4195 
4196  const_iterator& operator++()
4197  {
4198  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
4199  m_pItem = m_pItem->pNext;
4200  return *this;
4201  }
4202  const_iterator& operator--()
4203  {
4204  if(m_pItem != VMA_NULL)
4205  {
4206  m_pItem = m_pItem->pPrev;
4207  }
4208  else
4209  {
4210  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4211  m_pItem = m_pList->Back();
4212  }
4213  return *this;
4214  }
4215 
4216  const_iterator operator++(int)
4217  {
4218  const_iterator result = *this;
4219  ++*this;
4220  return result;
4221  }
4222  const_iterator operator--(int)
4223  {
4224  const_iterator result = *this;
4225  --*this;
4226  return result;
4227  }
4228 
4229  bool operator==(const const_iterator& rhs) const
4230  {
4231  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4232  return m_pItem == rhs.m_pItem;
4233  }
4234  bool operator!=(const const_iterator& rhs) const
4235  {
4236  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
4237  return m_pItem != rhs.m_pItem;
4238  }
4239 
4240  private:
4241  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
4242  m_pList(pList),
4243  m_pItem(pItem)
4244  {
4245  }
4246 
4247  const VmaRawList<T>* m_pList;
4248  const VmaListItem<T>* m_pItem;
4249 
4250  friend class VmaList<T, AllocatorT>;
4251  };
4252 
4253  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
4254 
4255  bool empty() const { return m_RawList.IsEmpty(); }
4256  size_t size() const { return m_RawList.GetCount(); }
4257 
4258  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
4259  iterator end() { return iterator(&m_RawList, VMA_NULL); }
4260 
4261  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
4262  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
4263 
4264  void clear() { m_RawList.Clear(); }
4265  void push_back(const T& value) { m_RawList.PushBack(value); }
4266  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
4267  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
4268 
4269 private:
4270  VmaRawList<T> m_RawList;
4271 };
4272 
4273 #endif // #if VMA_USE_STL_LIST
4274 
4276 // class VmaMap
4277 
4278 // Unused in this version.
4279 #if 0
4280 
4281 #if VMA_USE_STL_UNORDERED_MAP
4282 
4283 #define VmaPair std::pair
4284 
4285 #define VMA_MAP_TYPE(KeyT, ValueT) \
4286  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4287 
4288 #else // #if VMA_USE_STL_UNORDERED_MAP
4289 
4290 template<typename T1, typename T2>
4291 struct VmaPair
4292 {
4293  T1 first;
4294  T2 second;
4295 
4296  VmaPair() : first(), second() { }
4297  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
4298 };
4299 
4300 /* Class compatible with subset of interface of std::unordered_map.
4301 KeyT, ValueT must be POD because they will be stored in VmaVector.
4302 */
4303 template<typename KeyT, typename ValueT>
4304 class VmaMap
4305 {
4306 public:
4307  typedef VmaPair<KeyT, ValueT> PairType;
4308  typedef PairType* iterator;
4309 
4310  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
4311 
4312  iterator begin() { return m_Vector.begin(); }
4313  iterator end() { return m_Vector.end(); }
4314 
4315  void insert(const PairType& pair);
4316  iterator find(const KeyT& key);
4317  void erase(iterator it);
4318 
4319 private:
4320  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
4321 };
4322 
4323 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4324 
4325 template<typename FirstT, typename SecondT>
4326 struct VmaPairFirstLess
4327 {
4328  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
4329  {
4330  return lhs.first < rhs.first;
4331  }
4332  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
4333  {
4334  return lhs.first < rhsFirst;
4335  }
4336 };
4337 
4338 template<typename KeyT, typename ValueT>
4339 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
4340 {
4341  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4342  m_Vector.data(),
4343  m_Vector.data() + m_Vector.size(),
4344  pair,
4345  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
4346  VmaVectorInsert(m_Vector, indexToInsert, pair);
4347 }
4348 
4349 template<typename KeyT, typename ValueT>
4350 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
4351 {
4352  PairType* it = VmaBinaryFindFirstNotLess(
4353  m_Vector.data(),
4354  m_Vector.data() + m_Vector.size(),
4355  key,
4356  VmaPairFirstLess<KeyT, ValueT>());
4357  if((it != m_Vector.end()) && (it->first == key))
4358  {
4359  return it;
4360  }
4361  else
4362  {
4363  return m_Vector.end();
4364  }
4365 }
4366 
4367 template<typename KeyT, typename ValueT>
4368 void VmaMap<KeyT, ValueT>::erase(iterator it)
4369 {
4370  VmaVectorRemove(m_Vector, it - m_Vector.begin());
4371 }
4372 
4373 #endif // #if VMA_USE_STL_UNORDERED_MAP
4374 
4375 #endif // #if 0
4376 
4378 
4379 class VmaDeviceMemoryBlock;
4380 
4381 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4382 
4383 struct VmaAllocation_T
4384 {
4385  VMA_CLASS_NO_COPY(VmaAllocation_T)
4386 private:
4387  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4388 
4389  enum FLAGS
4390  {
4391  FLAG_USER_DATA_STRING = 0x01,
4392  };
4393 
4394 public:
4395  enum ALLOCATION_TYPE
4396  {
4397  ALLOCATION_TYPE_NONE,
4398  ALLOCATION_TYPE_BLOCK,
4399  ALLOCATION_TYPE_DEDICATED,
4400  };
4401 
4402  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4403  m_Alignment(1),
4404  m_Size(0),
4405  m_pUserData(VMA_NULL),
4406  m_LastUseFrameIndex(currentFrameIndex),
4407  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4408  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4409  m_MapCount(0),
4410  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4411  {
4412 #if VMA_STATS_STRING_ENABLED
4413  m_CreationFrameIndex = currentFrameIndex;
4414  m_BufferImageUsage = 0;
4415 #endif
4416  }
4417 
4418  ~VmaAllocation_T()
4419  {
4420  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4421 
4422  // Check if owned string was freed.
4423  VMA_ASSERT(m_pUserData == VMA_NULL);
4424  }
4425 
4426  void InitBlockAllocation(
4427  VmaPool hPool,
4428  VmaDeviceMemoryBlock* block,
4429  VkDeviceSize offset,
4430  VkDeviceSize alignment,
4431  VkDeviceSize size,
4432  VmaSuballocationType suballocationType,
4433  bool mapped,
4434  bool canBecomeLost)
4435  {
4436  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4437  VMA_ASSERT(block != VMA_NULL);
4438  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4439  m_Alignment = alignment;
4440  m_Size = size;
4441  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4442  m_SuballocationType = (uint8_t)suballocationType;
4443  m_BlockAllocation.m_hPool = hPool;
4444  m_BlockAllocation.m_Block = block;
4445  m_BlockAllocation.m_Offset = offset;
4446  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4447  }
4448 
4449  void InitLost()
4450  {
4451  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4452  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4453  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4454  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4455  m_BlockAllocation.m_Block = VMA_NULL;
4456  m_BlockAllocation.m_Offset = 0;
4457  m_BlockAllocation.m_CanBecomeLost = true;
4458  }
4459 
4460  void ChangeBlockAllocation(
4461  VmaAllocator hAllocator,
4462  VmaDeviceMemoryBlock* block,
4463  VkDeviceSize offset);
4464 
4465  // pMappedData being non-null means the allocation was created with the MAPPED flag.
4466  void InitDedicatedAllocation(
4467  uint32_t memoryTypeIndex,
4468  VkDeviceMemory hMemory,
4469  VmaSuballocationType suballocationType,
4470  void* pMappedData,
4471  VkDeviceSize size)
4472  {
4473  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4474  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4475  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4476  m_Alignment = 0;
4477  m_Size = size;
4478  m_SuballocationType = (uint8_t)suballocationType;
4479  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4480  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4481  m_DedicatedAllocation.m_hMemory = hMemory;
4482  m_DedicatedAllocation.m_pMappedData = pMappedData;
4483  }
4484 
4485  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4486  VkDeviceSize GetAlignment() const { return m_Alignment; }
4487  VkDeviceSize GetSize() const { return m_Size; }
4488  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4489  void* GetUserData() const { return m_pUserData; }
4490  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4491  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4492 
4493  VmaDeviceMemoryBlock* GetBlock() const
4494  {
4495  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4496  return m_BlockAllocation.m_Block;
4497  }
4498  VkDeviceSize GetOffset() const;
4499  VkDeviceMemory GetMemory() const;
4500  uint32_t GetMemoryTypeIndex() const;
4501  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4502  void* GetMappedData() const;
4503  bool CanBecomeLost() const;
4504  VmaPool GetPool() const;
4505 
4506  uint32_t GetLastUseFrameIndex() const
4507  {
4508  return m_LastUseFrameIndex.load();
4509  }
4510  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4511  {
4512  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4513  }
4514  /*
4515  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4516  makes the allocation lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4517  - Otherwise, returns false.
4518 
4519  If hAllocation is already lost, this function asserts - it must not be called in that case.
4520  If hAllocation was not created with CAN_BECOME_LOST_BIT, it asserts as well.
4521  */
4522  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4523 
4524  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4525  {
4526  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4527  outInfo.blockCount = 1;
4528  outInfo.allocationCount = 1;
4529  outInfo.unusedRangeCount = 0;
4530  outInfo.usedBytes = m_Size;
4531  outInfo.unusedBytes = 0;
4532  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4533  outInfo.unusedRangeSizeMin = UINT64_MAX;
4534  outInfo.unusedRangeSizeMax = 0;
4535  }
4536 
4537  void BlockAllocMap();
4538  void BlockAllocUnmap();
4539  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4540  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4541 
4542 #if VMA_STATS_STRING_ENABLED
4543  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4544  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4545 
4546  void InitBufferImageUsage(uint32_t bufferImageUsage)
4547  {
4548  VMA_ASSERT(m_BufferImageUsage == 0);
4549  m_BufferImageUsage = bufferImageUsage;
4550  }
4551 
4552  void PrintParameters(class VmaJsonWriter& json) const;
4553 #endif
4554 
4555 private:
4556  VkDeviceSize m_Alignment;
4557  VkDeviceSize m_Size;
4558  void* m_pUserData;
4559  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4560  uint8_t m_Type; // ALLOCATION_TYPE
4561  uint8_t m_SuballocationType; // VmaSuballocationType
4562  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4563  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory() (see the sketch after this struct).
4564  uint8_t m_MapCount;
4565  uint8_t m_Flags; // enum FLAGS
4566 
4567  // Allocation out of VmaDeviceMemoryBlock.
4568  struct BlockAllocation
4569  {
4570  VmaPool m_hPool; // Null if belongs to general memory.
4571  VmaDeviceMemoryBlock* m_Block;
4572  VkDeviceSize m_Offset;
4573  bool m_CanBecomeLost;
4574  };
4575 
4576  // Allocation for an object that has its own private VkDeviceMemory.
4577  struct DedicatedAllocation
4578  {
4579  uint32_t m_MemoryTypeIndex;
4580  VkDeviceMemory m_hMemory;
4581  void* m_pMappedData; // Not null means memory is mapped.
4582  };
4583 
4584  union
4585  {
4586  // Allocation out of VmaDeviceMemoryBlock.
4587  BlockAllocation m_BlockAllocation;
4588  // Allocation for an object that has its own private VkDeviceMemory.
4589  DedicatedAllocation m_DedicatedAllocation;
4590  };
4591 
4592 #if VMA_STATS_STRING_ENABLED
4593  uint32_t m_CreationFrameIndex;
4594  uint32_t m_BufferImageUsage; // 0 if unknown.
4595 #endif
4596 
4597  void FreeUserDataString(VmaAllocator hAllocator);
4598 };
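// A minimal decoding sketch (editorial, not part of the library) for the
// m_MapCount bit-packing described in the struct above. The Example* names
// are hypothetical.
#if 0
static bool ExampleIsPersistentlyMapped(uint8_t mapCount)
{
    return (mapCount & 0x80) != 0; // MAP_COUNT_FLAG_PERSISTENT_MAP
}
static uint8_t ExampleMapRefCount(uint8_t mapCount)
{
    return mapCount & 0x7F; // vmaMapMemory()/vmaUnmapMemory() reference counter
}
#endif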
4599 
4600 /*
4601 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
4602 returned as an allocated memory block, or free.
4603 */
4604 struct VmaSuballocation
4605 {
4606  VkDeviceSize offset;
4607  VkDeviceSize size;
4608  VmaAllocation hAllocation;
4609  VmaSuballocationType type;
4610 };
4611 
4612 // Comparator for offsets.
4613 struct VmaSuballocationOffsetLess
4614 {
4615  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4616  {
4617  return lhs.offset < rhs.offset;
4618  }
4619 };
4620 struct VmaSuballocationOffsetGreater
4621 {
4622  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4623  {
4624  return lhs.offset > rhs.offset;
4625  }
4626 };
4627 
4628 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4629 
4630 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
4631 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4632 
4633 /*
4634 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4635 
4636 If canMakeOtherLost was false:
4637 - item points to a FREE suballocation.
4638 - itemsToMakeLostCount is 0.
4639 
4640 If canMakeOtherLost was true:
4641 - item points to the first of a sequence of suballocations, which are either FREE
4642  or point to VmaAllocations that can become lost.
4643 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4644  the requested allocation to succeed.
4645 */
4646 struct VmaAllocationRequest
4647 {
4648  VkDeviceSize offset;
4649  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4650  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4651  VmaSuballocationList::iterator item;
4652  size_t itemsToMakeLostCount;
4653  void* customData;
4654 
4655  VkDeviceSize CalcCost() const
4656  {
4657  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4658  }
4659 };
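// Editorial illustration of the cost model above, with hypothetical numbers:
// a request that overlaps 1 MiB of items to make lost and requires 2
// allocations to become lost costs sumItemSize + 2 * VMA_LOST_ALLOCATION_COST.
#if 0
VmaAllocationRequest exampleRequest = {};
exampleRequest.sumItemSize = 1048576;    // 1 MiB of overlapping items
exampleRequest.itemsToMakeLostCount = 2;
const VkDeviceSize cost = exampleRequest.CalcCost(); // 1048576 + 2 * 1048576
#endif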
4660 
4661 /*
4662 Data structure used for bookkeeping of allocations and unused ranges of memory
4663 in a single VkDeviceMemory block.
4664 */
4665 class VmaBlockMetadata
4666 {
4667 public:
4668  VmaBlockMetadata(VmaAllocator hAllocator);
4669  virtual ~VmaBlockMetadata() { }
4670  virtual void Init(VkDeviceSize size) { m_Size = size; }
4671 
4672  // Validates all data structures inside this object. If not valid, returns false.
4673  virtual bool Validate() const = 0;
4674  VkDeviceSize GetSize() const { return m_Size; }
4675  virtual size_t GetAllocationCount() const = 0;
4676  virtual VkDeviceSize GetSumFreeSize() const = 0;
4677  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4678  // Returns true if this block is empty - contains only single free suballocation.
4679  virtual bool IsEmpty() const = 0;
4680 
4681  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4682  // Shouldn't modify blockCount.
4683  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4684 
4685 #if VMA_STATS_STRING_ENABLED
4686  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4687 #endif
4688 
4689  // Tries to find a place for suballocation with given parameters inside this block.
4690  // If succeeded, fills pAllocationRequest and returns true.
4691  // If failed, returns false.
4692  virtual bool CreateAllocationRequest(
4693  uint32_t currentFrameIndex,
4694  uint32_t frameInUseCount,
4695  VkDeviceSize bufferImageGranularity,
4696  VkDeviceSize allocSize,
4697  VkDeviceSize allocAlignment,
4698  bool upperAddress,
4699  VmaSuballocationType allocType,
4700  bool canMakeOtherLost,
4701  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4702  VmaAllocationRequest* pAllocationRequest) = 0;
4703 
4704  virtual bool MakeRequestedAllocationsLost(
4705  uint32_t currentFrameIndex,
4706  uint32_t frameInUseCount,
4707  VmaAllocationRequest* pAllocationRequest) = 0;
4708 
4709  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4710 
4711  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4712 
4713  // Makes actual allocation based on request. Request must already be checked and valid.
4714  virtual void Alloc(
4715  const VmaAllocationRequest& request,
4716  VmaSuballocationType type,
4717  VkDeviceSize allocSize,
4718  bool upperAddress,
4719  VmaAllocation hAllocation) = 0;
4720 
4721  // Frees suballocation assigned to given memory region.
4722  virtual void Free(const VmaAllocation allocation) = 0;
4723  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4724 
4725 protected:
4726  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4727 
4728 #if VMA_STATS_STRING_ENABLED
4729  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4730  VkDeviceSize unusedBytes,
4731  size_t allocationCount,
4732  size_t unusedRangeCount) const;
4733  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4734  VkDeviceSize offset,
4735  VmaAllocation hAllocation) const;
4736  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4737  VkDeviceSize offset,
4738  VkDeviceSize size) const;
4739  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4740 #endif
4741 
4742 private:
4743  VkDeviceSize m_Size;
4744  const VkAllocationCallbacks* m_pAllocationCallbacks;
4745 };
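// Editorial sketch of the call sequence implied by the contracts above.
// 'pMetadata', 'hAllocation' and the scalar parameters are hypothetical.
#if 0
VmaAllocationRequest request;
if(pMetadata->CreateAllocationRequest(currentFrameIndex, frameInUseCount,
    bufferImageGranularity, allocSize, allocAlignment, false /*upperAddress*/,
    suballocType, canMakeOtherLost, strategy, &request) &&
    pMetadata->MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
{
    // The request is now checked and valid, as Alloc() demands.
    pMetadata->Alloc(request, suballocType, allocSize, false /*upperAddress*/, hAllocation);
}
#endif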
4746 
4747 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4748  VMA_ASSERT(0 && "Validation failed: " #cond); \
4749  return false; \
4750  } } while(false)
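// Editorial example of how VMA_VALIDATE is used in the Validate()
// implementations: each failed condition asserts with its text and makes the
// enclosing function return false. ExampleValidateRange is hypothetical.
#if 0
static bool ExampleValidateRange(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize blockSize)
{
    VMA_VALIDATE(size > 0);
    VMA_VALIDATE(offset + size <= blockSize);
    return true;
}
#endif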
4751 
4752 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4753 {
4754  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4755 public:
4756  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4757  virtual ~VmaBlockMetadata_Generic();
4758  virtual void Init(VkDeviceSize size);
4759 
4760  virtual bool Validate() const;
4761  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4762  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4763  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4764  virtual bool IsEmpty() const;
4765 
4766  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4767  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4768 
4769 #if VMA_STATS_STRING_ENABLED
4770  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4771 #endif
4772 
4773  virtual bool CreateAllocationRequest(
4774  uint32_t currentFrameIndex,
4775  uint32_t frameInUseCount,
4776  VkDeviceSize bufferImageGranularity,
4777  VkDeviceSize allocSize,
4778  VkDeviceSize allocAlignment,
4779  bool upperAddress,
4780  VmaSuballocationType allocType,
4781  bool canMakeOtherLost,
4782  uint32_t strategy,
4783  VmaAllocationRequest* pAllocationRequest);
4784 
4785  virtual bool MakeRequestedAllocationsLost(
4786  uint32_t currentFrameIndex,
4787  uint32_t frameInUseCount,
4788  VmaAllocationRequest* pAllocationRequest);
4789 
4790  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4791 
4792  virtual VkResult CheckCorruption(const void* pBlockData);
4793 
4794  virtual void Alloc(
4795  const VmaAllocationRequest& request,
4796  VmaSuballocationType type,
4797  VkDeviceSize allocSize,
4798  bool upperAddress,
4799  VmaAllocation hAllocation);
4800 
4801  virtual void Free(const VmaAllocation allocation);
4802  virtual void FreeAtOffset(VkDeviceSize offset);
4803 
4804 private:
4805  uint32_t m_FreeCount;
4806  VkDeviceSize m_SumFreeSize;
4807  VmaSuballocationList m_Suballocations;
4808  // Suballocations that are free and have size greater than a certain threshold.
4809  // Sorted by size, ascending (see the search sketch after this class).
4810  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4811 
4812  bool ValidateFreeSuballocationList() const;
4813 
4814  // Checks if the requested suballocation with given parameters can be placed in given suballocItem.
4815  // If yes, fills pOffset and returns true. If not, returns false.
4816  bool CheckAllocation(
4817  uint32_t currentFrameIndex,
4818  uint32_t frameInUseCount,
4819  VkDeviceSize bufferImageGranularity,
4820  VkDeviceSize allocSize,
4821  VkDeviceSize allocAlignment,
4822  VmaSuballocationType allocType,
4823  VmaSuballocationList::const_iterator suballocItem,
4824  bool canMakeOtherLost,
4825  VkDeviceSize* pOffset,
4826  size_t* itemsToMakeLostCount,
4827  VkDeviceSize* pSumFreeSize,
4828  VkDeviceSize* pSumItemSize) const;
4829  // Given a free suballocation, merges it with the following one, which must also be free.
4830  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4831  // Releases given suballocation, making it free.
4832  // Merges it with adjacent free suballocations if applicable.
4833  // Returns iterator to new free suballocation at this place.
4834  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4835  // Given a free suballocation, inserts it into the sorted list
4836  // m_FreeSuballocationsBySize if it qualifies (is large enough).
4837  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4838  // Given a free suballocation, removes it from the sorted list
4839  // m_FreeSuballocationsBySize if it is registered there.
4840  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4841 };
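// Editorial sketch of why m_FreeSuballocationsBySize is kept sorted by size,
// ascending: a best-fit candidate is a binary search away. 'freeBySize' and
// 'allocSize' are hypothetical.
#if 0
size_t lo = 0, hi = freeBySize.size();
while(lo < hi)
{
    const size_t mid = lo + (hi - lo) / 2;
    if(freeBySize[mid]->size < allocSize)
        lo = mid + 1; // too small, search the upper half
    else
        hi = mid;
}
// If lo < freeBySize.size(), freeBySize[lo] is the smallest free range that fits.
#endif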
4842 
4843 /*
4844 Allocations and their references in internal data structure look like this:
4845 
4846 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4847 
4848  0 +-------+
4849  | |
4850  | |
4851  | |
4852  +-------+
4853  | Alloc | 1st[m_1stNullItemsBeginCount]
4854  +-------+
4855  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4856  +-------+
4857  | ... |
4858  +-------+
4859  | Alloc | 1st[1st.size() - 1]
4860  +-------+
4861  | |
4862  | |
4863  | |
4864 GetSize() +-------+
4865 
4866 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4867 
4868  0 +-------+
4869  | Alloc | 2nd[0]
4870  +-------+
4871  | Alloc | 2nd[1]
4872  +-------+
4873  | ... |
4874  +-------+
4875  | Alloc | 2nd[2nd.size() - 1]
4876  +-------+
4877  | |
4878  | |
4879  | |
4880  +-------+
4881  | Alloc | 1st[m_1stNullItemsBeginCount]
4882  +-------+
4883  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4884  +-------+
4885  | ... |
4886  +-------+
4887  | Alloc | 1st[1st.size() - 1]
4888  +-------+
4889  | |
4890 GetSize() +-------+
4891 
4892 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4893 
4894  0 +-------+
4895  | |
4896  | |
4897  | |
4898  +-------+
4899  | Alloc | 1st[m_1stNullItemsBeginCount]
4900  +-------+
4901  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4902  +-------+
4903  | ... |
4904  +-------+
4905  | Alloc | 1st[1st.size() - 1]
4906  +-------+
4907  | |
4908  | |
4909  | |
4910  +-------+
4911  | Alloc | 2nd[2nd.size() - 1]
4912  +-------+
4913  | ... |
4914  +-------+
4915  | Alloc | 2nd[1]
4916  +-------+
4917  | Alloc | 2nd[0]
4918 GetSize() +-------+
4919 
4920 */
4921 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4922 {
4923  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
4924 public:
4925  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
4926  virtual ~VmaBlockMetadata_Linear();
4927  virtual void Init(VkDeviceSize size);
4928 
4929  virtual bool Validate() const;
4930  virtual size_t GetAllocationCount() const;
4931  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4932  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4933  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
4934 
4935  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4936  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4937 
4938 #if VMA_STATS_STRING_ENABLED
4939  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4940 #endif
4941 
4942  virtual bool CreateAllocationRequest(
4943  uint32_t currentFrameIndex,
4944  uint32_t frameInUseCount,
4945  VkDeviceSize bufferImageGranularity,
4946  VkDeviceSize allocSize,
4947  VkDeviceSize allocAlignment,
4948  bool upperAddress,
4949  VmaSuballocationType allocType,
4950  bool canMakeOtherLost,
4951  uint32_t strategy,
4952  VmaAllocationRequest* pAllocationRequest);
4953 
4954  virtual bool MakeRequestedAllocationsLost(
4955  uint32_t currentFrameIndex,
4956  uint32_t frameInUseCount,
4957  VmaAllocationRequest* pAllocationRequest);
4958 
4959  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4960 
4961  virtual VkResult CheckCorruption(const void* pBlockData);
4962 
4963  virtual void Alloc(
4964  const VmaAllocationRequest& request,
4965  VmaSuballocationType type,
4966  VkDeviceSize allocSize,
4967  bool upperAddress,
4968  VmaAllocation hAllocation);
4969 
4970  virtual void Free(const VmaAllocation allocation);
4971  virtual void FreeAtOffset(VkDeviceSize offset);
4972 
4973 private:
4974  /*
4975  There are two suballocation vectors, used in a ping-pong fashion.
4976  The one with index m_1stVectorIndex is called 1st.
4977  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
4978  2nd can be non-empty only when 1st is not empty.
4979  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation (see the index-swap sketch after this class).
4980  */
4981  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
4982 
4983  enum SECOND_VECTOR_MODE
4984  {
4985  SECOND_VECTOR_EMPTY,
4986  /*
4987  Suballocations in 2nd vector are created later than the ones in 1st, but they
4988  all have smaller offsets.
4989  */
4990  SECOND_VECTOR_RING_BUFFER,
4991  /*
4992  Suballocations in 2nd vector are upper side of double stack.
4993  They all have offsets higher than those in 1st vector.
4994  Top of this stack means smaller offsets, but higher indices in this vector.
4995  */
4996  SECOND_VECTOR_DOUBLE_STACK,
4997  };
4998 
4999  VkDeviceSize m_SumFreeSize;
5000  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5001  uint32_t m_1stVectorIndex;
5002  SECOND_VECTOR_MODE m_2ndVectorMode;
5003 
5004  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5005  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5006  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5007  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5008 
5009  // Number of items in 1st vector with hAllocation = null at the beginning.
5010  size_t m_1stNullItemsBeginCount;
5011  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5012  size_t m_1stNullItemsMiddleCount;
5013  // Number of items in 2nd vector with hAllocation = null.
5014  size_t m_2ndNullItemsCount;
5015 
5016  bool ShouldCompact1st() const;
5017  void CleanupAfterFree();
5018 };
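// Editorial sketch of the ping-pong selection used by the class above: the
// vector at index m_1stVectorIndex plays the role of 1st, the other one
// (index XOR 1) plays the role of 2nd, so swapping roles flips a single bit.
#if 0
uint32_t firstVectorIndex = 0;
const uint32_t secondVectorIndex = firstVectorIndex ^ 1; // == 1
firstVectorIndex ^= 1; // roles swapped: 1st is now the vector at index 1
#endif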
5019 
5020 /*
5021 - GetSize() is the original size of the allocated memory block.
5022 - m_UsableSize is this size aligned down to a power of two.
5023  All allocations and calculations happen relative to m_UsableSize.
5024 - GetUnusableSize() is the difference between them.
5025  It is reported as a separate, unused range, not available for allocations.
5026 
5027 The node at level 0 has size = m_UsableSize.
5028 Each subsequent level contains nodes half the size of the previous level (see the numeric sketch after this class).
5029 m_LevelCount is the maximum number of levels to use in the current object.
5030 */
5031 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5032 {
5033  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5034 public:
5035  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5036  virtual ~VmaBlockMetadata_Buddy();
5037  virtual void Init(VkDeviceSize size);
5038 
5039  virtual bool Validate() const;
5040  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5041  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5042  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5043  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5044 
5045  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5046  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5047 
5048 #if VMA_STATS_STRING_ENABLED
5049  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5050 #endif
5051 
5052  virtual bool CreateAllocationRequest(
5053  uint32_t currentFrameIndex,
5054  uint32_t frameInUseCount,
5055  VkDeviceSize bufferImageGranularity,
5056  VkDeviceSize allocSize,
5057  VkDeviceSize allocAlignment,
5058  bool upperAddress,
5059  VmaSuballocationType allocType,
5060  bool canMakeOtherLost,
5061  uint32_t strategy,
5062  VmaAllocationRequest* pAllocationRequest);
5063 
5064  virtual bool MakeRequestedAllocationsLost(
5065  uint32_t currentFrameIndex,
5066  uint32_t frameInUseCount,
5067  VmaAllocationRequest* pAllocationRequest);
5068 
5069  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5070 
5071  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5072 
5073  virtual void Alloc(
5074  const VmaAllocationRequest& request,
5075  VmaSuballocationType type,
5076  VkDeviceSize allocSize,
5077  bool upperAddress,
5078  VmaAllocation hAllocation);
5079 
5080  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5081  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5082 
5083 private:
5084  static const VkDeviceSize MIN_NODE_SIZE = 32;
5085  static const size_t MAX_LEVELS = 30;
5086 
5087  struct ValidationContext
5088  {
5089  size_t calculatedAllocationCount;
5090  size_t calculatedFreeCount;
5091  VkDeviceSize calculatedSumFreeSize;
5092 
5093  ValidationContext() :
5094  calculatedAllocationCount(0),
5095  calculatedFreeCount(0),
5096  calculatedSumFreeSize(0) { }
5097  };
5098 
5099  struct Node
5100  {
5101  VkDeviceSize offset;
5102  enum TYPE
5103  {
5104  TYPE_FREE,
5105  TYPE_ALLOCATION,
5106  TYPE_SPLIT,
5107  TYPE_COUNT
5108  } type;
5109  Node* parent;
5110  Node* buddy;
5111 
5112  union
5113  {
5114  struct
5115  {
5116  Node* prev;
5117  Node* next;
5118  } free;
5119  struct
5120  {
5121  VmaAllocation alloc;
5122  } allocation;
5123  struct
5124  {
5125  Node* leftChild;
5126  } split;
5127  };
5128  };
5129 
5130  // Size of the memory block aligned down to a power of two.
5131  VkDeviceSize m_UsableSize;
5132  uint32_t m_LevelCount;
5133 
5134  Node* m_Root;
5135  struct {
5136  Node* front;
5137  Node* back;
5138  } m_FreeList[MAX_LEVELS];
5139  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5140  size_t m_AllocationCount;
5141  // Number of nodes in the tree with type == TYPE_FREE.
5142  size_t m_FreeCount;
5143  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5144  VkDeviceSize m_SumFreeSize;
5145 
5146  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5147  void DeleteNode(Node* node);
5148  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5149  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5150  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5151  // Alloc passed just for validation. Can be null.
5152  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5153  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5154  // Adds node to the front of FreeList at given level.
5155  // node->type must be FREE.
5156  // node->free.prev, next can be undefined.
5157  void AddToFreeListFront(uint32_t level, Node* node);
5158  // Removes node from FreeList at given level.
5159  // node->type must be FREE.
5160  // node->free.prev, next stay untouched.
5161  void RemoveFromFreeList(uint32_t level, Node* node);
5162 
5163 #if VMA_STATS_STRING_ENABLED
5164  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5165 #endif
5166 };
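// Editorial numeric illustration of the size/level relationship above:
// for GetSize() = 100 MiB, m_UsableSize is 64 MiB (the largest power of two
// that fits) and GetUnusableSize() is 36 MiB, reported as one unused range.
#if 0
const VkDeviceSize usableSize = 64ull * 1024 * 1024;
const VkDeviceSize level0Size = usableSize >> 0; // 64 MiB - the root node
const VkDeviceSize level1Size = usableSize >> 1; // 32 MiB
const VkDeviceSize level2Size = usableSize >> 2; // 16 MiB, i.e. LevelToNodeSize(2)
#endif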
5167 
5168 /*
5169 Represents a single block of device memory (`VkDeviceMemory`) with all the
5170 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5171 
5172 Thread-safety: This class must be externally synchronized.
5173 */
5174 class VmaDeviceMemoryBlock
5175 {
5176  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5177 public:
5178  VmaBlockMetadata* m_pMetadata;
5179 
5180  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5181 
5182  ~VmaDeviceMemoryBlock()
5183  {
5184  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5185  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5186  }
5187 
5188  // Always call after construction.
5189  void Init(
5190  VmaAllocator hAllocator,
5191  uint32_t newMemoryTypeIndex,
5192  VkDeviceMemory newMemory,
5193  VkDeviceSize newSize,
5194  uint32_t id,
5195  uint32_t algorithm);
5196  // Always call before destruction.
5197  void Destroy(VmaAllocator allocator);
5198 
5199  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5200  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5201  uint32_t GetId() const { return m_Id; }
5202  void* GetMappedData() const { return m_pMappedData; }
5203 
5204  // Validates all data structures inside this object. If not valid, returns false.
5205  bool Validate() const;
5206 
5207  VkResult CheckCorruption(VmaAllocator hAllocator);
5208 
5209  // ppData can be null. See the mapping sketch after this class.
5210  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5211  void Unmap(VmaAllocator hAllocator, uint32_t count);
5212 
5213  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5214  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5215 
5216  VkResult BindBufferMemory(
5217  const VmaAllocator hAllocator,
5218  const VmaAllocation hAllocation,
5219  VkBuffer hBuffer);
5220  VkResult BindImageMemory(
5221  const VmaAllocator hAllocator,
5222  const VmaAllocation hAllocation,
5223  VkImage hImage);
5224 
5225 private:
5226  uint32_t m_MemoryTypeIndex;
5227  uint32_t m_Id;
5228  VkDeviceMemory m_hMemory;
5229 
5230  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5231  // Also protects m_MapCount, m_pMappedData.
5232  VMA_MUTEX m_Mutex;
5233  uint32_t m_MapCount;
5234  void* m_pMappedData;
5235 };
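// Editorial sketch of the reference-counted Map()/Unmap() contract above,
// assuming a valid 'block' and 'hAllocator': vkMapMemory runs only when the
// internal counter leaves zero, vkUnmapMemory only when it returns to zero.
#if 0
void* pData = VMA_NULL;
block.Map(hAllocator, 1, &pData);   // counter 0 -> 1: actually maps
block.Map(hAllocator, 1, VMA_NULL); // counter 1 -> 2: reuses the mapping
block.Unmap(hAllocator, 2);         // counter 2 -> 0: actually unmaps
#endif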
5236 
5237 struct VmaPointerLess
5238 {
5239  bool operator()(const void* lhs, const void* rhs) const
5240  {
5241  return lhs < rhs;
5242  }
5243 };
5244 
5245 class VmaDefragmentator;
5246 
5247 /*
5248 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5249 Vulkan memory type.
5250 
5251 Synchronized internally with a mutex.
5252 */
5253 struct VmaBlockVector
5254 {
5255  VMA_CLASS_NO_COPY(VmaBlockVector)
5256 public:
5257  VmaBlockVector(
5258  VmaAllocator hAllocator,
5259  uint32_t memoryTypeIndex,
5260  VkDeviceSize preferredBlockSize,
5261  size_t minBlockCount,
5262  size_t maxBlockCount,
5263  VkDeviceSize bufferImageGranularity,
5264  uint32_t frameInUseCount,
5265  bool isCustomPool,
5266  bool explicitBlockSize,
5267  uint32_t algorithm);
5268  ~VmaBlockVector();
5269 
5270  VkResult CreateMinBlocks();
5271 
5272  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5273  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5274  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5275  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5276  uint32_t GetAlgorithm() const { return m_Algorithm; }
5277 
5278  void GetPoolStats(VmaPoolStats* pStats);
5279 
5280  bool IsEmpty() const { return m_Blocks.empty(); }
5281  bool IsCorruptionDetectionEnabled() const;
5282 
5283  VkResult Allocate(
5284  VmaPool hCurrentPool,
5285  uint32_t currentFrameIndex,
5286  VkDeviceSize size,
5287  VkDeviceSize alignment,
5288  const VmaAllocationCreateInfo& createInfo,
5289  VmaSuballocationType suballocType,
5290  VmaAllocation* pAllocation);
5291 
5292  void Free(
5293  VmaAllocation hAllocation);
5294 
5295  // Adds statistics of this BlockVector to pStats.
5296  void AddStats(VmaStats* pStats);
5297 
5298 #if VMA_STATS_STRING_ENABLED
5299  void PrintDetailedMap(class VmaJsonWriter& json);
5300 #endif
5301 
5302  void MakePoolAllocationsLost(
5303  uint32_t currentFrameIndex,
5304  size_t* pLostAllocationCount);
5305  VkResult CheckCorruption();
5306 
5307  VmaDefragmentator* EnsureDefragmentator(
5308  VmaAllocator hAllocator,
5309  uint32_t currentFrameIndex);
5310 
5311  VkResult Defragment(
5312  VmaDefragmentationStats* pDefragmentationStats,
5313  VkDeviceSize& maxBytesToMove,
5314  uint32_t& maxAllocationsToMove);
5315 
5316  void DestroyDefragmentator();
5317 
5318 private:
5319  friend class VmaDefragmentator;
5320 
5321  const VmaAllocator m_hAllocator;
5322  const uint32_t m_MemoryTypeIndex;
5323  const VkDeviceSize m_PreferredBlockSize;
5324  const size_t m_MinBlockCount;
5325  const size_t m_MaxBlockCount;
5326  const VkDeviceSize m_BufferImageGranularity;
5327  const uint32_t m_FrameInUseCount;
5328  const bool m_IsCustomPool;
5329  const bool m_ExplicitBlockSize;
5330  const uint32_t m_Algorithm;
5331  bool m_HasEmptyBlock;
5332  VMA_MUTEX m_Mutex;
5333  // Incrementally sorted by sumFreeSize, ascending.
5334  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5335  /* There can be at most one block that is completely empty (m_HasEmptyBlock) - a
5336  hysteresis to avoid the pessimistic case of alternating creation and destruction
5337  of a VkDeviceMemory block. */
5338  VmaDefragmentator* m_pDefragmentator;
5339  uint32_t m_NextBlockId;
5340 
5341  VkDeviceSize CalcMaxBlockSize() const;
5342 
5343  // Finds and removes given block from vector.
5344  void Remove(VmaDeviceMemoryBlock* pBlock);
5345 
5346  // Performs a single step in sorting m_Blocks. They may not be fully sorted
5347  // after this call (see the sketch after this struct).
5348  void IncrementallySortBlocks();
5349 
5350  // To be used only without CAN_MAKE_OTHER_LOST flag.
5351  VkResult AllocateFromBlock(
5352  VmaDeviceMemoryBlock* pBlock,
5353  VmaPool hCurrentPool,
5354  uint32_t currentFrameIndex,
5355  VkDeviceSize size,
5356  VkDeviceSize alignment,
5357  VmaAllocationCreateFlags allocFlags,
5358  void* pUserData,
5359  VmaSuballocationType suballocType,
5360  uint32_t strategy,
5361  VmaAllocation* pAllocation);
5362 
5363  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5364 };
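// Editorial sketch of the incremental sorting mentioned above: each call makes
// at most one swap of adjacent blocks that are out of order by sumFreeSize, so
// repeated calls converge to sorted order without a full sort. 'blocks' is
// hypothetical.
#if 0
for(size_t i = 1; i < blocks.size(); ++i)
{
    if(blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
        blocks[i]->m_pMetadata->GetSumFreeSize())
    {
        VMA_SWAP(blocks[i - 1], blocks[i]);
        break; // one step per call
    }
}
#endif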
5365 
5366 struct VmaPool_T
5367 {
5368  VMA_CLASS_NO_COPY(VmaPool_T)
5369 public:
5370  VmaBlockVector m_BlockVector;
5371 
5372  VmaPool_T(
5373  VmaAllocator hAllocator,
5374  const VmaPoolCreateInfo& createInfo,
5375  VkDeviceSize preferredBlockSize);
5376  ~VmaPool_T();
5377 
5378  uint32_t GetId() const { return m_Id; }
5379  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5380 
5381 #if VMA_STATS_STRING_ENABLED
5382  //void PrintDetailedMap(class VmaStringBuilder& sb);
5383 #endif
5384 
5385 private:
5386  uint32_t m_Id;
5387 };
5388 
5389 class VmaDefragmentator
5390 {
5391  VMA_CLASS_NO_COPY(VmaDefragmentator)
5392 private:
5393  const VmaAllocator m_hAllocator;
5394  VmaBlockVector* const m_pBlockVector;
5395  uint32_t m_CurrentFrameIndex;
5396  VkDeviceSize m_BytesMoved;
5397  uint32_t m_AllocationsMoved;
5398 
5399  struct AllocationInfo
5400  {
5401  VmaAllocation m_hAllocation;
5402  VkBool32* m_pChanged;
5403 
5404  AllocationInfo() :
5405  m_hAllocation(VK_NULL_HANDLE),
5406  m_pChanged(VMA_NULL)
5407  {
5408  }
5409  };
5410 
5411  struct AllocationInfoSizeGreater
5412  {
5413  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5414  {
5415  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5416  }
5417  };
5418 
5419  // Used between AddAllocation and Defragment (see the usage sketch after this class).
5420  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5421 
5422  struct BlockInfo
5423  {
5424  VmaDeviceMemoryBlock* m_pBlock;
5425  bool m_HasNonMovableAllocations;
5426  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5427 
5428  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5429  m_pBlock(VMA_NULL),
5430  m_HasNonMovableAllocations(true),
5431  m_Allocations(pAllocationCallbacks),
5432  m_pMappedDataForDefragmentation(VMA_NULL)
5433  {
5434  }
5435 
5436  void CalcHasNonMovableAllocations()
5437  {
5438  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5439  const size_t defragmentAllocCount = m_Allocations.size();
5440  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5441  }
5442 
5443  void SortAllocationsBySizeDescecnding()
5444  {
5445  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5446  }
5447 
5448  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5449  void Unmap(VmaAllocator hAllocator);
5450 
5451  private:
5452  // Not null if mapped for defragmentation only, not originally mapped.
5453  void* m_pMappedDataForDefragmentation;
5454  };
5455 
5456  struct BlockPointerLess
5457  {
5458  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5459  {
5460  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5461  }
5462  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5463  {
5464  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5465  }
5466  };
5467 
5468  // 1. Blocks with some non-movable allocations go first.
5469  // 2. Blocks with smaller sumFreeSize go first.
5470  struct BlockInfoCompareMoveDestination
5471  {
5472  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5473  {
5474  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5475  {
5476  return true;
5477  }
5478  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5479  {
5480  return false;
5481  }
5482  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5483  {
5484  return true;
5485  }
5486  return false;
5487  }
5488  };
5489 
5490  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5491  BlockInfoVector m_Blocks;
5492 
5493  VkResult DefragmentRound(
5494  VkDeviceSize maxBytesToMove,
5495  uint32_t maxAllocationsToMove);
5496 
5497  static bool MoveMakesSense(
5498  size_t dstBlockIndex, VkDeviceSize dstOffset,
5499  size_t srcBlockIndex, VkDeviceSize srcOffset);
5500 
5501 public:
5502  VmaDefragmentator(
5503  VmaAllocator hAllocator,
5504  VmaBlockVector* pBlockVector,
5505  uint32_t currentFrameIndex);
5506 
5507  ~VmaDefragmentator();
5508 
5509  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5510  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5511 
5512  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5513 
5514  VkResult Defragment(
5515  VkDeviceSize maxBytesToMove,
5516  uint32_t maxAllocationsToMove);
5517 };
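// Editorial usage sketch for the defragmentation protocol above, assuming a
// valid 'blockVector', 'hAllocator', frame index and allocation handle:
// allocations are registered first, then moved in one Defragment() call.
#if 0
VmaDefragmentator* const pDefrag = blockVector.EnsureDefragmentator(hAllocator, currentFrameIndex);
VkBool32 changed = VK_FALSE;
pDefrag->AddAllocation(hAlloc, &changed);
pDefrag->Defragment(VK_WHOLE_SIZE, UINT32_MAX); // maxBytesToMove, maxAllocationsToMove
blockVector.DestroyDefragmentator();
#endif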
5518 
5519 #if VMA_RECORDING_ENABLED
5520 
5521 class VmaRecorder
5522 {
5523 public:
5524  VmaRecorder();
5525  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5526  void WriteConfiguration(
5527  const VkPhysicalDeviceProperties& devProps,
5528  const VkPhysicalDeviceMemoryProperties& memProps,
5529  bool dedicatedAllocationExtensionEnabled);
5530  ~VmaRecorder();
5531 
5532  void RecordCreateAllocator(uint32_t frameIndex);
5533  void RecordDestroyAllocator(uint32_t frameIndex);
5534  void RecordCreatePool(uint32_t frameIndex,
5535  const VmaPoolCreateInfo& createInfo,
5536  VmaPool pool);
5537  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5538  void RecordAllocateMemory(uint32_t frameIndex,
5539  const VkMemoryRequirements& vkMemReq,
5540  const VmaAllocationCreateInfo& createInfo,
5541  VmaAllocation allocation);
5542  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5543  const VkMemoryRequirements& vkMemReq,
5544  bool requiresDedicatedAllocation,
5545  bool prefersDedicatedAllocation,
5546  const VmaAllocationCreateInfo& createInfo,
5547  VmaAllocation allocation);
5548  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5549  const VkMemoryRequirements& vkMemReq,
5550  bool requiresDedicatedAllocation,
5551  bool prefersDedicatedAllocation,
5552  const VmaAllocationCreateInfo& createInfo,
5553  VmaAllocation allocation);
5554  void RecordFreeMemory(uint32_t frameIndex,
5555  VmaAllocation allocation);
5556  void RecordSetAllocationUserData(uint32_t frameIndex,
5557  VmaAllocation allocation,
5558  const void* pUserData);
5559  void RecordCreateLostAllocation(uint32_t frameIndex,
5560  VmaAllocation allocation);
5561  void RecordMapMemory(uint32_t frameIndex,
5562  VmaAllocation allocation);
5563  void RecordUnmapMemory(uint32_t frameIndex,
5564  VmaAllocation allocation);
5565  void RecordFlushAllocation(uint32_t frameIndex,
5566  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5567  void RecordInvalidateAllocation(uint32_t frameIndex,
5568  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5569  void RecordCreateBuffer(uint32_t frameIndex,
5570  const VkBufferCreateInfo& bufCreateInfo,
5571  const VmaAllocationCreateInfo& allocCreateInfo,
5572  VmaAllocation allocation);
5573  void RecordCreateImage(uint32_t frameIndex,
5574  const VkImageCreateInfo& imageCreateInfo,
5575  const VmaAllocationCreateInfo& allocCreateInfo,
5576  VmaAllocation allocation);
5577  void RecordDestroyBuffer(uint32_t frameIndex,
5578  VmaAllocation allocation);
5579  void RecordDestroyImage(uint32_t frameIndex,
5580  VmaAllocation allocation);
5581  void RecordTouchAllocation(uint32_t frameIndex,
5582  VmaAllocation allocation);
5583  void RecordGetAllocationInfo(uint32_t frameIndex,
5584  VmaAllocation allocation);
5585  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5586  VmaPool pool);
5587 
5588 private:
5589  struct CallParams
5590  {
5591  uint32_t threadId;
5592  double time;
5593  };
5594 
5595  class UserDataString
5596  {
5597  public:
5598  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5599  const char* GetString() const { return m_Str; }
5600 
5601  private:
5602  char m_PtrStr[17];
5603  const char* m_Str;
5604  };
5605 
5606  bool m_UseMutex;
5607  VmaRecordFlags m_Flags;
5608  FILE* m_File;
5609  VMA_MUTEX m_FileMutex;
5610  int64_t m_Freq;
5611  int64_t m_StartCounter;
5612 
5613  void GetBasicParams(CallParams& outParams);
5614  void Flush();
5615 };
5616 
5617 #endif // #if VMA_RECORDING_ENABLED
5618 
5619 // Main allocator object.
5620 struct VmaAllocator_T
5621 {
5622  VMA_CLASS_NO_COPY(VmaAllocator_T)
5623 public:
5624  bool m_UseMutex;
5625  bool m_UseKhrDedicatedAllocation;
5626  VkDevice m_hDevice;
5627  bool m_AllocationCallbacksSpecified;
5628  VkAllocationCallbacks m_AllocationCallbacks;
5629  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5630 
5631  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5632  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5633  VMA_MUTEX m_HeapSizeLimitMutex;
5634 
5635  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5636  VkPhysicalDeviceMemoryProperties m_MemProps;
5637 
5638  // Default pools.
5639  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5640 
5641  // Each vector is sorted by memory (handle value).
5642  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5643  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5644  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5645 
5646  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5647  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5648  ~VmaAllocator_T();
5649 
5650  const VkAllocationCallbacks* GetAllocationCallbacks() const
5651  {
5652  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5653  }
5654  const VmaVulkanFunctions& GetVulkanFunctions() const
5655  {
5656  return m_VulkanFunctions;
5657  }
5658 
5659  VkDeviceSize GetBufferImageGranularity() const
5660  {
5661  return VMA_MAX(
5662  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5663  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5664  }
5665 
5666  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5667  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5668 
5669  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5670  {
5671  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5672  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5673  }
5674  // True when the specific memory type is HOST_VISIBLE but not HOST_COHERENT (see the sketch after this struct).
5675  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5676  {
5677  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5678  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5679  }
5680  // Minimum alignment for all allocations in a specific memory type.
5681  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5682  {
5683  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5684  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5685  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5686  }
5687 
5688  bool IsIntegratedGpu() const
5689  {
5690  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5691  }
5692 
5693 #if VMA_RECORDING_ENABLED
5694  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5695 #endif
5696 
5697  void GetBufferMemoryRequirements(
5698  VkBuffer hBuffer,
5699  VkMemoryRequirements& memReq,
5700  bool& requiresDedicatedAllocation,
5701  bool& prefersDedicatedAllocation) const;
5702  void GetImageMemoryRequirements(
5703  VkImage hImage,
5704  VkMemoryRequirements& memReq,
5705  bool& requiresDedicatedAllocation,
5706  bool& prefersDedicatedAllocation) const;
5707 
5708  // Main allocation function.
5709  VkResult AllocateMemory(
5710  const VkMemoryRequirements& vkMemReq,
5711  bool requiresDedicatedAllocation,
5712  bool prefersDedicatedAllocation,
5713  VkBuffer dedicatedBuffer,
5714  VkImage dedicatedImage,
5715  const VmaAllocationCreateInfo& createInfo,
5716  VmaSuballocationType suballocType,
5717  VmaAllocation* pAllocation);
5718 
5719  // Main deallocation function.
5720  void FreeMemory(const VmaAllocation allocation);
5721 
5722  void CalculateStats(VmaStats* pStats);
5723 
5724 #if VMA_STATS_STRING_ENABLED
5725  void PrintDetailedMap(class VmaJsonWriter& json);
5726 #endif
5727 
5728  VkResult Defragment(
5729  VmaAllocation* pAllocations,
5730  size_t allocationCount,
5731  VkBool32* pAllocationsChanged,
5732  const VmaDefragmentationInfo* pDefragmentationInfo,
5733  VmaDefragmentationStats* pDefragmentationStats);
5734 
5735  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5736  bool TouchAllocation(VmaAllocation hAllocation);
5737 
5738  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5739  void DestroyPool(VmaPool pool);
5740  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5741 
5742  void SetCurrentFrameIndex(uint32_t frameIndex);
5743  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5744 
5745  void MakePoolAllocationsLost(
5746  VmaPool hPool,
5747  size_t* pLostAllocationCount);
5748  VkResult CheckPoolCorruption(VmaPool hPool);
5749  VkResult CheckCorruption(uint32_t memoryTypeBits);
5750 
5751  void CreateLostAllocation(VmaAllocation* pAllocation);
5752 
5753  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5754  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5755 
5756  VkResult Map(VmaAllocation hAllocation, void** ppData);
5757  void Unmap(VmaAllocation hAllocation);
5758 
5759  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5760  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5761 
5762  void FlushOrInvalidateAllocation(
5763  VmaAllocation hAllocation,
5764  VkDeviceSize offset, VkDeviceSize size,
5765  VMA_CACHE_OPERATION op);
5766 
5767  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5768 
5769 private:
5770  VkDeviceSize m_PreferredLargeHeapBlockSize;
5771 
5772  VkPhysicalDevice m_PhysicalDevice;
5773  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5774 
5775  VMA_MUTEX m_PoolsMutex;
5776  // Protected by m_PoolsMutex. Sorted by pointer value.
5777  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5778  uint32_t m_NextPoolId;
5779 
5780  VmaVulkanFunctions m_VulkanFunctions;
5781 
5782 #if VMA_RECORDING_ENABLED
5783  VmaRecorder* m_pRecorder;
5784 #endif
5785 
5786  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5787 
5788  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5789 
5790  VkResult AllocateMemoryOfType(
5791  VkDeviceSize size,
5792  VkDeviceSize alignment,
5793  bool dedicatedAllocation,
5794  VkBuffer dedicatedBuffer,
5795  VkImage dedicatedImage,
5796  const VmaAllocationCreateInfo& createInfo,
5797  uint32_t memTypeIndex,
5798  VmaSuballocationType suballocType,
5799  VmaAllocation* pAllocation);
5800 
5801  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
5802  VkResult AllocateDedicatedMemory(
5803  VkDeviceSize size,
5804  VmaSuballocationType suballocType,
5805  uint32_t memTypeIndex,
5806  bool map,
5807  bool isUserDataString,
5808  void* pUserData,
5809  VkBuffer dedicatedBuffer,
5810  VkImage dedicatedImage,
5811  VmaAllocation* pAllocation);
5812 
5813  // Frees the given allocation, which must be of type ALLOCATION_TYPE_DEDICATED.
5814  void FreeDedicatedMemory(VmaAllocation allocation);
5815 };
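// Editorial sketch of the non-coherent handling above: a memory type that is
// HOST_VISIBLE but not HOST_COHERENT needs explicit flushes, which
// FlushOrInvalidateAllocation() performs. Variables are hypothetical.
#if 0
if(hAllocator->IsMemoryTypeNonCoherent(memTypeIndex))
{
    hAllocator->FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
}
#endif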
5816 
5818 // Memory allocation #2 after VmaAllocator_T definition
5819 
5820 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5821 {
5822  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5823 }
5824 
5825 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5826 {
5827  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5828 }
5829 
5830 template<typename T>
5831 static T* VmaAllocate(VmaAllocator hAllocator)
5832 {
5833  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5834 }
5835 
5836 template<typename T>
5837 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5838 {
5839  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5840 }
5841 
5842 template<typename T>
5843 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5844 {
5845  if(ptr != VMA_NULL)
5846  {
5847  ptr->~T();
5848  VmaFree(hAllocator, ptr);
5849  }
5850 }
5851 
5852 template<typename T>
5853 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5854 {
5855  if(ptr != VMA_NULL)
5856  {
5857  for(size_t i = count; i--; )
5858  ptr[i].~T();
5859  VmaFree(hAllocator, ptr);
5860  }
5861 }
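// Editorial usage sketch for the helpers above, assuming a valid 'hAllocator':
// VmaAllocateArray returns raw storage from the allocator's callbacks;
// vma_delete_array runs destructors in reverse order and frees the storage.
#if 0
uint32_t* const pArr = VmaAllocateArray<uint32_t>(hAllocator, 16);
// ... use pArr[0] .. pArr[15] ...
vma_delete_array(hAllocator, pArr, 16);
#endif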
5862 
5864 // VmaStringBuilder
5865 
5866 #if VMA_STATS_STRING_ENABLED
5867 
5868 class VmaStringBuilder
5869 {
5870 public:
5871  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5872  size_t GetLength() const { return m_Data.size(); }
5873  const char* GetData() const { return m_Data.data(); }
5874 
5875  void Add(char ch) { m_Data.push_back(ch); }
5876  void Add(const char* pStr);
5877  void AddNewLine() { Add('\n'); }
5878  void AddNumber(uint32_t num);
5879  void AddNumber(uint64_t num);
5880  void AddPointer(const void* ptr);
5881 
5882 private:
5883  VmaVector< char, VmaStlAllocator<char> > m_Data;
5884 };
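// Editorial usage sketch, assuming a valid VmaAllocator 'alloc'. Note that
// GetData() is not null-terminated, so it must be paired with GetLength().
#if 0
VmaStringBuilder sb(alloc);
sb.Add("Memory heaps: ");
sb.AddNumber(2u);
sb.AddNewLine();
// Consume sb.GetData() together with sb.GetLength().
#endif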
5885 
5886 void VmaStringBuilder::Add(const char* pStr)
5887 {
5888  const size_t strLen = strlen(pStr);
5889  if(strLen > 0)
5890  {
5891  const size_t oldCount = m_Data.size();
5892  m_Data.resize(oldCount + strLen);
5893  memcpy(m_Data.data() + oldCount, pStr, strLen);
5894  }
5895 }
5896 
5897 void VmaStringBuilder::AddNumber(uint32_t num)
5898 {
5899  char buf[11];
5900  VmaUint32ToStr(buf, sizeof(buf), num);
5901  Add(buf);
5902 }
5903 
5904 void VmaStringBuilder::AddNumber(uint64_t num)
5905 {
5906  char buf[21];
5907  VmaUint64ToStr(buf, sizeof(buf), num);
5908  Add(buf);
5909 }
5910 
5911 void VmaStringBuilder::AddPointer(const void* ptr)
5912 {
5913  char buf[21];
5914  VmaPtrToStr(buf, sizeof(buf), ptr);
5915  Add(buf);
5916 }
5917 
5918 #endif // #if VMA_STATS_STRING_ENABLED
5919 
5921 // VmaJsonWriter
5922 
5923 #if VMA_STATS_STRING_ENABLED
5924 
5925 class VmaJsonWriter
5926 {
5927  VMA_CLASS_NO_COPY(VmaJsonWriter)
5928 public:
5929  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
5930  ~VmaJsonWriter();
5931 
5932  void BeginObject(bool singleLine = false);
5933  void EndObject();
5934 
5935  void BeginArray(bool singleLine = false);
5936  void EndArray();
5937 
5938  void WriteString(const char* pStr);
5939  void BeginString(const char* pStr = VMA_NULL);
5940  void ContinueString(const char* pStr);
5941  void ContinueString(uint32_t n);
5942  void ContinueString(uint64_t n);
5943  void ContinueString_Pointer(const void* ptr);
5944  void EndString(const char* pStr = VMA_NULL);
5945 
5946  void WriteNumber(uint32_t n);
5947  void WriteNumber(uint64_t n);
5948  void WriteBool(bool b);
5949  void WriteNull();
5950 
5951 private:
5952  static const char* const INDENT;
5953 
5954  enum COLLECTION_TYPE
5955  {
5956  COLLECTION_TYPE_OBJECT,
5957  COLLECTION_TYPE_ARRAY,
5958  };
5959  struct StackItem
5960  {
5961  COLLECTION_TYPE type;
5962  uint32_t valueCount;
5963  bool singleLineMode;
5964  };
5965 
5966  VmaStringBuilder& m_SB;
5967  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
5968  bool m_InsideString;
5969 
5970  void BeginValue(bool isString);
5971  void WriteIndent(bool oneLess = false);
5972 };
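// Editorial usage sketch, assuming 'allocCallbacks' and a VmaStringBuilder
// 'sb': inside an object, string keys and values alternate, which
// BeginValue() below asserts on.
#if 0
VmaJsonWriter json(allocCallbacks, sb);
json.BeginObject();
json.WriteString("TotalBytes");      // key
json.WriteNumber((uint64_t)1048576); // value
json.EndObject();
#endif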
5973 
5974 const char* const VmaJsonWriter::INDENT = " ";
5975 
5976 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
5977  m_SB(sb),
5978  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
5979  m_InsideString(false)
5980 {
5981 }
5982 
5983 VmaJsonWriter::~VmaJsonWriter()
5984 {
5985  VMA_ASSERT(!m_InsideString);
5986  VMA_ASSERT(m_Stack.empty());
5987 }
5988 
5989 void VmaJsonWriter::BeginObject(bool singleLine)
5990 {
5991  VMA_ASSERT(!m_InsideString);
5992 
5993  BeginValue(false);
5994  m_SB.Add('{');
5995 
5996  StackItem item;
5997  item.type = COLLECTION_TYPE_OBJECT;
5998  item.valueCount = 0;
5999  item.singleLineMode = singleLine;
6000  m_Stack.push_back(item);
6001 }
6002 
6003 void VmaJsonWriter::EndObject()
6004 {
6005  VMA_ASSERT(!m_InsideString);
6006 
6007  WriteIndent(true);
6008  m_SB.Add('}');
6009 
6010  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6011  m_Stack.pop_back();
6012 }
6013 
6014 void VmaJsonWriter::BeginArray(bool singleLine)
6015 {
6016  VMA_ASSERT(!m_InsideString);
6017 
6018  BeginValue(false);
6019  m_SB.Add('[');
6020 
6021  StackItem item;
6022  item.type = COLLECTION_TYPE_ARRAY;
6023  item.valueCount = 0;
6024  item.singleLineMode = singleLine;
6025  m_Stack.push_back(item);
6026 }
6027 
6028 void VmaJsonWriter::EndArray()
6029 {
6030  VMA_ASSERT(!m_InsideString);
6031 
6032  WriteIndent(true);
6033  m_SB.Add(']');
6034 
6035  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6036  m_Stack.pop_back();
6037 }
6038 
6039 void VmaJsonWriter::WriteString(const char* pStr)
6040 {
6041  BeginString(pStr);
6042  EndString();
6043 }
6044 
6045 void VmaJsonWriter::BeginString(const char* pStr)
6046 {
6047  VMA_ASSERT(!m_InsideString);
6048 
6049  BeginValue(true);
6050  m_SB.Add('"');
6051  m_InsideString = true;
6052  if(pStr != VMA_NULL && pStr[0] != '\0')
6053  {
6054  ContinueString(pStr);
6055  }
6056 }
6057 
6058 void VmaJsonWriter::ContinueString(const char* pStr)
6059 {
6060  VMA_ASSERT(m_InsideString);
6061 
6062  const size_t strLen = strlen(pStr);
6063  for(size_t i = 0; i < strLen; ++i)
6064  {
6065  char ch = pStr[i];
6066  if(ch == '\\')
6067  {
6068  m_SB.Add("\\\\");
6069  }
6070  else if(ch == '"')
6071  {
6072  m_SB.Add("\\\"");
6073  }
6074  else if(ch >= 32)
6075  {
6076  m_SB.Add(ch);
6077  }
6078  else switch(ch)
6079  {
6080  case '\b':
6081  m_SB.Add("\\b");
6082  break;
6083  case '\f':
6084  m_SB.Add("\\f");
6085  break;
6086  case '\n':
6087  m_SB.Add("\\n");
6088  break;
6089  case '\r':
6090  m_SB.Add("\\r");
6091  break;
6092  case '\t':
6093  m_SB.Add("\\t");
6094  break;
6095  default:
6096  VMA_ASSERT(0 && "Character not currently supported.");
6097  break;
6098  }
6099  }
6100 }
6101 
6102 void VmaJsonWriter::ContinueString(uint32_t n)
6103 {
6104  VMA_ASSERT(m_InsideString);
6105  m_SB.AddNumber(n);
6106 }
6107 
6108 void VmaJsonWriter::ContinueString(uint64_t n)
6109 {
6110  VMA_ASSERT(m_InsideString);
6111  m_SB.AddNumber(n);
6112 }
6113 
6114 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6115 {
6116  VMA_ASSERT(m_InsideString);
6117  m_SB.AddPointer(ptr);
6118 }
6119 
6120 void VmaJsonWriter::EndString(const char* pStr)
6121 {
6122  VMA_ASSERT(m_InsideString);
6123  if(pStr != VMA_NULL && pStr[0] != '\0')
6124  {
6125  ContinueString(pStr);
6126  }
6127  m_SB.Add('"');
6128  m_InsideString = false;
6129 }
6130 
6131 void VmaJsonWriter::WriteNumber(uint32_t n)
6132 {
6133  VMA_ASSERT(!m_InsideString);
6134  BeginValue(false);
6135  m_SB.AddNumber(n);
6136 }
6137 
6138 void VmaJsonWriter::WriteNumber(uint64_t n)
6139 {
6140  VMA_ASSERT(!m_InsideString);
6141  BeginValue(false);
6142  m_SB.AddNumber(n);
6143 }
6144 
6145 void VmaJsonWriter::WriteBool(bool b)
6146 {
6147  VMA_ASSERT(!m_InsideString);
6148  BeginValue(false);
6149  m_SB.Add(b ? "true" : "false");
6150 }
6151 
6152 void VmaJsonWriter::WriteNull()
6153 {
6154  VMA_ASSERT(!m_InsideString);
6155  BeginValue(false);
6156  m_SB.Add("null");
6157 }
6158 
6159 void VmaJsonWriter::BeginValue(bool isString)
6160 {
6161  if(!m_Stack.empty())
6162  {
6163  StackItem& currItem = m_Stack.back();
6164  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6165  currItem.valueCount % 2 == 0)
6166  {
6167  VMA_ASSERT(isString);
6168  }
6169 
6170  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6171  currItem.valueCount % 2 != 0)
6172  {
6173  m_SB.Add(": ");
6174  }
6175  else if(currItem.valueCount > 0)
6176  {
6177  m_SB.Add(", ");
6178  WriteIndent();
6179  }
6180  else
6181  {
6182  WriteIndent();
6183  }
6184  ++currItem.valueCount;
6185  }
6186 }
6187 
6188 void VmaJsonWriter::WriteIndent(bool oneLess)
6189 {
6190  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6191  {
6192  m_SB.AddNewLine();
6193 
6194  size_t count = m_Stack.size();
6195  if(count > 0 && oneLess)
6196  {
6197  --count;
6198  }
6199  for(size_t i = 0; i < count; ++i)
6200  {
6201  m_SB.Add(INDENT);
6202  }
6203  }
6204 }
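
// Editorial usage sketch (not part of the library): VmaJsonWriter expects
// strictly alternating key/value calls inside an object -- BeginValue()
// enforces this by asserting that every even-numbered item in an object is a
// string key. A minimal, hypothetical example:
//
//   VmaStringBuilder sb(...); // built with the same allocation callbacks
//   {
//       VmaJsonWriter json(allocationCallbacks, sb);
//       json.BeginObject();
//       json.WriteString("Answer");   // key - must be a string
//       json.WriteNumber(42u);        // value
//       json.EndObject();
//   } // ~VmaJsonWriter() asserts the stack is empty and no string is open.
//   // sb now holds a pretty-printed {"Answer": 42}.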
6205 
6206 #endif // #if VMA_STATS_STRING_ENABLED
6207 
6208 ////////////////////////////////////////////////////////////////////////////////
6209 
6210 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6211 {
6212  if(IsUserDataString())
6213  {
6214  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6215 
6216  FreeUserDataString(hAllocator);
6217 
6218  if(pUserData != VMA_NULL)
6219  {
6220  const char* const newStrSrc = (char*)pUserData;
6221  const size_t newStrLen = strlen(newStrSrc);
6222  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6223  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6224  m_pUserData = newStrDst;
6225  }
6226  }
6227  else
6228  {
6229  m_pUserData = pUserData;
6230  }
6231 }
6232 
6233 void VmaAllocation_T::ChangeBlockAllocation(
6234  VmaAllocator hAllocator,
6235  VmaDeviceMemoryBlock* block,
6236  VkDeviceSize offset)
6237 {
6238  VMA_ASSERT(block != VMA_NULL);
6239  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6240 
6241  // Move mapping reference counter from old block to new block.
6242  if(block != m_BlockAllocation.m_Block)
6243  {
6244  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6245  if(IsPersistentMap())
6246  ++mapRefCount;
6247  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6248  block->Map(hAllocator, mapRefCount, VMA_NULL);
6249  }
6250 
6251  m_BlockAllocation.m_Block = block;
6252  m_BlockAllocation.m_Offset = offset;
6253 }
6254 
6255 VkDeviceSize VmaAllocation_T::GetOffset() const
6256 {
6257  switch(m_Type)
6258  {
6259  case ALLOCATION_TYPE_BLOCK:
6260  return m_BlockAllocation.m_Offset;
6261  case ALLOCATION_TYPE_DEDICATED:
6262  return 0;
6263  default:
6264  VMA_ASSERT(0);
6265  return 0;
6266  }
6267 }
6268 
6269 VkDeviceMemory VmaAllocation_T::GetMemory() const
6270 {
6271  switch(m_Type)
6272  {
6273  case ALLOCATION_TYPE_BLOCK:
6274  return m_BlockAllocation.m_Block->GetDeviceMemory();
6275  case ALLOCATION_TYPE_DEDICATED:
6276  return m_DedicatedAllocation.m_hMemory;
6277  default:
6278  VMA_ASSERT(0);
6279  return VK_NULL_HANDLE;
6280  }
6281 }
6282 
6283 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6284 {
6285  switch(m_Type)
6286  {
6287  case ALLOCATION_TYPE_BLOCK:
6288  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6289  case ALLOCATION_TYPE_DEDICATED:
6290  return m_DedicatedAllocation.m_MemoryTypeIndex;
6291  default:
6292  VMA_ASSERT(0);
6293  return UINT32_MAX;
6294  }
6295 }
6296 
6297 void* VmaAllocation_T::GetMappedData() const
6298 {
6299  switch(m_Type)
6300  {
6301  case ALLOCATION_TYPE_BLOCK:
6302  if(m_MapCount != 0)
6303  {
6304  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6305  VMA_ASSERT(pBlockData != VMA_NULL);
6306  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6307  }
6308  else
6309  {
6310  return VMA_NULL;
6311  }
6312  break;
6313  case ALLOCATION_TYPE_DEDICATED:
6314  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6315  return m_DedicatedAllocation.m_pMappedData;
6316  default:
6317  VMA_ASSERT(0);
6318  return VMA_NULL;
6319  }
6320 }
6321 
6322 bool VmaAllocation_T::CanBecomeLost() const
6323 {
6324  switch(m_Type)
6325  {
6326  case ALLOCATION_TYPE_BLOCK:
6327  return m_BlockAllocation.m_CanBecomeLost;
6328  case ALLOCATION_TYPE_DEDICATED:
6329  return false;
6330  default:
6331  VMA_ASSERT(0);
6332  return false;
6333  }
6334 }
6335 
6336 VmaPool VmaAllocation_T::GetPool() const
6337 {
6338  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6339  return m_BlockAllocation.m_hPool;
6340 }
6341 
6342 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6343 {
6344  VMA_ASSERT(CanBecomeLost());
6345 
6346  /*
6347  Warning: This is a carefully designed algorithm.
6348  Do not modify unless you really know what you're doing :)
6349  */
6350  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6351  for(;;)
6352  {
6353  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6354  {
6355  VMA_ASSERT(0);
6356  return false;
6357  }
6358  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6359  {
6360  return false;
6361  }
6362  else // Last use time earlier than current time.
6363  {
6364  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6365  {
6366  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6367  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6368  return true;
6369  }
6370  }
6371  }
6372 }
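
// Editorial note: the loop above is the classic lock-free compare-exchange
// retry. In pseudocode, assuming CompareExchangeLastUseFrameIndex() reloads
// its first argument on failure (standard compare_exchange semantics):
//
//   expected = lastUseFrameIndex.load();
//   for(;;) {
//       if(expected == VMA_FRAME_INDEX_LOST)        return false; // already lost
//       if(expected + frameInUseCount >= current)   return false; // still in use
//       if(CAS(lastUseFrameIndex, expected, LOST))  return true;  // we lost it
//       // CAS failed: `expected` now holds the fresh value; checks re-run.
//   }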
6373 
6374 #if VMA_STATS_STRING_ENABLED
6375 
6376 // Corresponds to values of enum VmaSuballocationType.
6377 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6378  "FREE",
6379  "UNKNOWN",
6380  "BUFFER",
6381  "IMAGE_UNKNOWN",
6382  "IMAGE_LINEAR",
6383  "IMAGE_OPTIMAL",
6384 };
6385 
6386 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6387 {
6388  json.WriteString("Type");
6389  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6390 
6391  json.WriteString("Size");
6392  json.WriteNumber(m_Size);
6393 
6394  if(m_pUserData != VMA_NULL)
6395  {
6396  json.WriteString("UserData");
6397  if(IsUserDataString())
6398  {
6399  json.WriteString((const char*)m_pUserData);
6400  }
6401  else
6402  {
6403  json.BeginString();
6404  json.ContinueString_Pointer(m_pUserData);
6405  json.EndString();
6406  }
6407  }
6408 
6409  json.WriteString("CreationFrameIndex");
6410  json.WriteNumber(m_CreationFrameIndex);
6411 
6412  json.WriteString("LastUseFrameIndex");
6413  json.WriteNumber(GetLastUseFrameIndex());
6414 
6415  if(m_BufferImageUsage != 0)
6416  {
6417  json.WriteString("Usage");
6418  json.WriteNumber(m_BufferImageUsage);
6419  }
6420 }
6421 
6422 #endif
6423 
6424 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6425 {
6426  VMA_ASSERT(IsUserDataString());
6427  if(m_pUserData != VMA_NULL)
6428  {
6429  char* const oldStr = (char*)m_pUserData;
6430  const size_t oldStrLen = strlen(oldStr);
6431  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6432  m_pUserData = VMA_NULL;
6433  }
6434 }
6435 
6436 void VmaAllocation_T::BlockAllocMap()
6437 {
6438  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6439 
6440  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6441  {
6442  ++m_MapCount;
6443  }
6444  else
6445  {
6446  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6447  }
6448 }
6449 
6450 void VmaAllocation_T::BlockAllocUnmap()
6451 {
6452  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6453 
6454  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6455  {
6456  --m_MapCount;
6457  }
6458  else
6459  {
6460  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6461  }
6462 }
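
// Editorial note: m_MapCount packs two things into one counter. The low 7
// bits count outstanding Map() calls (hence the 0x7F cap asserted above),
// while the MAP_COUNT_FLAG_PERSISTENT_MAP bit (evidently 0x80, given that
// cap) marks allocations created with VMA_ALLOCATION_CREATE_MAPPED_BIT; it
// is masked out of the ref count so a persistently mapped allocation never
// loses its mapping to user Unmap calls.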
6463 
6464 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6465 {
6466  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6467 
6468  if(m_MapCount != 0)
6469  {
6470  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6471  {
6472  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6473  *ppData = m_DedicatedAllocation.m_pMappedData;
6474  ++m_MapCount;
6475  return VK_SUCCESS;
6476  }
6477  else
6478  {
6479  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6480  return VK_ERROR_MEMORY_MAP_FAILED;
6481  }
6482  }
6483  else
6484  {
6485  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6486  hAllocator->m_hDevice,
6487  m_DedicatedAllocation.m_hMemory,
6488  0, // offset
6489  VK_WHOLE_SIZE,
6490  0, // flags
6491  ppData);
6492  if(result == VK_SUCCESS)
6493  {
6494  m_DedicatedAllocation.m_pMappedData = *ppData;
6495  m_MapCount = 1;
6496  }
6497  return result;
6498  }
6499 }
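
// Editorial note: only the first DedicatedAllocMap() call reaches
// vkMapMemory(); subsequent calls just bump the ref count and return the
// cached pointer. A hypothetical caller (hAllocator, alloc, src and size are
// assumed to exist):
//
//   void* pData = VMA_NULL;
//   if(alloc->DedicatedAllocMap(hAllocator, &pData) == VK_SUCCESS)
//   {
//       memcpy(pData, src, size);
//       alloc->DedicatedAllocUnmap(hAllocator);
//   }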
6500 
6501 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6502 {
6503  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6504 
6505  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6506  {
6507  --m_MapCount;
6508  if(m_MapCount == 0)
6509  {
6510  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6511  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6512  hAllocator->m_hDevice,
6513  m_DedicatedAllocation.m_hMemory);
6514  }
6515  }
6516  else
6517  {
6518  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6519  }
6520 }
6521 
6522 #if VMA_STATS_STRING_ENABLED
6523 
6524 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6525 {
6526  json.BeginObject();
6527 
6528  json.WriteString("Blocks");
6529  json.WriteNumber(stat.blockCount);
6530 
6531  json.WriteString("Allocations");
6532  json.WriteNumber(stat.allocationCount);
6533 
6534  json.WriteString("UnusedRanges");
6535  json.WriteNumber(stat.unusedRangeCount);
6536 
6537  json.WriteString("UsedBytes");
6538  json.WriteNumber(stat.usedBytes);
6539 
6540  json.WriteString("UnusedBytes");
6541  json.WriteNumber(stat.unusedBytes);
6542 
6543  if(stat.allocationCount > 1)
6544  {
6545  json.WriteString("AllocationSize");
6546  json.BeginObject(true);
6547  json.WriteString("Min");
6548  json.WriteNumber(stat.allocationSizeMin);
6549  json.WriteString("Avg");
6550  json.WriteNumber(stat.allocationSizeAvg);
6551  json.WriteString("Max");
6552  json.WriteNumber(stat.allocationSizeMax);
6553  json.EndObject();
6554  }
6555 
6556  if(stat.unusedRangeCount > 1)
6557  {
6558  json.WriteString("UnusedRangeSize");
6559  json.BeginObject(true);
6560  json.WriteString("Min");
6561  json.WriteNumber(stat.unusedRangeSizeMin);
6562  json.WriteString("Avg");
6563  json.WriteNumber(stat.unusedRangeSizeAvg);
6564  json.WriteString("Max");
6565  json.WriteNumber(stat.unusedRangeSizeMax);
6566  json.EndObject();
6567  }
6568 
6569  json.EndObject();
6570 }
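
// For reference, the object emitted above has this shape (illustrative
// values; the nested objects appear only when there is more than one
// allocation / unused range, as guarded above):
//
//   {
//     "Blocks": 2, "Allocations": 87, "UnusedRanges": 5,
//     "UsedBytes": 1048576, "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 256, "Avg": 12052, "Max": 262144 },
//     "UnusedRangeSize": { "Min": 128, "Avg": 13107, "Max": 32768 }
//   }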
6571 
6572 #endif // #if VMA_STATS_STRING_ENABLED
6573 
6574 struct VmaSuballocationItemSizeLess
6575 {
6576  bool operator()(
6577  const VmaSuballocationList::iterator lhs,
6578  const VmaSuballocationList::iterator rhs) const
6579  {
6580  return lhs->size < rhs->size;
6581  }
6582  bool operator()(
6583  const VmaSuballocationList::iterator lhs,
6584  VkDeviceSize rhsSize) const
6585  {
6586  return lhs->size < rhsSize;
6587  }
6588 };
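
// Editorial note: the two operator() overloads above let one comparator both
// keep m_FreeSuballocationsBySize sorted (iterator vs. iterator) and
// binary-search it with a raw VkDeviceSize key (iterator vs. size) -- the
// same idea as a C++14 transparent comparator, conceptually:
//
//   // std::lower_bound(v.begin(), v.end(), minSize, VmaSuballocationItemSizeLess());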
6589 
6590 
6591 ////////////////////////////////////////////////////////////////////////////////
6592 // class VmaBlockMetadata
6593 
6594 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6595  m_Size(0),
6596  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6597 {
6598 }
6599 
6600 #if VMA_STATS_STRING_ENABLED
6601 
6602 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6603  VkDeviceSize unusedBytes,
6604  size_t allocationCount,
6605  size_t unusedRangeCount) const
6606 {
6607  json.BeginObject();
6608 
6609  json.WriteString("TotalBytes");
6610  json.WriteNumber(GetSize());
6611 
6612  json.WriteString("UnusedBytes");
6613  json.WriteNumber(unusedBytes);
6614 
6615  json.WriteString("Allocations");
6616  json.WriteNumber((uint64_t)allocationCount);
6617 
6618  json.WriteString("UnusedRanges");
6619  json.WriteNumber((uint64_t)unusedRangeCount);
6620 
6621  json.WriteString("Suballocations");
6622  json.BeginArray();
6623 }
6624 
6625 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6626  VkDeviceSize offset,
6627  VmaAllocation hAllocation) const
6628 {
6629  json.BeginObject(true);
6630 
6631  json.WriteString("Offset");
6632  json.WriteNumber(offset);
6633 
6634  hAllocation->PrintParameters(json);
6635 
6636  json.EndObject();
6637 }
6638 
6639 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6640  VkDeviceSize offset,
6641  VkDeviceSize size) const
6642 {
6643  json.BeginObject(true);
6644 
6645  json.WriteString("Offset");
6646  json.WriteNumber(offset);
6647 
6648  json.WriteString("Type");
6649  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6650 
6651  json.WriteString("Size");
6652  json.WriteNumber(size);
6653 
6654  json.EndObject();
6655 }
6656 
6657 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6658 {
6659  json.EndArray();
6660  json.EndObject();
6661 }
6662 
6663 #endif // #if VMA_STATS_STRING_ENABLED
6664 
6665 ////////////////////////////////////////////////////////////////////////////////
6666 // class VmaBlockMetadata_Generic
6667 
6668 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6669  VmaBlockMetadata(hAllocator),
6670  m_FreeCount(0),
6671  m_SumFreeSize(0),
6672  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6673  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6674 {
6675 }
6676 
6677 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6678 {
6679 }
6680 
6681 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6682 {
6683  VmaBlockMetadata::Init(size);
6684 
6685  m_FreeCount = 1;
6686  m_SumFreeSize = size;
6687 
6688  VmaSuballocation suballoc = {};
6689  suballoc.offset = 0;
6690  suballoc.size = size;
6691  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6692  suballoc.hAllocation = VK_NULL_HANDLE;
6693 
6694  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6695  m_Suballocations.push_back(suballoc);
6696  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6697  --suballocItem;
6698  m_FreeSuballocationsBySize.push_back(suballocItem);
6699 }
6700 
6701 bool VmaBlockMetadata_Generic::Validate() const
6702 {
6703  VMA_VALIDATE(!m_Suballocations.empty());
6704 
6705  // Expected offset of new suballocation as calculated from previous ones.
6706  VkDeviceSize calculatedOffset = 0;
6707  // Expected number of free suballocations as calculated from traversing their list.
6708  uint32_t calculatedFreeCount = 0;
6709  // Expected sum size of free suballocations as calculated from traversing their list.
6710  VkDeviceSize calculatedSumFreeSize = 0;
6711  // Expected number of free suballocations that should be registered in
6712  // m_FreeSuballocationsBySize calculated from traversing their list.
6713  size_t freeSuballocationsToRegister = 0;
6714  // True if previous visited suballocation was free.
6715  bool prevFree = false;
6716 
6717  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6718  suballocItem != m_Suballocations.cend();
6719  ++suballocItem)
6720  {
6721  const VmaSuballocation& subAlloc = *suballocItem;
6722 
6723  // Actual offset of this suballocation doesn't match expected one.
6724  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6725 
6726  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6727  // Two adjacent free suballocations are invalid. They should be merged.
6728  VMA_VALIDATE(!prevFree || !currFree);
6729 
6730  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6731 
6732  if(currFree)
6733  {
6734  calculatedSumFreeSize += subAlloc.size;
6735  ++calculatedFreeCount;
6736  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6737  {
6738  ++freeSuballocationsToRegister;
6739  }
6740 
6741  // Margin required between allocations - every free space must be at least that large.
6742  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6743  }
6744  else
6745  {
6746  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6747  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6748 
6749  // Margin required between allocations - previous allocation must be free.
6750  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6751  }
6752 
6753  calculatedOffset += subAlloc.size;
6754  prevFree = currFree;
6755  }
6756 
6757  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6758  // match expected one.
6759  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6760 
6761  VkDeviceSize lastSize = 0;
6762  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6763  {
6764  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6765 
6766  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6767  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6768  // They must be sorted by size ascending.
6769  VMA_VALIDATE(suballocItem->size >= lastSize);
6770 
6771  lastSize = suballocItem->size;
6772  }
6773 
6774  // Check if totals match calculated values.
6775  VMA_VALIDATE(ValidateFreeSuballocationList());
6776  VMA_VALIDATE(calculatedOffset == GetSize());
6777  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6778  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6779 
6780  return true;
6781 }
6782 
6783 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6784 {
6785  if(!m_FreeSuballocationsBySize.empty())
6786  {
6787  return m_FreeSuballocationsBySize.back()->size;
6788  }
6789  else
6790  {
6791  return 0;
6792  }
6793 }
6794 
6795 bool VmaBlockMetadata_Generic::IsEmpty() const
6796 {
6797  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6798 }
6799 
6800 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6801 {
6802  outInfo.blockCount = 1;
6803 
6804  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6805  outInfo.allocationCount = rangeCount - m_FreeCount;
6806  outInfo.unusedRangeCount = m_FreeCount;
6807 
6808  outInfo.unusedBytes = m_SumFreeSize;
6809  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6810 
6811  outInfo.allocationSizeMin = UINT64_MAX;
6812  outInfo.allocationSizeMax = 0;
6813  outInfo.unusedRangeSizeMin = UINT64_MAX;
6814  outInfo.unusedRangeSizeMax = 0;
6815 
6816  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6817  suballocItem != m_Suballocations.cend();
6818  ++suballocItem)
6819  {
6820  const VmaSuballocation& suballoc = *suballocItem;
6821  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6822  {
6823  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6824  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6825  }
6826  else
6827  {
6828  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6829  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6830  }
6831  }
6832 }
6833 
6834 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6835 {
6836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6837 
6838  inoutStats.size += GetSize();
6839  inoutStats.unusedSize += m_SumFreeSize;
6840  inoutStats.allocationCount += rangeCount - m_FreeCount;
6841  inoutStats.unusedRangeCount += m_FreeCount;
6842  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6843 }
6844 
6845 #if VMA_STATS_STRING_ENABLED
6846 
6847 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6848 {
6849  PrintDetailedMap_Begin(json,
6850  m_SumFreeSize, // unusedBytes
6851  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6852  m_FreeCount); // unusedRangeCount
6853 
6854  size_t i = 0;
6855  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6856  suballocItem != m_Suballocations.cend();
6857  ++suballocItem, ++i)
6858  {
6859  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6860  {
6861  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6862  }
6863  else
6864  {
6865  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6866  }
6867  }
6868 
6869  PrintDetailedMap_End(json);
6870 }
6871 
6872 #endif // #if VMA_STATS_STRING_ENABLED
6873 
6874 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6875  uint32_t currentFrameIndex,
6876  uint32_t frameInUseCount,
6877  VkDeviceSize bufferImageGranularity,
6878  VkDeviceSize allocSize,
6879  VkDeviceSize allocAlignment,
6880  bool upperAddress,
6881  VmaSuballocationType allocType,
6882  bool canMakeOtherLost,
6883  uint32_t strategy,
6884  VmaAllocationRequest* pAllocationRequest)
6885 {
6886  VMA_ASSERT(allocSize > 0);
6887  VMA_ASSERT(!upperAddress);
6888  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6889  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6890  VMA_HEAVY_ASSERT(Validate());
6891 
6892  // There is not enough total free space in this block to fulfill the request: Early return.
6893  if(canMakeOtherLost == false &&
6894  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6895  {
6896  return false;
6897  }
6898 
6899  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
6900  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6901  if(freeSuballocCount > 0)
6902  {
6903  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6904  {
6905  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6906  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6907  m_FreeSuballocationsBySize.data(),
6908  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6909  allocSize + 2 * VMA_DEBUG_MARGIN,
6910  VmaSuballocationItemSizeLess());
6911  size_t index = it - m_FreeSuballocationsBySize.data();
6912  for(; index < freeSuballocCount; ++index)
6913  {
6914  if(CheckAllocation(
6915  currentFrameIndex,
6916  frameInUseCount,
6917  bufferImageGranularity,
6918  allocSize,
6919  allocAlignment,
6920  allocType,
6921  m_FreeSuballocationsBySize[index],
6922  false, // canMakeOtherLost
6923  &pAllocationRequest->offset,
6924  &pAllocationRequest->itemsToMakeLostCount,
6925  &pAllocationRequest->sumFreeSize,
6926  &pAllocationRequest->sumItemSize))
6927  {
6928  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6929  return true;
6930  }
6931  }
6932  }
6933  else // WORST_FIT, FIRST_FIT
6934  {
6935  // Search starting from the biggest suballocations.
6936  for(size_t index = freeSuballocCount; index--; )
6937  {
6938  if(CheckAllocation(
6939  currentFrameIndex,
6940  frameInUseCount,
6941  bufferImageGranularity,
6942  allocSize,
6943  allocAlignment,
6944  allocType,
6945  m_FreeSuballocationsBySize[index],
6946  false, // canMakeOtherLost
6947  &pAllocationRequest->offset,
6948  &pAllocationRequest->itemsToMakeLostCount,
6949  &pAllocationRequest->sumFreeSize,
6950  &pAllocationRequest->sumItemSize))
6951  {
6952  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6953  return true;
6954  }
6955  }
6956  }
6957  }
6958 
6959  if(canMakeOtherLost)
6960  {
6961  // Brute-force algorithm. TODO: Come up with something better.
6962 
6963  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6964  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6965 
6966  VmaAllocationRequest tmpAllocRequest = {};
6967  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
6968  suballocIt != m_Suballocations.end();
6969  ++suballocIt)
6970  {
6971  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
6972  suballocIt->hAllocation->CanBecomeLost())
6973  {
6974  if(CheckAllocation(
6975  currentFrameIndex,
6976  frameInUseCount,
6977  bufferImageGranularity,
6978  allocSize,
6979  allocAlignment,
6980  allocType,
6981  suballocIt,
6982  canMakeOtherLost,
6983  &tmpAllocRequest.offset,
6984  &tmpAllocRequest.itemsToMakeLostCount,
6985  &tmpAllocRequest.sumFreeSize,
6986  &tmpAllocRequest.sumItemSize))
6987  {
6988  tmpAllocRequest.item = suballocIt;
6989 
6990  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
6991  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
6992  {
6993  *pAllocationRequest = tmpAllocRequest;
6994  }
6995  }
6996  }
6997  }
6998 
6999  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7000  {
7001  return true;
7002  }
7003  }
7004 
7005  return false;
7006 }
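
// Editorial note on the search above: with
// VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT the size-sorted vector is
// binary-searched for the first large-enough free range and walked upward,
// so the smallest viable range wins; otherwise it is walked from the largest
// range downward, so the first candidate checked is also the biggest one.
// Illustrative call, assuming hypothetical metadata/hAlloc objects and
// VMA_DEBUG_MARGIN == 0:
//
//   VmaAllocationRequest req = {};
//   if(metadata.CreateAllocationRequest(
//       frameIndex, 0 /*frameInUseCount*/, 1 /*bufferImageGranularity*/,
//       256 /*allocSize*/, 16 /*allocAlignment*/, false /*upperAddress*/,
//       VMA_SUBALLOCATION_TYPE_BUFFER, false /*canMakeOtherLost*/,
//       VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, &req))
//   {
//       metadata.Alloc(req, VMA_SUBALLOCATION_TYPE_BUFFER, 256, false, hAlloc);
//   }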
7007 
7008 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7009  uint32_t currentFrameIndex,
7010  uint32_t frameInUseCount,
7011  VmaAllocationRequest* pAllocationRequest)
7012 {
7013  while(pAllocationRequest->itemsToMakeLostCount > 0)
7014  {
7015  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7016  {
7017  ++pAllocationRequest->item;
7018  }
7019  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7020  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7021  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7022  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7023  {
7024  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7025  --pAllocationRequest->itemsToMakeLostCount;
7026  }
7027  else
7028  {
7029  return false;
7030  }
7031  }
7032 
7033  VMA_HEAVY_ASSERT(Validate());
7034  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7035  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7036 
7037  return true;
7038 }
7039 
7040 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7041 {
7042  uint32_t lostAllocationCount = 0;
7043  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7044  it != m_Suballocations.end();
7045  ++it)
7046  {
7047  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7048  it->hAllocation->CanBecomeLost() &&
7049  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7050  {
7051  it = FreeSuballocation(it);
7052  ++lostAllocationCount;
7053  }
7054  }
7055  return lostAllocationCount;
7056 }
7057 
7058 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7059 {
7060  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7061  it != m_Suballocations.end();
7062  ++it)
7063  {
7064  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7065  {
7066  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7067  {
7068  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7069  return VK_ERROR_VALIDATION_FAILED_EXT;
7070  }
7071  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7072  {
7073  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7074  return VK_ERROR_VALIDATION_FAILED_EXT;
7075  }
7076  }
7077  }
7078 
7079  return VK_SUCCESS;
7080 }
7081 
7082 void VmaBlockMetadata_Generic::Alloc(
7083  const VmaAllocationRequest& request,
7084  VmaSuballocationType type,
7085  VkDeviceSize allocSize,
7086  bool upperAddress,
7087  VmaAllocation hAllocation)
7088 {
7089  VMA_ASSERT(!upperAddress);
7090  VMA_ASSERT(request.item != m_Suballocations.end());
7091  VmaSuballocation& suballoc = *request.item;
7092  // Given suballocation is a free block.
7093  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7094  // Given offset is inside this suballocation.
7095  VMA_ASSERT(request.offset >= suballoc.offset);
7096  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7097  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7098  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7099 
7100  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7101  // it to become used.
7102  UnregisterFreeSuballocation(request.item);
7103 
7104  suballoc.offset = request.offset;
7105  suballoc.size = allocSize;
7106  suballoc.type = type;
7107  suballoc.hAllocation = hAllocation;
7108 
7109  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7110  if(paddingEnd)
7111  {
7112  VmaSuballocation paddingSuballoc = {};
7113  paddingSuballoc.offset = request.offset + allocSize;
7114  paddingSuballoc.size = paddingEnd;
7115  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7116  VmaSuballocationList::iterator next = request.item;
7117  ++next;
7118  const VmaSuballocationList::iterator paddingEndItem =
7119  m_Suballocations.insert(next, paddingSuballoc);
7120  RegisterFreeSuballocation(paddingEndItem);
7121  }
7122 
7123  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7124  if(paddingBegin)
7125  {
7126  VmaSuballocation paddingSuballoc = {};
7127  paddingSuballoc.offset = request.offset - paddingBegin;
7128  paddingSuballoc.size = paddingBegin;
7129  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7130  const VmaSuballocationList::iterator paddingBeginItem =
7131  m_Suballocations.insert(request.item, paddingSuballoc);
7132  RegisterFreeSuballocation(paddingBeginItem);
7133  }
7134 
7135  // Update totals.
7136  m_FreeCount = m_FreeCount - 1;
7137  if(paddingBegin > 0)
7138  {
7139  ++m_FreeCount;
7140  }
7141  if(paddingEnd > 0)
7142  {
7143  ++m_FreeCount;
7144  }
7145  m_SumFreeSize -= allocSize;
7146 }
7147 
7148 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7149 {
7150  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7151  suballocItem != m_Suballocations.end();
7152  ++suballocItem)
7153  {
7154  VmaSuballocation& suballoc = *suballocItem;
7155  if(suballoc.hAllocation == allocation)
7156  {
7157  FreeSuballocation(suballocItem);
7158  VMA_HEAVY_ASSERT(Validate());
7159  return;
7160  }
7161  }
7162  VMA_ASSERT(0 && "Not found!");
7163 }
7164 
7165 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7166 {
7167  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7168  suballocItem != m_Suballocations.end();
7169  ++suballocItem)
7170  {
7171  VmaSuballocation& suballoc = *suballocItem;
7172  if(suballoc.offset == offset)
7173  {
7174  FreeSuballocation(suballocItem);
7175  return;
7176  }
7177  }
7178  VMA_ASSERT(0 && "Not found!");
7179 }
7180 
7181 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7182 {
7183  VkDeviceSize lastSize = 0;
7184  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7185  {
7186  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7187 
7188  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7189  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7190  VMA_VALIDATE(it->size >= lastSize);
7191  lastSize = it->size;
7192  }
7193  return true;
7194 }
7195 
7196 bool VmaBlockMetadata_Generic::CheckAllocation(
7197  uint32_t currentFrameIndex,
7198  uint32_t frameInUseCount,
7199  VkDeviceSize bufferImageGranularity,
7200  VkDeviceSize allocSize,
7201  VkDeviceSize allocAlignment,
7202  VmaSuballocationType allocType,
7203  VmaSuballocationList::const_iterator suballocItem,
7204  bool canMakeOtherLost,
7205  VkDeviceSize* pOffset,
7206  size_t* itemsToMakeLostCount,
7207  VkDeviceSize* pSumFreeSize,
7208  VkDeviceSize* pSumItemSize) const
7209 {
7210  VMA_ASSERT(allocSize > 0);
7211  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7212  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7213  VMA_ASSERT(pOffset != VMA_NULL);
7214 
7215  *itemsToMakeLostCount = 0;
7216  *pSumFreeSize = 0;
7217  *pSumItemSize = 0;
7218 
7219  if(canMakeOtherLost)
7220  {
7221  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7222  {
7223  *pSumFreeSize = suballocItem->size;
7224  }
7225  else
7226  {
7227  if(suballocItem->hAllocation->CanBecomeLost() &&
7228  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7229  {
7230  ++*itemsToMakeLostCount;
7231  *pSumItemSize = suballocItem->size;
7232  }
7233  else
7234  {
7235  return false;
7236  }
7237  }
7238 
7239  // Remaining size is too small for this request: Early return.
7240  if(GetSize() - suballocItem->offset < allocSize)
7241  {
7242  return false;
7243  }
7244 
7245  // Start from offset equal to beginning of this suballocation.
7246  *pOffset = suballocItem->offset;
7247 
7248  // Apply VMA_DEBUG_MARGIN at the beginning.
7249  if(VMA_DEBUG_MARGIN > 0)
7250  {
7251  *pOffset += VMA_DEBUG_MARGIN;
7252  }
7253 
7254  // Apply alignment.
7255  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7256 
7257  // Check previous suballocations for BufferImageGranularity conflicts.
7258  // Make bigger alignment if necessary.
7259  if(bufferImageGranularity > 1)
7260  {
7261  bool bufferImageGranularityConflict = false;
7262  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7263  while(prevSuballocItem != m_Suballocations.cbegin())
7264  {
7265  --prevSuballocItem;
7266  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7267  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7268  {
7269  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7270  {
7271  bufferImageGranularityConflict = true;
7272  break;
7273  }
7274  }
7275  else
7276  // Already on previous page.
7277  break;
7278  }
7279  if(bufferImageGranularityConflict)
7280  {
7281  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7282  }
7283  }
7284 
7285  // Now that we have final *pOffset, check if we are past suballocItem.
7286  // If yes, return false - this function should be called for another suballocItem as starting point.
7287  if(*pOffset >= suballocItem->offset + suballocItem->size)
7288  {
7289  return false;
7290  }
7291 
7292  // Calculate padding at the beginning based on current offset.
7293  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7294 
7295  // Calculate required margin at the end.
7296  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7297 
7298  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7299  // Another early return check.
7300  if(suballocItem->offset + totalSize > GetSize())
7301  {
7302  return false;
7303  }
7304 
7305  // Advance lastSuballocItem until desired size is reached.
7306  // Update itemsToMakeLostCount.
7307  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7308  if(totalSize > suballocItem->size)
7309  {
7310  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7311  while(remainingSize > 0)
7312  {
7313  ++lastSuballocItem;
7314  if(lastSuballocItem == m_Suballocations.cend())
7315  {
7316  return false;
7317  }
7318  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7319  {
7320  *pSumFreeSize += lastSuballocItem->size;
7321  }
7322  else
7323  {
7324  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7325  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7326  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7327  {
7328  ++*itemsToMakeLostCount;
7329  *pSumItemSize += lastSuballocItem->size;
7330  }
7331  else
7332  {
7333  return false;
7334  }
7335  }
7336  remainingSize = (lastSuballocItem->size < remainingSize) ?
7337  remainingSize - lastSuballocItem->size : 0;
7338  }
7339  }
7340 
7341  // Check next suballocations for BufferImageGranularity conflicts.
7342  // If conflict exists, we must mark more allocations lost or fail.
7343  if(bufferImageGranularity > 1)
7344  {
7345  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7346  ++nextSuballocItem;
7347  while(nextSuballocItem != m_Suballocations.cend())
7348  {
7349  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7350  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7351  {
7352  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7353  {
7354  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7355  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7356  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7357  {
7358  ++*itemsToMakeLostCount;
7359  }
7360  else
7361  {
7362  return false;
7363  }
7364  }
7365  }
7366  else
7367  {
7368  // Already on next page.
7369  break;
7370  }
7371  ++nextSuballocItem;
7372  }
7373  }
7374  }
7375  else
7376  {
7377  const VmaSuballocation& suballoc = *suballocItem;
7378  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7379 
7380  *pSumFreeSize = suballoc.size;
7381 
7382  // Size of this suballocation is too small for this request: Early return.
7383  if(suballoc.size < allocSize)
7384  {
7385  return false;
7386  }
7387 
7388  // Start from offset equal to beginning of this suballocation.
7389  *pOffset = suballoc.offset;
7390 
7391  // Apply VMA_DEBUG_MARGIN at the beginning.
7392  if(VMA_DEBUG_MARGIN > 0)
7393  {
7394  *pOffset += VMA_DEBUG_MARGIN;
7395  }
7396 
7397  // Apply alignment.
7398  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7399 
7400  // Check previous suballocations for BufferImageGranularity conflicts.
7401  // Make bigger alignment if necessary.
7402  if(bufferImageGranularity > 1)
7403  {
7404  bool bufferImageGranularityConflict = false;
7405  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7406  while(prevSuballocItem != m_Suballocations.cbegin())
7407  {
7408  --prevSuballocItem;
7409  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7410  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7411  {
7412  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7413  {
7414  bufferImageGranularityConflict = true;
7415  break;
7416  }
7417  }
7418  else
7419  // Already on previous page.
7420  break;
7421  }
7422  if(bufferImageGranularityConflict)
7423  {
7424  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7425  }
7426  }
7427 
7428  // Calculate padding at the beginning based on current offset.
7429  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7430 
7431  // Calculate required margin at the end.
7432  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7433 
7434  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7435  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7436  {
7437  return false;
7438  }
7439 
7440  // Check next suballocations for BufferImageGranularity conflicts.
7441  // If conflict exists, allocation cannot be made here.
7442  if(bufferImageGranularity > 1)
7443  {
7444  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7445  ++nextSuballocItem;
7446  while(nextSuballocItem != m_Suballocations.cend())
7447  {
7448  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7449  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7450  {
7451  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7452  {
7453  return false;
7454  }
7455  }
7456  else
7457  {
7458  // Already on next page.
7459  break;
7460  }
7461  ++nextSuballocItem;
7462  }
7463  }
7464  }
7465 
7466  // All tests passed: Success. pOffset is already filled.
7467  return true;
7468 }
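
// Worked example for the offset math above (hypothetical numbers): for a
// free suballocation at offset 1000 with VMA_DEBUG_MARGIN == 16 and
// allocAlignment == 256:
//
//   *pOffset = 1000 + 16 = 1016;  VmaAlignUp(1016, 256) = 1024;
//   paddingBegin = 1024 - 1000 = 24;
//   // the request fits only if 24 + allocSize + 16 <= suballoc.size
//
// A bufferImageGranularity conflict with a neighbor on the same "page" would
// further raise the effective alignment to the granularity value.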
7469 
7470 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7471 {
7472  VMA_ASSERT(item != m_Suballocations.end());
7473  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7474 
7475  VmaSuballocationList::iterator nextItem = item;
7476  ++nextItem;
7477  VMA_ASSERT(nextItem != m_Suballocations.end());
7478  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7479 
7480  item->size += nextItem->size;
7481  --m_FreeCount;
7482  m_Suballocations.erase(nextItem);
7483 }
7484 
7485 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7486 {
7487  // Change this suballocation to be marked as free.
7488  VmaSuballocation& suballoc = *suballocItem;
7489  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7490  suballoc.hAllocation = VK_NULL_HANDLE;
7491 
7492  // Update totals.
7493  ++m_FreeCount;
7494  m_SumFreeSize += suballoc.size;
7495 
7496  // Merge with previous and/or next suballocation if it's also free.
7497  bool mergeWithNext = false;
7498  bool mergeWithPrev = false;
7499 
7500  VmaSuballocationList::iterator nextItem = suballocItem;
7501  ++nextItem;
7502  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7503  {
7504  mergeWithNext = true;
7505  }
7506 
7507  VmaSuballocationList::iterator prevItem = suballocItem;
7508  if(suballocItem != m_Suballocations.begin())
7509  {
7510  --prevItem;
7511  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7512  {
7513  mergeWithPrev = true;
7514  }
7515  }
7516 
7517  if(mergeWithNext)
7518  {
7519  UnregisterFreeSuballocation(nextItem);
7520  MergeFreeWithNext(suballocItem);
7521  }
7522 
7523  if(mergeWithPrev)
7524  {
7525  UnregisterFreeSuballocation(prevItem);
7526  MergeFreeWithNext(prevItem);
7527  RegisterFreeSuballocation(prevItem);
7528  return prevItem;
7529  }
7530  else
7531  {
7532  RegisterFreeSuballocation(suballocItem);
7533  return suballocItem;
7534  }
7535 }
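
// Editorial note: FreeSuballocation() preserves the invariant, checked in
// Validate(), that no two free suballocations are ever adjacent:
//
//   [free A][used B][free C]  --Free(B)-->  [free A+B+C]
//
// which is why both neighbors are unregistered from the size-sorted vector
// first and the merged range is re-registered once, with its final size.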
7536 
7537 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7538 {
7539  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7540  VMA_ASSERT(item->size > 0);
7541 
7542  // You may want to enable this validation at the beginning or at the end of
7543  // this function, depending on what you want to check.
7544  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7545 
7546  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7547  {
7548  if(m_FreeSuballocationsBySize.empty())
7549  {
7550  m_FreeSuballocationsBySize.push_back(item);
7551  }
7552  else
7553  {
7554  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7555  }
7556  }
7557 
7558  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7559 }
7560 
7561 
7562 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7563 {
7564  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7565  VMA_ASSERT(item->size > 0);
7566 
7567  // You may want to enable this validation at the beginning or at the end of
7568  // this function, depending on what you want to check.
7569  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7570 
7571  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7572  {
7573  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7574  m_FreeSuballocationsBySize.data(),
7575  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7576  item,
7577  VmaSuballocationItemSizeLess());
7578  for(size_t index = it - m_FreeSuballocationsBySize.data();
7579  index < m_FreeSuballocationsBySize.size();
7580  ++index)
7581  {
7582  if(m_FreeSuballocationsBySize[index] == item)
7583  {
7584  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7585  return;
7586  }
7587  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7588  }
7589  VMA_ASSERT(0 && "Not found.");
7590  }
7591 
7592  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7593 }
7594 
7595 ////////////////////////////////////////////////////////////////////////////////
7596 // class VmaBlockMetadata_Linear
7597 
7598 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7599  VmaBlockMetadata(hAllocator),
7600  m_SumFreeSize(0),
7601  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7602  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7603  m_1stVectorIndex(0),
7604  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7605  m_1stNullItemsBeginCount(0),
7606  m_1stNullItemsMiddleCount(0),
7607  m_2ndNullItemsCount(0)
7608 {
7609 }
7610 
7611 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7612 {
7613 }
7614 
7615 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7616 {
7617  VmaBlockMetadata::Init(size);
7618  m_SumFreeSize = size;
7619 }
7620 
7621 bool VmaBlockMetadata_Linear::Validate() const
7622 {
7623  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7624  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7625 
7626  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7627  VMA_VALIDATE(!suballocations1st.empty() ||
7628  suballocations2nd.empty() ||
7629  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7630 
7631  if(!suballocations1st.empty())
7632  {
7633  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7634  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7635  // A null item at the end should have been removed by pop_back().
7636  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7637  }
7638  if(!suballocations2nd.empty())
7639  {
7640  // A null item at the end should have been removed by pop_back().
7641  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7642  }
7643 
7644  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7645  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7646 
7647  VkDeviceSize sumUsedSize = 0;
7648  const size_t suballoc1stCount = suballocations1st.size();
7649  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7650 
7651  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7652  {
7653  const size_t suballoc2ndCount = suballocations2nd.size();
7654  size_t nullItem2ndCount = 0;
7655  for(size_t i = 0; i < suballoc2ndCount; ++i)
7656  {
7657  const VmaSuballocation& suballoc = suballocations2nd[i];
7658  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7659 
7660  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7661  VMA_VALIDATE(suballoc.offset >= offset);
7662 
7663  if(!currFree)
7664  {
7665  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7666  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7667  sumUsedSize += suballoc.size;
7668  }
7669  else
7670  {
7671  ++nullItem2ndCount;
7672  }
7673 
7674  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7675  }
7676 
7677  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7678  }
7679 
7680  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7681  {
7682  const VmaSuballocation& suballoc = suballocations1st[i];
7683  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7684  suballoc.hAllocation == VK_NULL_HANDLE);
7685  }
7686 
7687  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7688 
7689  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7690  {
7691  const VmaSuballocation& suballoc = suballocations1st[i];
7692  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7693 
7694  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7695  VMA_VALIDATE(suballoc.offset >= offset);
7696  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7697 
7698  if(!currFree)
7699  {
7700  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7701  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7702  sumUsedSize += suballoc.size;
7703  }
7704  else
7705  {
7706  ++nullItem1stCount;
7707  }
7708 
7709  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7710  }
7711  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7712 
7713  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7714  {
7715  const size_t suballoc2ndCount = suballocations2nd.size();
7716  size_t nullItem2ndCount = 0;
7717  for(size_t i = suballoc2ndCount; i--; )
7718  {
7719  const VmaSuballocation& suballoc = suballocations2nd[i];
7720  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7721 
7722  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7723  VMA_VALIDATE(suballoc.offset >= offset);
7724 
7725  if(!currFree)
7726  {
7727  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7728  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7729  sumUsedSize += suballoc.size;
7730  }
7731  else
7732  {
7733  ++nullItem2ndCount;
7734  }
7735 
7736  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7737  }
7738 
7739  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7740  }
7741 
7742  VMA_VALIDATE(offset <= GetSize());
7743  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7744 
7745  return true;
7746 }
7747 
7748 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7749 {
7750  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7751  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7752 }
7753 
7754 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7755 {
7756  const VkDeviceSize size = GetSize();
7757 
7758  /*
7759  We don't consider gaps inside allocation vectors with freed allocations because
7760  they are not suitable for reuse in a linear allocator. We consider only space that
7761  is available for new allocations.
7762  */
7763  if(IsEmpty())
7764  {
7765  return size;
7766  }
7767 
7768  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7769 
7770  switch(m_2ndVectorMode)
7771  {
7772  case SECOND_VECTOR_EMPTY:
7773  /*
7774  Available space is after end of 1st, as well as before beginning of 1st (which
7775  would make it a ring buffer).
7776  */
7777  {
7778  const size_t suballocations1stCount = suballocations1st.size();
7779  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7780  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7781  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7782  return VMA_MAX(
7783  firstSuballoc.offset,
7784  size - (lastSuballoc.offset + lastSuballoc.size));
7785  }
7786  break;
7787 
7788  case SECOND_VECTOR_RING_BUFFER:
7789  /*
7790  Available space is only between end of 2nd and beginning of 1st.
7791  */
7792  {
7793  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7794  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7795  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7796  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7797  }
7798  break;
7799 
7800  case SECOND_VECTOR_DOUBLE_STACK:
7801  /*
7802  Available space is only between end of 1st and top of 2nd.
7803  */
7804  {
7805  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7806  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7807  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7808  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7809  }
7810  break;
7811 
7812  default:
7813  VMA_ASSERT(0);
7814  return 0;
7815  }
7816 }
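
// Editorial note: layout of the three m_2ndVectorMode states handled above,
// with digits marking which vector owns an allocation and '.' marking free
// space (illustrative):
//
//   SECOND_VECTOR_EMPTY:        |...1111111111.....|  free before and after 1st
//   SECOND_VECTOR_RING_BUFFER:  |2222....1111111111|  free between 2nd end and 1st begin
//   SECOND_VECTOR_DOUBLE_STACK: |1111111......22222|  free between 1st end and 2nd top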
7817 
7818 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7819 {
7820  const VkDeviceSize size = GetSize();
7821  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7822  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7823  const size_t suballoc1stCount = suballocations1st.size();
7824  const size_t suballoc2ndCount = suballocations2nd.size();
7825 
7826  outInfo.blockCount = 1;
7827  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7828  outInfo.unusedRangeCount = 0;
7829  outInfo.usedBytes = 0;
7830  outInfo.allocationSizeMin = UINT64_MAX;
7831  outInfo.allocationSizeMax = 0;
7832  outInfo.unusedRangeSizeMin = UINT64_MAX;
7833  outInfo.unusedRangeSizeMax = 0;
7834 
7835  VkDeviceSize lastOffset = 0;
7836 
7837  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7838  {
7839  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7840  size_t nextAlloc2ndIndex = 0;
7841  while(lastOffset < freeSpace2ndTo1stEnd)
7842  {
7843  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7844  while(nextAlloc2ndIndex < suballoc2ndCount &&
7845  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7846  {
7847  ++nextAlloc2ndIndex;
7848  }
7849 
7850  // Found non-null allocation.
7851  if(nextAlloc2ndIndex < suballoc2ndCount)
7852  {
7853  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7854 
7855  // 1. Process free space before this allocation.
7856  if(lastOffset < suballoc.offset)
7857  {
7858  // There is free space from lastOffset to suballoc.offset.
7859  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7860  ++outInfo.unusedRangeCount;
7861  outInfo.unusedBytes += unusedRangeSize;
7862  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7863  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7864  }
7865 
7866  // 2. Process this allocation.
7867  // There is allocation with suballoc.offset, suballoc.size.
7868  outInfo.usedBytes += suballoc.size;
7869  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7870  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7871 
7872  // 3. Prepare for next iteration.
7873  lastOffset = suballoc.offset + suballoc.size;
7874  ++nextAlloc2ndIndex;
7875  }
7876  // We are at the end.
7877  else
7878  {
7879  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7880  if(lastOffset < freeSpace2ndTo1stEnd)
7881  {
7882  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7883  ++outInfo.unusedRangeCount;
7884  outInfo.unusedBytes += unusedRangeSize;
7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7886  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7887  }
7888 
7889  // End of loop.
7890  lastOffset = freeSpace2ndTo1stEnd;
7891  }
7892  }
7893  }
7894 
7895  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7896  const VkDeviceSize freeSpace1stTo2ndEnd =
7897  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7898  while(lastOffset < freeSpace1stTo2ndEnd)
7899  {
7900  // Find next non-null allocation or move nextAlloc1stIndex to the end.
7901  while(nextAlloc1stIndex < suballoc1stCount &&
7902  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7903  {
7904  ++nextAlloc1stIndex;
7905  }
7906 
7907  // Found non-null allocation.
7908  if(nextAlloc1stIndex < suballoc1stCount)
7909  {
7910  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7911 
7912  // 1. Process free space before this allocation.
7913  if(lastOffset < suballoc.offset)
7914  {
7915  // There is free space from lastOffset to suballoc.offset.
7916  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7917  ++outInfo.unusedRangeCount;
7918  outInfo.unusedBytes += unusedRangeSize;
7919  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7920  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7921  }
7922 
7923  // 2. Process this allocation.
7924  // There is allocation with suballoc.offset, suballoc.size.
7925  outInfo.usedBytes += suballoc.size;
7926  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7927  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7928 
7929  // 3. Prepare for next iteration.
7930  lastOffset = suballoc.offset + suballoc.size;
7931  ++nextAlloc1stIndex;
7932  }
7933  // We are at the end.
7934  else
7935  {
7936  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7937  if(lastOffset < freeSpace1stTo2ndEnd)
7938  {
7939  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7940  ++outInfo.unusedRangeCount;
7941  outInfo.unusedBytes += unusedRangeSize;
7942  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7943  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7944  }
7945 
7946  // End of loop.
7947  lastOffset = freeSpace1stTo2ndEnd;
7948  }
7949  }
7950 
7951  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7952  {
7953  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7954  while(lastOffset < size)
7955  {
7956  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7957  while(nextAlloc2ndIndex != SIZE_MAX &&
7958  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7959  {
7960  --nextAlloc2ndIndex;
7961  }
7962 
7963  // Found non-null allocation.
7964  if(nextAlloc2ndIndex != SIZE_MAX)
7965  {
7966  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7967 
7968  // 1. Process free space before this allocation.
7969  if(lastOffset < suballoc.offset)
7970  {
7971  // There is free space from lastOffset to suballoc.offset.
7972  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7973  ++outInfo.unusedRangeCount;
7974  outInfo.unusedBytes += unusedRangeSize;
7975  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7976  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7977  }
7978 
7979  // 2. Process this allocation.
7980  // There is allocation with suballoc.offset, suballoc.size.
7981  outInfo.usedBytes += suballoc.size;
7982  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7983  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7984 
7985  // 3. Prepare for next iteration.
7986  lastOffset = suballoc.offset + suballoc.size;
7987  --nextAlloc2ndIndex;
7988  }
7989  // We are at the end.
7990  else
7991  {
7992  // There is free space from lastOffset to size.
7993  if(lastOffset < size)
7994  {
7995  const VkDeviceSize unusedRangeSize = size - lastOffset;
7996  ++outInfo.unusedRangeCount;
7997  outInfo.unusedBytes += unusedRangeSize;
7998  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7999  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8000  }
8001 
8002  // End of loop.
8003  lastOffset = size;
8004  }
8005  }
8006  }
8007 
8008  outInfo.unusedBytes = size - outInfo.usedBytes;
8009 }
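/*
Usage sketch (illustrative, not part of the library): the VmaStatInfo filled by
CalcAllocationStatInfo() is not queried directly. It is aggregated by the public
entry point vmaCalculateStats(). A minimal way to inspect fragmentation, assuming
a valid VmaAllocator `allocator` created elsewhere:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total is a VmaStatInfo aggregated over all memory types and heaps.
    printf("used: %llu B, unused: %llu B, largest free range: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        (unsigned long long)stats.total.unusedRangeSizeMax);
*/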
8010 
8011 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8012 {
8013  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8014  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8015  const VkDeviceSize size = GetSize();
8016  const size_t suballoc1stCount = suballocations1st.size();
8017  const size_t suballoc2ndCount = suballocations2nd.size();
8018 
8019  inoutStats.size += size;
8020 
8021  VkDeviceSize lastOffset = 0;
8022 
8023  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8024  {
8025  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8026  size_t nextAlloc2ndIndex = 0;
8027  while(lastOffset < freeSpace2ndTo1stEnd)
8028  {
8029  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8030  while(nextAlloc2ndIndex < suballoc2ndCount &&
8031  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8032  {
8033  ++nextAlloc2ndIndex;
8034  }
8035 
8036  // Found non-null allocation.
8037  if(nextAlloc2ndIndex < suballoc2ndCount)
8038  {
8039  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8040 
8041  // 1. Process free space before this allocation.
8042  if(lastOffset < suballoc.offset)
8043  {
8044  // There is free space from lastOffset to suballoc.offset.
8045  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8046  inoutStats.unusedSize += unusedRangeSize;
8047  ++inoutStats.unusedRangeCount;
8048  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8049  }
8050 
8051  // 2. Process this allocation.
8052  // There is allocation with suballoc.offset, suballoc.size.
8053  ++inoutStats.allocationCount;
8054 
8055  // 3. Prepare for next iteration.
8056  lastOffset = suballoc.offset + suballoc.size;
8057  ++nextAlloc2ndIndex;
8058  }
8059  // We are at the end.
8060  else
8061  {
8062  if(lastOffset < freeSpace2ndTo1stEnd)
8063  {
8064  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8065  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8066  inoutStats.unusedSize += unusedRangeSize;
8067  ++inoutStats.unusedRangeCount;
8068  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8069  }
8070 
8071  // End of loop.
8072  lastOffset = freeSpace2ndTo1stEnd;
8073  }
8074  }
8075  }
8076 
8077  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8078  const VkDeviceSize freeSpace1stTo2ndEnd =
8079  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8080  while(lastOffset < freeSpace1stTo2ndEnd)
8081  {
8082  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8083  while(nextAlloc1stIndex < suballoc1stCount &&
8084  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8085  {
8086  ++nextAlloc1stIndex;
8087  }
8088 
8089  // Found non-null allocation.
8090  if(nextAlloc1stIndex < suballoc1stCount)
8091  {
8092  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8093 
8094  // 1. Process free space before this allocation.
8095  if(lastOffset < suballoc.offset)
8096  {
8097  // There is free space from lastOffset to suballoc.offset.
8098  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8099  inoutStats.unusedSize += unusedRangeSize;
8100  ++inoutStats.unusedRangeCount;
8101  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8102  }
8103 
8104  // 2. Process this allocation.
8105  // There is allocation with suballoc.offset, suballoc.size.
8106  ++inoutStats.allocationCount;
8107 
8108  // 3. Prepare for next iteration.
8109  lastOffset = suballoc.offset + suballoc.size;
8110  ++nextAlloc1stIndex;
8111  }
8112  // We are at the end.
8113  else
8114  {
8115  if(lastOffset < freeSpace1stTo2ndEnd)
8116  {
8117  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8118  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8119  inoutStats.unusedSize += unusedRangeSize;
8120  ++inoutStats.unusedRangeCount;
8121  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8122  }
8123 
8124  // End of loop.
8125  lastOffset = freeSpace1stTo2ndEnd;
8126  }
8127  }
8128 
8129  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8130  {
8131  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8132  while(lastOffset < size)
8133  {
8134  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8135  while(nextAlloc2ndIndex != SIZE_MAX &&
8136  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8137  {
8138  --nextAlloc2ndIndex;
8139  }
8140 
8141  // Found non-null allocation.
8142  if(nextAlloc2ndIndex != SIZE_MAX)
8143  {
8144  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8145 
8146  // 1. Process free space before this allocation.
8147  if(lastOffset < suballoc.offset)
8148  {
8149  // There is free space from lastOffset to suballoc.offset.
8150  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8151  inoutStats.unusedSize += unusedRangeSize;
8152  ++inoutStats.unusedRangeCount;
8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8154  }
8155 
8156  // 2. Process this allocation.
8157  // There is allocation with suballoc.offset, suballoc.size.
8158  ++inoutStats.allocationCount;
8159 
8160  // 3. Prepare for next iteration.
8161  lastOffset = suballoc.offset + suballoc.size;
8162  --nextAlloc2ndIndex;
8163  }
8164  // We are at the end.
8165  else
8166  {
8167  if(lastOffset < size)
8168  {
8169  // There is free space from lastOffset to size.
8170  const VkDeviceSize unusedRangeSize = size - lastOffset;
8171  inoutStats.unusedSize += unusedRangeSize;
8172  ++inoutStats.unusedRangeCount;
8173  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8174  }
8175 
8176  // End of loop.
8177  lastOffset = size;
8178  }
8179  }
8180  }
8181 }
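/*
Usage sketch (illustrative, not part of the library): AddPoolStats() feeds the
public function vmaGetPoolStats(). Assuming a valid `allocator` and a custom
VmaPool `pool`:

    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // Note that an allocation of size N fits into the existing blocks only if
    // poolStats.unusedRangeSizeMax >= N - contiguity matters, not just unusedSize.
*/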
8182 
8183 #if VMA_STATS_STRING_ENABLED
8184 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8185 {
8186  const VkDeviceSize size = GetSize();
8187  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8188  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8189  const size_t suballoc1stCount = suballocations1st.size();
8190  const size_t suballoc2ndCount = suballocations2nd.size();
8191 
8192  // FIRST PASS
8193 
8194  size_t unusedRangeCount = 0;
8195  VkDeviceSize usedBytes = 0;
8196 
8197  VkDeviceSize lastOffset = 0;
8198 
8199  size_t alloc2ndCount = 0;
8200  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8201  {
8202  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8203  size_t nextAlloc2ndIndex = 0;
8204  while(lastOffset < freeSpace2ndTo1stEnd)
8205  {
8206  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8207  while(nextAlloc2ndIndex < suballoc2ndCount &&
8208  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8209  {
8210  ++nextAlloc2ndIndex;
8211  }
8212 
8213  // Found non-null allocation.
8214  if(nextAlloc2ndIndex < suballoc2ndCount)
8215  {
8216  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8217 
8218  // 1. Process free space before this allocation.
8219  if(lastOffset < suballoc.offset)
8220  {
8221  // There is free space from lastOffset to suballoc.offset.
8222  ++unusedRangeCount;
8223  }
8224 
8225  // 2. Process this allocation.
8226  // There is allocation with suballoc.offset, suballoc.size.
8227  ++alloc2ndCount;
8228  usedBytes += suballoc.size;
8229 
8230  // 3. Prepare for next iteration.
8231  lastOffset = suballoc.offset + suballoc.size;
8232  ++nextAlloc2ndIndex;
8233  }
8234  // We are at the end.
8235  else
8236  {
8237  if(lastOffset < freeSpace2ndTo1stEnd)
8238  {
8239  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8240  ++unusedRangeCount;
8241  }
8242 
8243  // End of loop.
8244  lastOffset = freeSpace2ndTo1stEnd;
8245  }
8246  }
8247  }
8248 
8249  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8250  size_t alloc1stCount = 0;
8251  const VkDeviceSize freeSpace1stTo2ndEnd =
8252  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8253  while(lastOffset < freeSpace1stTo2ndEnd)
8254  {
8255  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8256  while(nextAlloc1stIndex < suballoc1stCount &&
8257  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8258  {
8259  ++nextAlloc1stIndex;
8260  }
8261 
8262  // Found non-null allocation.
8263  if(nextAlloc1stIndex < suballoc1stCount)
8264  {
8265  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8266 
8267  // 1. Process free space before this allocation.
8268  if(lastOffset < suballoc.offset)
8269  {
8270  // There is free space from lastOffset to suballoc.offset.
8271  ++unusedRangeCount;
8272  }
8273 
8274  // 2. Process this allocation.
8275  // There is allocation with suballoc.offset, suballoc.size.
8276  ++alloc1stCount;
8277  usedBytes += suballoc.size;
8278 
8279  // 3. Prepare for next iteration.
8280  lastOffset = suballoc.offset + suballoc.size;
8281  ++nextAlloc1stIndex;
8282  }
8283  // We are at the end.
8284  else
8285  {
8286  if(lastOffset < freeSpace1stTo2ndEnd)
8287  {
8288  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8289  ++unusedRangeCount;
8290  }
8291 
8292  // End of loop.
8293  lastOffset = freeSpace1stTo2ndEnd;
8294  }
8295  }
8296 
8297  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8298  {
8299  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8300  while(lastOffset < size)
8301  {
8302  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8303  while(nextAlloc2ndIndex != SIZE_MAX &&
8304  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8305  {
8306  --nextAlloc2ndIndex;
8307  }
8308 
8309  // Found non-null allocation.
8310  if(nextAlloc2ndIndex != SIZE_MAX)
8311  {
8312  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8313 
8314  // 1. Process free space before this allocation.
8315  if(lastOffset < suballoc.offset)
8316  {
8317  // There is free space from lastOffset to suballoc.offset.
8318  ++unusedRangeCount;
8319  }
8320 
8321  // 2. Process this allocation.
8322  // There is allocation with suballoc.offset, suballoc.size.
8323  ++alloc2ndCount;
8324  usedBytes += suballoc.size;
8325 
8326  // 3. Prepare for next iteration.
8327  lastOffset = suballoc.offset + suballoc.size;
8328  --nextAlloc2ndIndex;
8329  }
8330  // We are at the end.
8331  else
8332  {
8333  if(lastOffset < size)
8334  {
8335  // There is free space from lastOffset to size.
8336  ++unusedRangeCount;
8337  }
8338 
8339  // End of loop.
8340  lastOffset = size;
8341  }
8342  }
8343  }
8344 
8345  const VkDeviceSize unusedBytes = size - usedBytes;
8346  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8347 
8348  // SECOND PASS
8349  lastOffset = 0;
8350 
8351  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8352  {
8353  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8354  size_t nextAlloc2ndIndex = 0;
8355  while(lastOffset < freeSpace2ndTo1stEnd)
8356  {
8357  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8358  while(nextAlloc2ndIndex < suballoc2ndCount &&
8359  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8360  {
8361  ++nextAlloc2ndIndex;
8362  }
8363 
8364  // Found non-null allocation.
8365  if(nextAlloc2ndIndex < suballoc2ndCount)
8366  {
8367  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8368 
8369  // 1. Process free space before this allocation.
8370  if(lastOffset < suballoc.offset)
8371  {
8372  // There is free space from lastOffset to suballoc.offset.
8373  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8374  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8375  }
8376 
8377  // 2. Process this allocation.
8378  // There is allocation with suballoc.offset, suballoc.size.
8379  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8380 
8381  // 3. Prepare for next iteration.
8382  lastOffset = suballoc.offset + suballoc.size;
8383  ++nextAlloc2ndIndex;
8384  }
8385  // We are at the end.
8386  else
8387  {
8388  if(lastOffset < freeSpace2ndTo1stEnd)
8389  {
8390  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8391  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8392  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8393  }
8394 
8395  // End of loop.
8396  lastOffset = freeSpace2ndTo1stEnd;
8397  }
8398  }
8399  }
8400 
8401  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8402  while(lastOffset < freeSpace1stTo2ndEnd)
8403  {
8404  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8405  while(nextAlloc1stIndex < suballoc1stCount &&
8406  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8407  {
8408  ++nextAlloc1stIndex;
8409  }
8410 
8411  // Found non-null allocation.
8412  if(nextAlloc1stIndex < suballoc1stCount)
8413  {
8414  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8415 
8416  // 1. Process free space before this allocation.
8417  if(lastOffset < suballoc.offset)
8418  {
8419  // There is free space from lastOffset to suballoc.offset.
8420  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8421  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8422  }
8423 
8424  // 2. Process this allocation.
8425  // There is allocation with suballoc.offset, suballoc.size.
8426  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8427 
8428  // 3. Prepare for next iteration.
8429  lastOffset = suballoc.offset + suballoc.size;
8430  ++nextAlloc1stIndex;
8431  }
8432  // We are at the end.
8433  else
8434  {
8435  if(lastOffset < freeSpace1stTo2ndEnd)
8436  {
8437  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8438  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8439  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8440  }
8441 
8442  // End of loop.
8443  lastOffset = freeSpace1stTo2ndEnd;
8444  }
8445  }
8446 
8447  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8448  {
8449  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8450  while(lastOffset < size)
8451  {
8452  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8453  while(nextAlloc2ndIndex != SIZE_MAX &&
8454  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8455  {
8456  --nextAlloc2ndIndex;
8457  }
8458 
8459  // Found non-null allocation.
8460  if(nextAlloc2ndIndex != SIZE_MAX)
8461  {
8462  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8463 
8464  // 1. Process free space before this allocation.
8465  if(lastOffset < suballoc.offset)
8466  {
8467  // There is free space from lastOffset to suballoc.offset.
8468  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8469  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8470  }
8471 
8472  // 2. Process this allocation.
8473  // There is allocation with suballoc.offset, suballoc.size.
8474  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8475 
8476  // 3. Prepare for next iteration.
8477  lastOffset = suballoc.offset + suballoc.size;
8478  --nextAlloc2ndIndex;
8479  }
8480  // We are at the end.
8481  else
8482  {
8483  if(lastOffset < size)
8484  {
8485  // There is free space from lastOffset to size.
8486  const VkDeviceSize unusedRangeSize = size - lastOffset;
8487  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8488  }
8489 
8490  // End of loop.
8491  lastOffset = size;
8492  }
8493  }
8494  }
8495 
8496  PrintDetailedMap_End(json);
8497 }
8498 #endif // #if VMA_STATS_STRING_ENABLED
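/*
Usage sketch (illustrative, not part of the library): PrintDetailedMap() is
reached through vmaBuildStatsString() when its detailedMap parameter is VK_TRUE.
Assuming a valid `allocator`:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = per-allocation JSON map.
    // ... save statsString to a file, e.g. for visualization with VmaDumpVis ...
    vmaFreeStatsString(allocator, statsString);
*/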
8499 
8500 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8501  uint32_t currentFrameIndex,
8502  uint32_t frameInUseCount,
8503  VkDeviceSize bufferImageGranularity,
8504  VkDeviceSize allocSize,
8505  VkDeviceSize allocAlignment,
8506  bool upperAddress,
8507  VmaSuballocationType allocType,
8508  bool canMakeOtherLost,
8509  uint32_t strategy,
8510  VmaAllocationRequest* pAllocationRequest)
8511 {
8512  VMA_ASSERT(allocSize > 0);
8513  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8514  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8515  VMA_HEAVY_ASSERT(Validate());
8516 
8517  const VkDeviceSize size = GetSize();
8518  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8519  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8520 
8521  if(upperAddress)
8522  {
8523  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8524  {
8525  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8526  return false;
8527  }
8528 
8529  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8530  if(allocSize > size)
8531  {
8532  return false;
8533  }
8534  VkDeviceSize resultBaseOffset = size - allocSize;
8535  if(!suballocations2nd.empty())
8536  {
8537  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8538  resultBaseOffset = lastSuballoc.offset - allocSize;
8539  if(allocSize > lastSuballoc.offset)
8540  {
8541  return false;
8542  }
8543  }
8544 
8545  // Start from offset equal to end of free space.
8546  VkDeviceSize resultOffset = resultBaseOffset;
8547 
8548  // Apply VMA_DEBUG_MARGIN at the end.
8549  if(VMA_DEBUG_MARGIN > 0)
8550  {
8551  if(resultOffset < VMA_DEBUG_MARGIN)
8552  {
8553  return false;
8554  }
8555  resultOffset -= VMA_DEBUG_MARGIN;
8556  }
8557 
8558  // Apply alignment.
8559  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8560 
8561  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8562  // Make bigger alignment if necessary.
8563  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8564  {
8565  bool bufferImageGranularityConflict = false;
8566  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8567  {
8568  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8569  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8570  {
8571  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8572  {
8573  bufferImageGranularityConflict = true;
8574  break;
8575  }
8576  }
8577  else
8578  // Already on previous page.
8579  break;
8580  }
8581  if(bufferImageGranularityConflict)
8582  {
8583  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8584  }
8585  }
8586 
8587  // There is enough free space.
8588  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8589  suballocations1st.back().offset + suballocations1st.back().size :
8590  0;
8591  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8592  {
8593  // Check previous suballocations for BufferImageGranularity conflicts.
8594  // If conflict exists, allocation cannot be made here.
8595  if(bufferImageGranularity > 1)
8596  {
8597  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8598  {
8599  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8600  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8601  {
8602  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8603  {
8604  return false;
8605  }
8606  }
8607  else
8608  {
8609  // Already on next page.
8610  break;
8611  }
8612  }
8613  }
8614 
8615  // All tests passed: Success.
8616  pAllocationRequest->offset = resultOffset;
8617  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8618  pAllocationRequest->sumItemSize = 0;
8619  // pAllocationRequest->item unused.
8620  pAllocationRequest->itemsToMakeLostCount = 0;
8621  return true;
8622  }
8623  }
8624  else // !upperAddress
8625  {
8626  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8627  {
8628  // Try to allocate at the end of 1st vector.
8629 
8630  VkDeviceSize resultBaseOffset = 0;
8631  if(!suballocations1st.empty())
8632  {
8633  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8634  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8635  }
8636 
8637  // Start from offset equal to beginning of free space.
8638  VkDeviceSize resultOffset = resultBaseOffset;
8639 
8640  // Apply VMA_DEBUG_MARGIN at the beginning.
8641  if(VMA_DEBUG_MARGIN > 0)
8642  {
8643  resultOffset += VMA_DEBUG_MARGIN;
8644  }
8645 
8646  // Apply alignment.
8647  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8648 
8649  // Check previous suballocations for BufferImageGranularity conflicts.
8650  // Make bigger alignment if necessary.
8651  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8652  {
8653  bool bufferImageGranularityConflict = false;
8654  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8655  {
8656  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8657  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8658  {
8659  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8660  {
8661  bufferImageGranularityConflict = true;
8662  break;
8663  }
8664  }
8665  else
8666  // Already on previous page.
8667  break;
8668  }
8669  if(bufferImageGranularityConflict)
8670  {
8671  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8672  }
8673  }
8674 
8675  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8676  suballocations2nd.back().offset : size;
8677 
8678  // There is enough free space at the end after alignment.
8679  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8680  {
8681  // Check next suballocations for BufferImageGranularity conflicts.
8682  // If conflict exists, allocation cannot be made here.
8683  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8684  {
8685  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8686  {
8687  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8688  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8689  {
8690  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8691  {
8692  return false;
8693  }
8694  }
8695  else
8696  {
8697  // Already on previous page.
8698  break;
8699  }
8700  }
8701  }
8702 
8703  // All tests passed: Success.
8704  pAllocationRequest->offset = resultOffset;
8705  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8706  pAllocationRequest->sumItemSize = 0;
8707  // pAllocationRequest->item unused.
8708  pAllocationRequest->itemsToMakeLostCount = 0;
8709  return true;
8710  }
8711  }
8712 
8713  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8714  // beginning of 1st vector as the end of free space.
8715  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8716  {
8717  VMA_ASSERT(!suballocations1st.empty());
8718 
8719  VkDeviceSize resultBaseOffset = 0;
8720  if(!suballocations2nd.empty())
8721  {
8722  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8723  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8724  }
8725 
8726  // Start from offset equal to beginning of free space.
8727  VkDeviceSize resultOffset = resultBaseOffset;
8728 
8729  // Apply VMA_DEBUG_MARGIN at the beginning.
8730  if(VMA_DEBUG_MARGIN > 0)
8731  {
8732  resultOffset += VMA_DEBUG_MARGIN;
8733  }
8734 
8735  // Apply alignment.
8736  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8737 
8738  // Check previous suballocations for BufferImageGranularity conflicts.
8739  // Make bigger alignment if necessary.
8740  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8741  {
8742  bool bufferImageGranularityConflict = false;
8743  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8744  {
8745  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8746  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8747  {
8748  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8749  {
8750  bufferImageGranularityConflict = true;
8751  break;
8752  }
8753  }
8754  else
8755  // Already on previous page.
8756  break;
8757  }
8758  if(bufferImageGranularityConflict)
8759  {
8760  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8761  }
8762  }
8763 
8764  pAllocationRequest->itemsToMakeLostCount = 0;
8765  pAllocationRequest->sumItemSize = 0;
8766  size_t index1st = m_1stNullItemsBeginCount;
8767 
8768  if(canMakeOtherLost)
8769  {
8770  while(index1st < suballocations1st.size() &&
8771  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8772  {
8773  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8774  const VmaSuballocation& suballoc = suballocations1st[index1st];
8775  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8776  {
8777  // No problem.
8778  }
8779  else
8780  {
8781  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8782  if(suballoc.hAllocation->CanBecomeLost() &&
8783  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8784  {
8785  ++pAllocationRequest->itemsToMakeLostCount;
8786  pAllocationRequest->sumItemSize += suballoc.size;
8787  }
8788  else
8789  {
8790  return false;
8791  }
8792  }
8793  ++index1st;
8794  }
8795 
8796  // Check next suballocations for BufferImageGranularity conflicts.
8797  // If conflict exists, we must mark more allocations lost or fail.
8798  if(bufferImageGranularity > 1)
8799  {
8800  while(index1st < suballocations1st.size())
8801  {
8802  const VmaSuballocation& suballoc = suballocations1st[index1st];
8803  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8804  {
8805  if(suballoc.hAllocation != VK_NULL_HANDLE)
8806  {
8807  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8808  if(suballoc.hAllocation->CanBecomeLost() &&
8809  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8810  {
8811  ++pAllocationRequest->itemsToMakeLostCount;
8812  pAllocationRequest->sumItemSize += suballoc.size;
8813  }
8814  else
8815  {
8816  return false;
8817  }
8818  }
8819  }
8820  else
8821  {
8822  // Already on next page.
8823  break;
8824  }
8825  ++index1st;
8826  }
8827  }
8828  }
8829 
8830  // There is enough free space at the end after alignment.
8831  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8832  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8833  {
8834  // Check next suballocations for BufferImageGranularity conflicts.
8835  // If conflict exists, allocation cannot be made here.
8836  if(bufferImageGranularity > 1)
8837  {
8838  for(size_t nextSuballocIndex = index1st;
8839  nextSuballocIndex < suballocations1st.size();
8840  nextSuballocIndex++)
8841  {
8842  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8843  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8844  {
8845  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8846  {
8847  return false;
8848  }
8849  }
8850  else
8851  {
8852  // Already on next page.
8853  break;
8854  }
8855  }
8856  }
8857 
8858  // All tests passed: Success.
8859  pAllocationRequest->offset = resultOffset;
8860  pAllocationRequest->sumFreeSize =
8861  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8862  - resultBaseOffset
8863  - pAllocationRequest->sumItemSize;
8864  // pAllocationRequest->item unused.
8865  return true;
8866  }
8867  }
8868  }
8869 
8870  return false;
8871 }
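/*
Usage sketch (illustrative, not part of the library): the upperAddress path above
is selected by VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT in a pool created with
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, which turns the block into a double stack.
Assuming `allocator` and such a linear `pool` already exist:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.pool = pool;
    allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the top of the block.

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buf, &alloc, nullptr);

Mixing this with ring-buffer usage of the same pool fails, as the assert above enforces.
*/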
8872 
8873 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
8874  uint32_t currentFrameIndex,
8875  uint32_t frameInUseCount,
8876  VmaAllocationRequest* pAllocationRequest)
8877 {
8878  if(pAllocationRequest->itemsToMakeLostCount == 0)
8879  {
8880  return true;
8881  }
8882 
8883  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
8884 
8885  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8886  size_t index1st = m_1stNullItemsBeginCount;
8887  size_t madeLostCount = 0;
8888  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
8889  {
8890  VMA_ASSERT(index1st < suballocations1st.size());
8891  VmaSuballocation& suballoc = suballocations1st[index1st];
8892  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8893  {
8894  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8895  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
8896  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8897  {
8898  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8899  suballoc.hAllocation = VK_NULL_HANDLE;
8900  m_SumFreeSize += suballoc.size;
8901  ++m_1stNullItemsMiddleCount;
8902  ++madeLostCount;
8903  }
8904  else
8905  {
8906  return false;
8907  }
8908  }
8909  ++index1st;
8910  }
8911 
8912  CleanupAfterFree();
8913  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
8914 
8915  return true;
8916 }
8917 
8918 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8919 {
8920  uint32_t lostAllocationCount = 0;
8921 
8922  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8923  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8924  {
8925  VmaSuballocation& suballoc = suballocations1st[i];
8926  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8927  suballoc.hAllocation->CanBecomeLost() &&
8928  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8929  {
8930  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8931  suballoc.hAllocation = VK_NULL_HANDLE;
8932  ++m_1stNullItemsMiddleCount;
8933  m_SumFreeSize += suballoc.size;
8934  ++lostAllocationCount;
8935  }
8936  }
8937 
8938  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8939  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8940  {
8941  VmaSuballocation& suballoc = suballocations2nd[i];
8942  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8943  suballoc.hAllocation->CanBecomeLost() &&
8944  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8945  {
8946  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8947  suballoc.hAllocation = VK_NULL_HANDLE;
8948  ++m_2ndNullItemsCount;
8949  ++lostAllocationCount;
8950  }
8951  }
8952 
8953  if(lostAllocationCount)
8954  {
8955  CleanupAfterFree();
8956  }
8957 
8958  return lostAllocationCount;
8959 }
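/*
Usage sketch (illustrative, not part of the library): an allocation can become
lost only if it was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
its last use is more than frameInUseCount frames behind the current frame index.
Typical per-frame flow, assuming `allocator` and such an `alloc`:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation was made lost - its memory may already be reused.
        // Recreate the resource before using it again.
    }
*/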
8960 
8961 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8962 {
8963  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8964  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8965  {
8966  const VmaSuballocation& suballoc = suballocations1st[i];
8967  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8968  {
8969  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8970  {
8971  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8972  return VK_ERROR_VALIDATION_FAILED_EXT;
8973  }
8974  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8975  {
8976  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8977  return VK_ERROR_VALIDATION_FAILED_EXT;
8978  }
8979  }
8980  }
8981 
8982  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8983  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8984  {
8985  const VmaSuballocation& suballoc = suballocations2nd[i];
8986  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8987  {
8988  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8989  {
8990  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8991  return VK_ERROR_VALIDATION_FAILED_EXT;
8992  }
8993  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8994  {
8995  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8996  return VK_ERROR_VALIDATION_FAILED_EXT;
8997  }
8998  }
8999  }
9000 
9001  return VK_SUCCESS;
9002 }
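/*
Usage sketch (illustrative, not part of the library): CheckCorruption() validates
the magic values written into the VMA_DEBUG_MARGIN region around each allocation,
so it requires compiling with VMA_DEBUG_MARGIN > 0 and VMA_DEBUG_DETECT_CORRUPTION
enabled. It is exposed through vmaCheckCorruption():

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // Check all memory types.
    // VK_SUCCESS: no corruption found.
    // VK_ERROR_FEATURE_NOT_PRESENT: margins/corruption detection not enabled.
    // VK_ERROR_VALIDATION_FAILED_EXT: corruption detected.
*/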
9003 
9004 void VmaBlockMetadata_Linear::Alloc(
9005  const VmaAllocationRequest& request,
9006  VmaSuballocationType type,
9007  VkDeviceSize allocSize,
9008  bool upperAddress,
9009  VmaAllocation hAllocation)
9010 {
9011  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9012 
9013  if(upperAddress)
9014  {
9015  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9016  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9017  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9018  suballocations2nd.push_back(newSuballoc);
9019  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9020  }
9021  else
9022  {
9023  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9024 
9025  // First allocation.
9026  if(suballocations1st.empty())
9027  {
9028  suballocations1st.push_back(newSuballoc);
9029  }
9030  else
9031  {
9032  // New allocation at the end of 1st vector.
9033  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9034  {
9035  // Check if it fits before the end of the block.
9036  VMA_ASSERT(request.offset + allocSize <= GetSize());
9037  suballocations1st.push_back(newSuballoc);
9038  }
9039  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9040  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9041  {
9042  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9043 
9044  switch(m_2ndVectorMode)
9045  {
9046  case SECOND_VECTOR_EMPTY:
9047  // First allocation from second part ring buffer.
9048  VMA_ASSERT(suballocations2nd.empty());
9049  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9050  break;
9051  case SECOND_VECTOR_RING_BUFFER:
9052  // 2-part ring buffer is already started.
9053  VMA_ASSERT(!suballocations2nd.empty());
9054  break;
9055  case SECOND_VECTOR_DOUBLE_STACK:
9056  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9057  break;
9058  default:
9059  VMA_ASSERT(0);
9060  }
9061 
9062  suballocations2nd.push_back(newSuballoc);
9063  }
9064  else
9065  {
9066  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9067  }
9068  }
9069  }
9070 
9071  m_SumFreeSize -= newSuballoc.size;
9072 }
9073 
9074 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9075 {
9076  FreeAtOffset(allocation->GetOffset());
9077 }
9078 
9079 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9080 {
9081  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9082  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9083 
9084  if(!suballocations1st.empty())
9085  {
9086  // First allocation: Mark it as next empty at the beginning.
9087  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9088  if(firstSuballoc.offset == offset)
9089  {
9090  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9091  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9092  m_SumFreeSize += firstSuballoc.size;
9093  ++m_1stNullItemsBeginCount;
9094  CleanupAfterFree();
9095  return;
9096  }
9097  }
9098 
9099  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9100  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9101  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9102  {
9103  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9104  if(lastSuballoc.offset == offset)
9105  {
9106  m_SumFreeSize += lastSuballoc.size;
9107  suballocations2nd.pop_back();
9108  CleanupAfterFree();
9109  return;
9110  }
9111  }
9112  // Last allocation in 1st vector.
9113  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9114  {
9115  VmaSuballocation& lastSuballoc = suballocations1st.back();
9116  if(lastSuballoc.offset == offset)
9117  {
9118  m_SumFreeSize += lastSuballoc.size;
9119  suballocations1st.pop_back();
9120  CleanupAfterFree();
9121  return;
9122  }
9123  }
9124 
9125  // Item from the middle of 1st vector.
9126  {
9127  VmaSuballocation refSuballoc;
9128  refSuballoc.offset = offset;
9129  // Rest of members stays uninitialized intentionally for better performance.
9130  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9131  suballocations1st.begin() + m_1stNullItemsBeginCount,
9132  suballocations1st.end(),
9133  refSuballoc);
9134  if(it != suballocations1st.end())
9135  {
9136  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9137  it->hAllocation = VK_NULL_HANDLE;
9138  ++m_1stNullItemsMiddleCount;
9139  m_SumFreeSize += it->size;
9140  CleanupAfterFree();
9141  return;
9142  }
9143  }
9144 
9145  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9146  {
9147  // Item from the middle of 2nd vector.
9148  VmaSuballocation refSuballoc;
9149  refSuballoc.offset = offset;
9150  // Rest of members stays uninitialized intentionally for better performance.
9151  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9152  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9153  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9154  if(it != suballocations2nd.end())
9155  {
9156  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9157  it->hAllocation = VK_NULL_HANDLE;
9158  ++m_2ndNullItemsCount;
9159  m_SumFreeSize += it->size;
9160  CleanupAfterFree();
9161  return;
9162  }
9163  }
9164 
9165  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9166 }
9167 
9168 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9169 {
9170  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9171  const size_t suballocCount = AccessSuballocations1st().size();
9172  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9173 }
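/*
Worked example of the heuristic above (illustrative): with suballocCount == 100
and nullItemCount == 60 the test reads 60*2 >= (100-60)*3, i.e. 120 >= 120, so
compaction triggers. In other words, null items must reach 1.5x the live items
(60% of the vector) before the O(n) compaction in CleanupAfterFree() is worth it.
*/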
9174 
9175 void VmaBlockMetadata_Linear::CleanupAfterFree()
9176 {
9177  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9178  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9179 
9180  if(IsEmpty())
9181  {
9182  suballocations1st.clear();
9183  suballocations2nd.clear();
9184  m_1stNullItemsBeginCount = 0;
9185  m_1stNullItemsMiddleCount = 0;
9186  m_2ndNullItemsCount = 0;
9187  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9188  }
9189  else
9190  {
9191  const size_t suballoc1stCount = suballocations1st.size();
9192  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9193  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9194 
9195  // Find more null items at the beginning of 1st vector.
9196  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9197  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9198  {
9199  ++m_1stNullItemsBeginCount;
9200  --m_1stNullItemsMiddleCount;
9201  }
9202 
9203  // Find more null items at the end of 1st vector.
9204  while(m_1stNullItemsMiddleCount > 0 &&
9205  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9206  {
9207  --m_1stNullItemsMiddleCount;
9208  suballocations1st.pop_back();
9209  }
9210 
9211  // Find more null items at the end of 2nd vector.
9212  while(m_2ndNullItemsCount > 0 &&
9213  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9214  {
9215  --m_2ndNullItemsCount;
9216  suballocations2nd.pop_back();
9217  }
9218 
9219  if(ShouldCompact1st())
9220  {
9221  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9222  size_t srcIndex = m_1stNullItemsBeginCount;
9223  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9224  {
9225  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9226  {
9227  ++srcIndex;
9228  }
9229  if(dstIndex != srcIndex)
9230  {
9231  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9232  }
9233  ++srcIndex;
9234  }
9235  suballocations1st.resize(nonNullItemCount);
9236  m_1stNullItemsBeginCount = 0;
9237  m_1stNullItemsMiddleCount = 0;
9238  }
9239 
9240  // 2nd vector became empty.
9241  if(suballocations2nd.empty())
9242  {
9243  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9244  }
9245 
9246  // 1st vector became empty.
9247  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9248  {
9249  suballocations1st.clear();
9250  m_1stNullItemsBeginCount = 0;
9251 
9252  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9253  {
9254  // Swap 1st with 2nd. Now 2nd is empty.
9255  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9256  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9257  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9258  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9259  {
9260  ++m_1stNullItemsBeginCount;
9261  --m_1stNullItemsMiddleCount;
9262  }
9263  m_2ndNullItemsCount = 0;
9264  m_1stVectorIndex ^= 1;
9265  }
9266  }
9267  }
9268 
9269  VMA_HEAVY_ASSERT(Validate());
9270 }
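/*
Note on the swap above (illustrative): when the 1st vector drains while the 2nd
still holds ring-buffer allocations, the two vectors exchange roles via
m_1stVectorIndex ^= 1 instead of copying elements. E.g. 1st = [null, null],
2nd = [A, B] becomes 1st = [A, B], 2nd = [] in O(1), and the invariant that the
1st vector is sorted by increasing offset is preserved.
*/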
9271 
9272 
9273 ////////////////////////////////////////////////////////////////////////////////
9274 // class VmaBlockMetadata_Buddy
9275 
9276 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9277  VmaBlockMetadata(hAllocator),
9278  m_Root(VMA_NULL),
9279  m_AllocationCount(0),
9280  m_FreeCount(1),
9281  m_SumFreeSize(0)
9282 {
9283  memset(m_FreeList, 0, sizeof(m_FreeList));
9284 }
9285 
9286 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9287 {
9288  DeleteNode(m_Root);
9289 }
9290 
9291 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9292 {
9293  VmaBlockMetadata::Init(size);
9294 
9295  m_UsableSize = VmaPrevPow2(size);
9296  m_SumFreeSize = m_UsableSize;
9297 
9298  // Calculate m_LevelCount.
9299  m_LevelCount = 1;
9300  while(m_LevelCount < MAX_LEVELS &&
9301  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9302  {
9303  ++m_LevelCount;
9304  }
9305 
9306  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9307  rootNode->offset = 0;
9308  rootNode->type = Node::TYPE_FREE;
9309  rootNode->parent = VMA_NULL;
9310  rootNode->buddy = VMA_NULL;
9311 
9312  m_Root = rootNode;
9313  AddToFreeListFront(0, rootNode);
9314 }
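/*
Worked example (illustrative): for a block of 10 MiB, m_UsableSize becomes
VmaPrevPow2(10 MiB) = 8 MiB; the 2 MiB remainder is permanently unusable and is
reported by GetUnusableSize() as one extra unused range. Node sizes then halve
per level (level 0 = 8 MiB, level 1 = 4 MiB, ...), and m_LevelCount grows until
MAX_LEVELS is reached or the next level's node size would drop below MIN_NODE_SIZE.
*/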
9315 
9316 bool VmaBlockMetadata_Buddy::Validate() const
9317 {
9318  // Validate tree.
9319  ValidationContext ctx;
9320  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9321  {
9322  VMA_VALIDATE(false && "ValidateNode failed.");
9323  }
9324  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9325  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9326 
9327  // Validate free node lists.
9328  for(uint32_t level = 0; level < m_LevelCount; ++level)
9329  {
9330  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9331  m_FreeList[level].front->free.prev == VMA_NULL);
9332 
9333  for(Node* node = m_FreeList[level].front;
9334  node != VMA_NULL;
9335  node = node->free.next)
9336  {
9337  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9338 
9339  if(node->free.next == VMA_NULL)
9340  {
9341  VMA_VALIDATE(m_FreeList[level].back == node);
9342  }
9343  else
9344  {
9345  VMA_VALIDATE(node->free.next->free.prev == node);
9346  }
9347  }
9348  }
9349 
9350  // Validate that free lists at higher levels are empty.
9351  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9352  {
9353  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9354  }
9355 
9356  return true;
9357 }
9358 
9359 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9360 {
9361  for(uint32_t level = 0; level < m_LevelCount; ++level)
9362  {
9363  if(m_FreeList[level].front != VMA_NULL)
9364  {
9365  return LevelToNodeSize(level);
9366  }
9367  }
9368  return 0;
9369 }
9370 
9371 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9372 {
9373  const VkDeviceSize unusableSize = GetUnusableSize();
9374 
9375  outInfo.blockCount = 1;
9376 
9377  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9378  outInfo.usedBytes = outInfo.unusedBytes = 0;
9379 
9380  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9381  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9382  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9383 
9384  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9385 
9386  if(unusableSize > 0)
9387  {
9388  ++outInfo.unusedRangeCount;
9389  outInfo.unusedBytes += unusableSize;
9390  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9391  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9392  }
9393 }
9394 
9395 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9396 {
9397  const VkDeviceSize unusableSize = GetUnusableSize();
9398 
9399  inoutStats.size += GetSize();
9400  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9401  inoutStats.allocationCount += m_AllocationCount;
9402  inoutStats.unusedRangeCount += m_FreeCount;
9403  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9404 
9405  if(unusableSize > 0)
9406  {
9407  ++inoutStats.unusedRangeCount;
9408  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9409  }
9410 }
9411 
9412 #if VMA_STATS_STRING_ENABLED
9413 
9414 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9415 {
9416  // TODO optimize
9417  VmaStatInfo stat;
9418  CalcAllocationStatInfo(stat);
9419 
9420  PrintDetailedMap_Begin(
9421  json,
9422  stat.unusedBytes,
9423  stat.allocationCount,
9424  stat.unusedRangeCount);
9425 
9426  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9427 
9428  const VkDeviceSize unusableSize = GetUnusableSize();
9429  if(unusableSize > 0)
9430  {
9431  PrintDetailedMap_UnusedRange(json,
9432  m_UsableSize, // offset
9433  unusableSize); // size
9434  }
9435 
9436  PrintDetailedMap_End(json);
9437 }
9438 
9439 #endif // #if VMA_STATS_STRING_ENABLED
9440 
9441 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9442  uint32_t currentFrameIndex,
9443  uint32_t frameInUseCount,
9444  VkDeviceSize bufferImageGranularity,
9445  VkDeviceSize allocSize,
9446  VkDeviceSize allocAlignment,
9447  bool upperAddress,
9448  VmaSuballocationType allocType,
9449  bool canMakeOtherLost,
9450  uint32_t strategy,
9451  VmaAllocationRequest* pAllocationRequest)
9452 {
9453  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9454 
9455  // Simple way to respect bufferImageGranularity. May be optimized some day.
9456  // Whenever it might be an OPTIMAL image...
9457  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9458  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9459  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9460  {
9461  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9462  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9463  }
9464 
9465  if(allocSize > m_UsableSize)
9466  {
9467  return false;
9468  }
9469 
9470  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9471  for(uint32_t level = targetLevel + 1; level--; )
9472  {
9473  for(Node* freeNode = m_FreeList[level].front;
9474  freeNode != VMA_NULL;
9475  freeNode = freeNode->free.next)
9476  {
9477  if(freeNode->offset % allocAlignment == 0)
9478  {
9479  pAllocationRequest->offset = freeNode->offset;
9480  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9481  pAllocationRequest->sumItemSize = 0;
9482  pAllocationRequest->itemsToMakeLostCount = 0;
9483  pAllocationRequest->customData = (void*)(uintptr_t)level;
9484  return true;
9485  }
9486  }
9487  }
9488 
9489  return false;
9490 }
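/*
Usage sketch (illustrative, not part of the library): this buddy metadata is
selected by creating a custom pool with VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT.
Assuming `allocator` and a suitable `memoryTypeIndex`:

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memoryTypeIndex;
    poolInfo.blockSize = 128ull * 1024 * 1024; // Power of 2 recommended; any excess is unusable.
    poolInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);

Allocation and free become O(log N), but sizes round up to powers of 2, so
internal fragmentation can be significant (see CalcAllocationStatInfoNode() below).
*/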
9491 
9492 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9493  uint32_t currentFrameIndex,
9494  uint32_t frameInUseCount,
9495  VmaAllocationRequest* pAllocationRequest)
9496 {
9497  /*
9498  Lost allocations are not supported in buddy allocator at the moment.
9499  Support might be added in the future.
9500  */
9501  return pAllocationRequest->itemsToMakeLostCount == 0;
9502 }
9503 
9504 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9505 {
9506  /*
9507  Lost allocations are not supported in buddy allocator at the moment.
9508  Support might be added in the future.
9509  */
9510  return 0;
9511 }
9512 
9513 void VmaBlockMetadata_Buddy::Alloc(
9514  const VmaAllocationRequest& request,
9515  VmaSuballocationType type,
9516  VkDeviceSize allocSize,
9517  bool upperAddress,
9518  VmaAllocation hAllocation)
9519 {
9520  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9521  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9522 
9523  Node* currNode = m_FreeList[currLevel].front;
9524  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9525  while(currNode->offset != request.offset)
9526  {
9527  currNode = currNode->free.next;
9528  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9529  }
9530 
9531  // Go down, splitting free nodes.
9532  while(currLevel < targetLevel)
9533  {
9534  // currNode is already first free node at currLevel.
9535  // Remove it from list of free nodes at this currLevel.
9536  RemoveFromFreeList(currLevel, currNode);
9537 
9538  const uint32_t childrenLevel = currLevel + 1;
9539 
9540  // Create two free sub-nodes.
9541  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9542  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9543 
9544  leftChild->offset = currNode->offset;
9545  leftChild->type = Node::TYPE_FREE;
9546  leftChild->parent = currNode;
9547  leftChild->buddy = rightChild;
9548 
9549  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9550  rightChild->type = Node::TYPE_FREE;
9551  rightChild->parent = currNode;
9552  rightChild->buddy = leftChild;
9553 
9554  // Convert current currNode to split type.
9555  currNode->type = Node::TYPE_SPLIT;
9556  currNode->split.leftChild = leftChild;
9557 
9558  // Add child nodes to free list. Order is important!
9559  AddToFreeListFront(childrenLevel, rightChild);
9560  AddToFreeListFront(childrenLevel, leftChild);
9561 
9562  ++m_FreeCount;
9563  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9564  ++currLevel;
9565  currNode = m_FreeList[currLevel].front;
9566 
9567  /*
9568  We can be sure that currNode, as left child of node previously split,
9569  also fulfills the alignment requirement.
9570  */
9571  }
9572 
9573  // Remove from free list.
9574  VMA_ASSERT(currLevel == targetLevel &&
9575  currNode != VMA_NULL &&
9576  currNode->type == Node::TYPE_FREE);
9577  RemoveFromFreeList(currLevel, currNode);
9578 
9579  // Convert to allocation node.
9580  currNode->type = Node::TYPE_ALLOCATION;
9581  currNode->allocation.alloc = hAllocation;
9582 
9583  ++m_AllocationCount;
9584  --m_FreeCount;
9585  m_SumFreeSize -= allocSize;
9586 }
9587 
9588 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9589 {
9590  if(node->type == Node::TYPE_SPLIT)
9591  {
9592  DeleteNode(node->split.leftChild->buddy);
9593  DeleteNode(node->split.leftChild);
9594  }
9595 
9596  vma_delete(GetAllocationCallbacks(), node);
9597 }
9598 
9599 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9600 {
9601  VMA_VALIDATE(level < m_LevelCount);
9602  VMA_VALIDATE(curr->parent == parent);
9603  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9604  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9605  switch(curr->type)
9606  {
9607  case Node::TYPE_FREE:
9608  // curr->free.prev, next are validated separately.
9609  ctx.calculatedSumFreeSize += levelNodeSize;
9610  ++ctx.calculatedFreeCount;
9611  break;
9612  case Node::TYPE_ALLOCATION:
9613  ++ctx.calculatedAllocationCount;
9614  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9615  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9616  break;
9617  case Node::TYPE_SPLIT:
9618  {
9619  const uint32_t childrenLevel = level + 1;
9620  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9621  const Node* const leftChild = curr->split.leftChild;
9622  VMA_VALIDATE(leftChild != VMA_NULL);
9623  VMA_VALIDATE(leftChild->offset == curr->offset);
9624  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9625  {
9626  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9627  }
9628  const Node* const rightChild = leftChild->buddy;
9629  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9630  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9631  {
9632  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9633  }
9634  }
9635  break;
9636  default:
9637  return false;
9638  }
9639 
9640  return true;
9641 }
9642 
9643 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9644 {
9645  // I know this could be optimized somehow e.g. by using std::bit_width() (formerly log2p1) from C++20.
9646  uint32_t level = 0;
9647  VkDeviceSize currLevelNodeSize = m_UsableSize;
9648  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9649  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9650  {
9651  ++level;
9652  currLevelNodeSize = nextLevelNodeSize;
9653  nextLevelNodeSize = currLevelNodeSize >> 1;
9654  }
9655  return level;
9656 }
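
The loop above computes, in effect, floor(log2(m_UsableSize / allocSize)) capped at the deepest level. A minimal standalone sketch of that closed form, assuming C++20 and a power-of-two usable size (hypothetical helper, not part of vk_mem_alloc.h):

#include <algorithm> // std::min
#include <bit>       // std::bit_width (C++20)
#include <cstdint>

// Deepest buddy level whose node size (usableSize >> level) still holds allocSize.
// Assumes 0 < allocSize <= usableSize, usableSize a power of 2, levelCount >= 1.
static uint32_t AllocSizeToLevelSketch(uint64_t usableSize, uint64_t allocSize, uint32_t levelCount)
{
    // bit_width(x) - 1 == floor(log2(x)) for x > 0.
    const uint32_t level = static_cast<uint32_t>(std::bit_width(usableSize / allocSize)) - 1;
    return std::min(level, levelCount - 1);
}
// e.g. usableSize = 256, allocSize = 48 -> level 2 (node sizes 256, 128, 64, ...).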
9657 
9658 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9659 {
9660  // Find node and level.
9661  Node* node = m_Root;
9662  VkDeviceSize nodeOffset = 0;
9663  uint32_t level = 0;
9664  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9665  while(node->type == Node::TYPE_SPLIT)
9666  {
9667  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9668  if(offset < nodeOffset + nextLevelSize)
9669  {
9670  node = node->split.leftChild;
9671  }
9672  else
9673  {
9674  node = node->split.leftChild->buddy;
9675  nodeOffset += nextLevelSize;
9676  }
9677  ++level;
9678  levelNodeSize = nextLevelSize;
9679  }
9680 
9681  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9682  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9683 
9684  ++m_FreeCount;
9685  --m_AllocationCount;
9686  m_SumFreeSize += alloc->GetSize();
9687 
9688  node->type = Node::TYPE_FREE;
9689 
9690  // Join free nodes if possible.
9691  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9692  {
9693  RemoveFromFreeList(level, node->buddy);
9694  Node* const parent = node->parent;
9695 
9696  vma_delete(GetAllocationCallbacks(), node->buddy);
9697  vma_delete(GetAllocationCallbacks(), node);
9698  parent->type = Node::TYPE_FREE;
9699 
9700  node = parent;
9701  --level;
9702  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9703  --m_FreeCount;
9704  }
9705 
9706  AddToFreeListFront(level, node);
9707 }
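
FreeAtOffset() finds the node by walking the split tree from the root and then merges free buddies bottom-up through the stored parent/buddy pointers. For intuition, a hedged sketch of the arithmetic equivalent used in classic array-based buddy allocators (not how this implementation locates buddies):

#include <cstdint>

// For a node of size nodeSize (a power of 2) at an offset aligned to nodeSize,
// the buddy lives at offset XOR nodeSize: the XOR flips the single bit that
// distinguishes a left child from its right sibling.
static uint64_t BuddyOffsetSketch(uint64_t offset, uint64_t nodeSize)
{
    return offset ^ nodeSize;
}
// e.g. nodeSize = 64: the node at offset 64 has its buddy at offset 0, and vice versa.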
9708 
9709 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9710 {
9711  switch(node->type)
9712  {
9713  case Node::TYPE_FREE:
9714  ++outInfo.unusedRangeCount;
9715  outInfo.unusedBytes += levelNodeSize;
9716  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9717  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
9718  break;
9719  case Node::TYPE_ALLOCATION:
9720  {
9721  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9722  ++outInfo.allocationCount;
9723  outInfo.usedBytes += allocSize;
9724  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9725  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
9726 
9727  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9728  if(unusedRangeSize > 0)
9729  {
9730  ++outInfo.unusedRangeCount;
9731  outInfo.unusedBytes += unusedRangeSize;
9732  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9733  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9734  }
9735  }
9736  break;
9737  case Node::TYPE_SPLIT:
9738  {
9739  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9740  const Node* const leftChild = node->split.leftChild;
9741  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9742  const Node* const rightChild = leftChild->buddy;
9743  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9744  }
9745  break;
9746  default:
9747  VMA_ASSERT(0);
9748  }
9749 }
9750 
9751 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9752 {
9753  VMA_ASSERT(node->type == Node::TYPE_FREE);
9754 
9755  // List is empty.
9756  Node* const frontNode = m_FreeList[level].front;
9757  if(frontNode == VMA_NULL)
9758  {
9759  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9760  node->free.prev = node->free.next = VMA_NULL;
9761  m_FreeList[level].front = m_FreeList[level].back = node;
9762  }
9763  else
9764  {
9765  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9766  node->free.prev = VMA_NULL;
9767  node->free.next = frontNode;
9768  frontNode->free.prev = node;
9769  m_FreeList[level].front = node;
9770  }
9771 }
9772 
9773 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9774 {
9775  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9776 
9777  // It is at the front.
9778  if(node->free.prev == VMA_NULL)
9779  {
9780  VMA_ASSERT(m_FreeList[level].front == node);
9781  m_FreeList[level].front = node->free.next;
9782  }
9783  else
9784  {
9785  Node* const prevFreeNode = node->free.prev;
9786  VMA_ASSERT(prevFreeNode->free.next == node);
9787  prevFreeNode->free.next = node->free.next;
9788  }
9789 
9790  // It is at the back.
9791  if(node->free.next == VMA_NULL)
9792  {
9793  VMA_ASSERT(m_FreeList[level].back == node);
9794  m_FreeList[level].back = node->free.prev;
9795  }
9796  else
9797  {
9798  Node* const nextFreeNode = node->free.next;
9799  VMA_ASSERT(nextFreeNode->free.prev == node);
9800  nextFreeNode->free.prev = node->free.prev;
9801  }
9802 }
9803 
9804 #if VMA_STATS_STRING_ENABLED
9805 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9806 {
9807  switch(node->type)
9808  {
9809  case Node::TYPE_FREE:
9810  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9811  break;
9812  case Node::TYPE_ALLOCATION:
9813  {
9814  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9815  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9816  if(allocSize < levelNodeSize)
9817  {
9818  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9819  }
9820  }
9821  break;
9822  case Node::TYPE_SPLIT:
9823  {
9824  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9825  const Node* const leftChild = node->split.leftChild;
9826  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9827  const Node* const rightChild = leftChild->buddy;
9828  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9829  }
9830  break;
9831  default:
9832  VMA_ASSERT(0);
9833  }
9834 }
9835 #endif // #if VMA_STATS_STRING_ENABLED
9836 
9837 
9838 //////////////////////////////////////////////////////////////////////////////
9839 // class VmaDeviceMemoryBlock
9840 
9841 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9842  m_pMetadata(VMA_NULL),
9843  m_MemoryTypeIndex(UINT32_MAX),
9844  m_Id(0),
9845  m_hMemory(VK_NULL_HANDLE),
9846  m_MapCount(0),
9847  m_pMappedData(VMA_NULL)
9848 {
9849 }
9850 
9851 void VmaDeviceMemoryBlock::Init(
9852  VmaAllocator hAllocator,
9853  uint32_t newMemoryTypeIndex,
9854  VkDeviceMemory newMemory,
9855  VkDeviceSize newSize,
9856  uint32_t id,
9857  uint32_t algorithm)
9858 {
9859  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9860 
9861  m_MemoryTypeIndex = newMemoryTypeIndex;
9862  m_Id = id;
9863  m_hMemory = newMemory;
9864 
9865  switch(algorithm)
9866  {
9867  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
9868  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9869  break;
9870  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
9871  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
9872  break;
9873  default:
9874  VMA_ASSERT(0);
9875  // Fall-through.
9876  case 0:
9877  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9878  }
9879  m_pMetadata->Init(newSize);
9880 }
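
Which metadata class Init() instantiates is selected by the pool's algorithm bits. A hedged usage sketch of requesting the linear algorithm through the public API (allocator and memoryTypeIndex assumed obtained elsewhere, e.g. via vmaFindMemoryTypeIndex; sizes illustrative):

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // selects VmaBlockMetadata_Linear above
poolCreateInfo.blockSize = 16ull * 1024 * 1024;              // one fixed 16 MiB block
poolCreateInfo.maxBlockCount = 1;                            // enables ring-buffer style reuse

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);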
9881 
9882 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9883 {
9884  // This is the most important assert in the entire library.
9885  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9886  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9887 
9888  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9889  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9890  m_hMemory = VK_NULL_HANDLE;
9891 
9892  vma_delete(allocator, m_pMetadata);
9893  m_pMetadata = VMA_NULL;
9894 }
9895 
9896 bool VmaDeviceMemoryBlock::Validate() const
9897 {
9898  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9899  (m_pMetadata->GetSize() != 0));
9900 
9901  return m_pMetadata->Validate();
9902 }
9903 
9904 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9905 {
9906  void* pData = nullptr;
9907  VkResult res = Map(hAllocator, 1, &pData);
9908  if(res != VK_SUCCESS)
9909  {
9910  return res;
9911  }
9912 
9913  res = m_pMetadata->CheckCorruption(pData);
9914 
9915  Unmap(hAllocator, 1);
9916 
9917  return res;
9918 }
9919 
9920 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9921 {
9922  if(count == 0)
9923  {
9924  return VK_SUCCESS;
9925  }
9926 
9927  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9928  if(m_MapCount != 0)
9929  {
9930  m_MapCount += count;
9931  VMA_ASSERT(m_pMappedData != VMA_NULL);
9932  if(ppData != VMA_NULL)
9933  {
9934  *ppData = m_pMappedData;
9935  }
9936  return VK_SUCCESS;
9937  }
9938  else
9939  {
9940  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9941  hAllocator->m_hDevice,
9942  m_hMemory,
9943  0, // offset
9944  VK_WHOLE_SIZE,
9945  0, // flags
9946  &m_pMappedData);
9947  if(result == VK_SUCCESS)
9948  {
9949  if(ppData != VMA_NULL)
9950  {
9951  *ppData = m_pMappedData;
9952  }
9953  m_MapCount = count;
9954  }
9955  return result;
9956  }
9957 }
9958 
9959 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9960 {
9961  if(count == 0)
9962  {
9963  return;
9964  }
9965 
9966  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9967  if(m_MapCount >= count)
9968  {
9969  m_MapCount -= count;
9970  if(m_MapCount == 0)
9971  {
9972  m_pMappedData = VMA_NULL;
9973  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9974  }
9975  }
9976  else
9977  {
9978  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
9979  }
9980 }
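
Map() and Unmap() reference-count the underlying vkMapMemory/vkUnmapMemory, so several allocations in one block can be mapped concurrently. A hedged usage sketch against the public entry points (allocator, alloc, srcData, srcSize assumed created elsewhere):

#include <cstring> // memcpy

void UploadTwiceMappedSketch(VmaAllocator allocator, VmaAllocation alloc,
                             const void* srcData, size_t srcSize)
{
    void* p1 = nullptr;
    void* p2 = nullptr;
    vmaMapMemory(allocator, alloc, &p1); // count 0 -> 1: block actually mapped
    vmaMapMemory(allocator, alloc, &p2); // count 1 -> 2: only bumps the counter, p2 == p1
    memcpy(p1, srcData, srcSize);
    vmaUnmapMemory(allocator, alloc);    // count 2 -> 1: memory stays mapped
    vmaUnmapMemory(allocator, alloc);    // count 1 -> 0: block actually unmapped
}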
9981 
9982 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9983 {
9984  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9985  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9986 
9987  void* pData;
9988  VkResult res = Map(hAllocator, 1, &pData);
9989  if(res != VK_SUCCESS)
9990  {
9991  return res;
9992  }
9993 
9994  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9995  VmaWriteMagicValue(pData, allocOffset + allocSize);
9996 
9997  Unmap(hAllocator, 1);
9998 
9999  return VK_SUCCESS;
10000 }
10001 
10002 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10003 {
10004  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10005  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10006 
10007  void* pData;
10008  VkResult res = Map(hAllocator, 1, &pData);
10009  if(res != VK_SUCCESS)
10010  {
10011  return res;
10012  }
10013 
10014  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10015  {
10016  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10017  }
10018  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10019  {
10020  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10021  }
10022 
10023  Unmap(hAllocator, 1);
10024 
10025  return VK_SUCCESS;
10026 }
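
The two functions above write and validate a magic value in the margins placed before and after every allocation. They only do anything under the corruption-detection configuration, which is chosen at compile time in user code. A hedged configuration sketch (values illustrative):

#define VMA_DEBUG_MARGIN 16           // bytes of margin around every allocation, multiple of 4
#define VMA_DEBUG_DETECT_CORRUPTION 1 // fill margins with a magic value, validate on free
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"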
10027 
10028 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10029  const VmaAllocator hAllocator,
10030  const VmaAllocation hAllocation,
10031  VkBuffer hBuffer)
10032 {
10033  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10034  hAllocation->GetBlock() == this);
10035  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10036  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10037  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10038  hAllocator->m_hDevice,
10039  hBuffer,
10040  m_hMemory,
10041  hAllocation->GetOffset());
10042 }
10043 
10044 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10045  const VmaAllocator hAllocator,
10046  const VmaAllocation hAllocation,
10047  VkImage hImage)
10048 {
10049  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10050  hAllocation->GetBlock() == this);
10051  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10052  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10053  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10054  hAllocator->m_hDevice,
10055  hImage,
10056  m_hMemory,
10057  hAllocation->GetOffset());
10058 }
10059 
10060 static void InitStatInfo(VmaStatInfo& outInfo)
10061 {
10062  memset(&outInfo, 0, sizeof(outInfo));
10063  outInfo.allocationSizeMin = UINT64_MAX;
10064  outInfo.unusedRangeSizeMin = UINT64_MAX;
10065 }
10066 
10067 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10068 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10069 {
10070  inoutInfo.blockCount += srcInfo.blockCount;
10071  inoutInfo.allocationCount += srcInfo.allocationCount;
10072  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10073  inoutInfo.usedBytes += srcInfo.usedBytes;
10074  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10075  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10076  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10077  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10078  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10079 }
10080 
10081 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10082 {
10083  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10084  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10085  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10086  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10087 }
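
VmaRoundDiv (defined earlier in this file) rounds the averages to the nearest integer rather than truncating. A minimal sketch of the same idea:

#include <cstdint>

static uint64_t RoundDivSketch(uint64_t x, uint64_t y)
{
    return (x + y / 2) / y;
}
// e.g. usedBytes = 10, allocationCount = 4 -> allocationSizeAvg = 3 rather than 2.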
10088 
10089 VmaPool_T::VmaPool_T(
10090  VmaAllocator hAllocator,
10091  const VmaPoolCreateInfo& createInfo,
10092  VkDeviceSize preferredBlockSize) :
10093  m_BlockVector(
10094  hAllocator,
10095  createInfo.memoryTypeIndex,
10096  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10097  createInfo.minBlockCount,
10098  createInfo.maxBlockCount,
10099  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10100  createInfo.frameInUseCount,
10101  true, // isCustomPool
10102  createInfo.blockSize != 0, // explicitBlockSize
10103  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10104  m_Id(0)
10105 {
10106 }
10107 
10108 VmaPool_T::~VmaPool_T()
10109 {
10110 }
10111 
10112 #if VMA_STATS_STRING_ENABLED
10113 
10114 #endif // #if VMA_STATS_STRING_ENABLED
10115 
10116 VmaBlockVector::VmaBlockVector(
10117  VmaAllocator hAllocator,
10118  uint32_t memoryTypeIndex,
10119  VkDeviceSize preferredBlockSize,
10120  size_t minBlockCount,
10121  size_t maxBlockCount,
10122  VkDeviceSize bufferImageGranularity,
10123  uint32_t frameInUseCount,
10124  bool isCustomPool,
10125  bool explicitBlockSize,
10126  uint32_t algorithm) :
10127  m_hAllocator(hAllocator),
10128  m_MemoryTypeIndex(memoryTypeIndex),
10129  m_PreferredBlockSize(preferredBlockSize),
10130  m_MinBlockCount(minBlockCount),
10131  m_MaxBlockCount(maxBlockCount),
10132  m_BufferImageGranularity(bufferImageGranularity),
10133  m_FrameInUseCount(frameInUseCount),
10134  m_IsCustomPool(isCustomPool),
10135  m_ExplicitBlockSize(explicitBlockSize),
10136  m_Algorithm(algorithm),
10137  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10138  m_HasEmptyBlock(false),
10139  m_pDefragmentator(VMA_NULL),
10140  m_NextBlockId(0)
10141 {
10142 }
10143 
10144 VmaBlockVector::~VmaBlockVector()
10145 {
10146  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10147 
10148  for(size_t i = m_Blocks.size(); i--; )
10149  {
10150  m_Blocks[i]->Destroy(m_hAllocator);
10151  vma_delete(m_hAllocator, m_Blocks[i]);
10152  }
10153 }
10154 
10155 VkResult VmaBlockVector::CreateMinBlocks()
10156 {
10157  for(size_t i = 0; i < m_MinBlockCount; ++i)
10158  {
10159  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10160  if(res != VK_SUCCESS)
10161  {
10162  return res;
10163  }
10164  }
10165  return VK_SUCCESS;
10166 }
10167 
10168 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10169 {
10170  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10171 
10172  const size_t blockCount = m_Blocks.size();
10173 
10174  pStats->size = 0;
10175  pStats->unusedSize = 0;
10176  pStats->allocationCount = 0;
10177  pStats->unusedRangeCount = 0;
10178  pStats->unusedRangeSizeMax = 0;
10179  pStats->blockCount = blockCount;
10180 
10181  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10182  {
10183  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10184  VMA_ASSERT(pBlock);
10185  VMA_HEAVY_ASSERT(pBlock->Validate());
10186  pBlock->m_pMetadata->AddPoolStats(*pStats);
10187  }
10188 }
10189 
10190 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10191 {
10192  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10193  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10194  (VMA_DEBUG_MARGIN > 0) &&
10195  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10196 }
10197 
10198 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10199 
10200 VkResult VmaBlockVector::Allocate(
10201  VmaPool hCurrentPool,
10202  uint32_t currentFrameIndex,
10203  VkDeviceSize size,
10204  VkDeviceSize alignment,
10205  const VmaAllocationCreateInfo& createInfo,
10206  VmaSuballocationType suballocType,
10207  VmaAllocation* pAllocation)
10208 {
10209  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10210  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10211  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10212  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10213  const bool canCreateNewBlock =
10214  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10215  (m_Blocks.size() < m_MaxBlockCount);
10216  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10217 
10218  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
10219  // which in turn is available only when maxBlockCount = 1.
10220  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10221  {
10222  canMakeOtherLost = false;
10223  }
10224 
10225  // Upper address can only be used with linear allocator and within single memory block.
10226  if(isUpperAddress &&
10227  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10228  {
10229  return VK_ERROR_FEATURE_NOT_PRESENT;
10230  }
10231 
10232  // Validate strategy.
10233  switch(strategy)
10234  {
10235  case 0:
10236  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10237  break;
10238  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10239  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10240  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10241  break;
10242  default:
10243  return VK_ERROR_FEATURE_NOT_PRESENT;
10244  }
10245 
10246  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
10247  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10248  {
10249  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10250  }
10251 
10252  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10253 
10254  /*
10255  Under certain conditions this whole section can be skipped for optimization, so
10256  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10257  e.g. for custom pools with linear algorithm.
10258  */
10259  if(!canMakeOtherLost || canCreateNewBlock)
10260  {
10261  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10262  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10263  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10264 
10265  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10266  {
10267  // Use only last block.
10268  if(!m_Blocks.empty())
10269  {
10270  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10271  VMA_ASSERT(pCurrBlock);
10272  VkResult res = AllocateFromBlock(
10273  pCurrBlock,
10274  hCurrentPool,
10275  currentFrameIndex,
10276  size,
10277  alignment,
10278  allocFlagsCopy,
10279  createInfo.pUserData,
10280  suballocType,
10281  strategy,
10282  pAllocation);
10283  if(res == VK_SUCCESS)
10284  {
10285  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10286  return VK_SUCCESS;
10287  }
10288  }
10289  }
10290  else
10291  {
10292  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10293  {
10294  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10295  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10296  {
10297  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10298  VMA_ASSERT(pCurrBlock);
10299  VkResult res = AllocateFromBlock(
10300  pCurrBlock,
10301  hCurrentPool,
10302  currentFrameIndex,
10303  size,
10304  alignment,
10305  allocFlagsCopy,
10306  createInfo.pUserData,
10307  suballocType,
10308  strategy,
10309  pAllocation);
10310  if(res == VK_SUCCESS)
10311  {
10312  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10313  return VK_SUCCESS;
10314  }
10315  }
10316  }
10317  else // WORST_FIT, FIRST_FIT
10318  {
10319  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10320  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10321  {
10322  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10323  VMA_ASSERT(pCurrBlock);
10324  VkResult res = AllocateFromBlock(
10325  pCurrBlock,
10326  hCurrentPool,
10327  currentFrameIndex,
10328  size,
10329  alignment,
10330  allocFlagsCopy,
10331  createInfo.pUserData,
10332  suballocType,
10333  strategy,
10334  pAllocation);
10335  if(res == VK_SUCCESS)
10336  {
10337  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10338  return VK_SUCCESS;
10339  }
10340  }
10341  }
10342  }
10343 
10344  // 2. Try to create new block.
10345  if(canCreateNewBlock)
10346  {
10347  // Calculate optimal size for new block.
10348  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10349  uint32_t newBlockSizeShift = 0;
10350  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10351 
10352  if(!m_ExplicitBlockSize)
10353  {
10354  // Allocate 1/8, 1/4, 1/2 as first blocks.
10355  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10356  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10357  {
10358  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10359  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10360  {
10361  newBlockSize = smallerNewBlockSize;
10362  ++newBlockSizeShift;
10363  }
10364  else
10365  {
10366  break;
10367  }
10368  }
10369  }
10370 
10371  size_t newBlockIndex = 0;
10372  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10373  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10374  if(!m_ExplicitBlockSize)
10375  {
10376  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10377  {
10378  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10379  if(smallerNewBlockSize >= size)
10380  {
10381  newBlockSize = smallerNewBlockSize;
10382  ++newBlockSizeShift;
10383  res = CreateBlock(newBlockSize, &newBlockIndex);
10384  }
10385  else
10386  {
10387  break;
10388  }
10389  }
10390  }
10391 
10392  if(res == VK_SUCCESS)
10393  {
10394  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10395  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10396 
10397  res = AllocateFromBlock(
10398  pBlock,
10399  hCurrentPool,
10400  currentFrameIndex,
10401  size,
10402  alignment,
10403  allocFlagsCopy,
10404  createInfo.pUserData,
10405  suballocType,
10406  strategy,
10407  pAllocation);
10408  if(res == VK_SUCCESS)
10409  {
10410  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10411  return VK_SUCCESS;
10412  }
10413  else
10414  {
10415  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10416  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10417  }
10418  }
10419  }
10420  }
10421 
10422  // 3. Try to allocate from existing blocks with making other allocations lost.
10423  if(canMakeOtherLost)
10424  {
10425  uint32_t tryIndex = 0;
10426  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10427  {
10428  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10429  VmaAllocationRequest bestRequest = {};
10430  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10431 
10432  // 1. Search existing allocations.
10433  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10434  {
10435  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10436  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10437  {
10438  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10439  VMA_ASSERT(pCurrBlock);
10440  VmaAllocationRequest currRequest = {};
10441  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10442  currentFrameIndex,
10443  m_FrameInUseCount,
10444  m_BufferImageGranularity,
10445  size,
10446  alignment,
10447  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10448  suballocType,
10449  canMakeOtherLost,
10450  strategy,
10451  &currRequest))
10452  {
10453  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10454  if(pBestRequestBlock == VMA_NULL ||
10455  currRequestCost < bestRequestCost)
10456  {
10457  pBestRequestBlock = pCurrBlock;
10458  bestRequest = currRequest;
10459  bestRequestCost = currRequestCost;
10460 
10461  if(bestRequestCost == 0)
10462  {
10463  break;
10464  }
10465  }
10466  }
10467  }
10468  }
10469  else // WORST_FIT, FIRST_FIT
10470  {
10471  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10472  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10473  {
10474  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10475  VMA_ASSERT(pCurrBlock);
10476  VmaAllocationRequest currRequest = {};
10477  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10478  currentFrameIndex,
10479  m_FrameInUseCount,
10480  m_BufferImageGranularity,
10481  size,
10482  alignment,
10483  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10484  suballocType,
10485  canMakeOtherLost,
10486  strategy,
10487  &currRequest))
10488  {
10489  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10490  if(pBestRequestBlock == VMA_NULL ||
10491  currRequestCost < bestRequestCost ||
10492  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10493  {
10494  pBestRequestBlock = pCurrBlock;
10495  bestRequest = currRequest;
10496  bestRequestCost = currRequestCost;
10497 
10498  if(bestRequestCost == 0 ||
10499  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10500  {
10501  break;
10502  }
10503  }
10504  }
10505  }
10506  }
10507 
10508  if(pBestRequestBlock != VMA_NULL)
10509  {
10510  if(mapped)
10511  {
10512  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10513  if(res != VK_SUCCESS)
10514  {
10515  return res;
10516  }
10517  }
10518 
10519  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10520  currentFrameIndex,
10521  m_FrameInUseCount,
10522  &bestRequest))
10523  {
10524  // We no longer have an empty block.
10525  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10526  {
10527  m_HasEmptyBlock = false;
10528  }
10529  // Allocate from this pBlock.
10530  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10531  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10532  (*pAllocation)->InitBlockAllocation(
10533  hCurrentPool,
10534  pBestRequestBlock,
10535  bestRequest.offset,
10536  alignment,
10537  size,
10538  suballocType,
10539  mapped,
10540  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10541  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10542  VMA_DEBUG_LOG("  Returned from existing block");
10543  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10544  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10545  {
10546  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10547  }
10548  if(IsCorruptionDetectionEnabled())
10549  {
10550  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10551  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10552  }
10553  return VK_SUCCESS;
10554  }
10555  // else: Some allocations must have been touched while we are here. Next try.
10556  }
10557  else
10558  {
10559  // Could not find place in any of the blocks - break outer loop.
10560  break;
10561  }
10562  }
10563  /* Maximum number of tries exceeded - a very unlikely event: it can happen when
10564  many other threads simultaneously touch allocations, making it impossible to
10565  mark them as lost while we try to allocate. */
10566  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10567  {
10568  return VK_ERROR_TOO_MANY_OBJECTS;
10569  }
10570  }
10571 
10572  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10573 }
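
Taken together, Allocate() tries three stages: (1) existing blocks, (2) a new block with progressive size fallback, (3) making other allocations lost. From the caller's side those stages are steered by VmaAllocationCreateInfo flags; a hedged sketch (buffer/image creation omitted):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags =
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT |   // forbids stage 2: no new VkDeviceMemory
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; // stage 1 scans blocks forward, least free space first
// Leaving out VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT also disables stage 3.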
10574 
10575 void VmaBlockVector::Free(
10576  VmaAllocation hAllocation)
10577 {
10578  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10579 
10580  // Scope for lock.
10581  {
10582  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10583 
10584  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10585 
10586  if(IsCorruptionDetectionEnabled())
10587  {
10588  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10589  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10590  }
10591 
10592  if(hAllocation->IsPersistentMap())
10593  {
10594  pBlock->Unmap(m_hAllocator, 1);
10595  }
10596 
10597  pBlock->m_pMetadata->Free(hAllocation);
10598  VMA_HEAVY_ASSERT(pBlock->Validate());
10599 
10600  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10601 
10602  // pBlock became empty after this deallocation.
10603  if(pBlock->m_pMetadata->IsEmpty())
10604  {
10605  // We already have an empty block - we don't want two, so delete this one.
10606  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10607  {
10608  pBlockToDelete = pBlock;
10609  Remove(pBlock);
10610  }
10611  // We now have our first empty block.
10612  else
10613  {
10614  m_HasEmptyBlock = true;
10615  }
10616  }
10617  // pBlock didn't become empty, but we have another empty block - find and free that one.
10618  // (This is optional, heuristics.)
10619  else if(m_HasEmptyBlock)
10620  {
10621  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10622  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10623  {
10624  pBlockToDelete = pLastBlock;
10625  m_Blocks.pop_back();
10626  m_HasEmptyBlock = false;
10627  }
10628  }
10629 
10630  IncrementallySortBlocks();
10631  }
10632 
10633  // Destruction of an empty block. Deferred until this point, outside of the
10634  // mutex lock, for performance reasons.
10635  if(pBlockToDelete != VMA_NULL)
10636  {
10637  VMA_DEBUG_LOG(" Deleted empty allocation");
10638  pBlockToDelete->Destroy(m_hAllocator);
10639  vma_delete(m_hAllocator, pBlockToDelete);
10640  }
10641 }
10642 
10643 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10644 {
10645  VkDeviceSize result = 0;
10646  for(size_t i = m_Blocks.size(); i--; )
10647  {
10648  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10649  if(result >= m_PreferredBlockSize)
10650  {
10651  break;
10652  }
10653  }
10654  return result;
10655 }
10656 
10657 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10658 {
10659  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10660  {
10661  if(m_Blocks[blockIndex] == pBlock)
10662  {
10663  VmaVectorRemove(m_Blocks, blockIndex);
10664  return;
10665  }
10666  }
10667  VMA_ASSERT(0);
10668 }
10669 
10670 void VmaBlockVector::IncrementallySortBlocks()
10671 {
10672  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10673  {
10674  // Bubble sort only until first swap.
10675  for(size_t i = 1; i < m_Blocks.size(); ++i)
10676  {
10677  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10678  {
10679  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10680  return;
10681  }
10682  }
10683  }
10684 }
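
Each call performs at most one adjacent swap, amortizing the sort over many Free() calls. A worked trace with hypothetical per-block free-space values:

//   start:  [40, 10, 30, 20]
//   call 1: [10, 40, 30, 20]  (swapped 40,10, then returned)
//   call 2: [10, 30, 40, 20]
//   call 3: [10, 30, 20, 40]
//   call 4: [10, 20, 30, 40]  converged to ascending free space, so the forward
//                             scan in Allocate() visits the fullest blocks first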
10685 
10686 VkResult VmaBlockVector::AllocateFromBlock(
10687  VmaDeviceMemoryBlock* pBlock,
10688  VmaPool hCurrentPool,
10689  uint32_t currentFrameIndex,
10690  VkDeviceSize size,
10691  VkDeviceSize alignment,
10692  VmaAllocationCreateFlags allocFlags,
10693  void* pUserData,
10694  VmaSuballocationType suballocType,
10695  uint32_t strategy,
10696  VmaAllocation* pAllocation)
10697 {
10698  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10699  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10700  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10701  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10702 
10703  VmaAllocationRequest currRequest = {};
10704  if(pBlock->m_pMetadata->CreateAllocationRequest(
10705  currentFrameIndex,
10706  m_FrameInUseCount,
10707  m_BufferImageGranularity,
10708  size,
10709  alignment,
10710  isUpperAddress,
10711  suballocType,
10712  false, // canMakeOtherLost
10713  strategy,
10714  &currRequest))
10715  {
10716  // Allocate from pCurrBlock.
10717  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10718 
10719  if(mapped)
10720  {
10721  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10722  if(res != VK_SUCCESS)
10723  {
10724  return res;
10725  }
10726  }
10727 
10728  // We no longer have an empty block.
10729  if(pBlock->m_pMetadata->IsEmpty())
10730  {
10731  m_HasEmptyBlock = false;
10732  }
10733 
10734  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10735  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10736  (*pAllocation)->InitBlockAllocation(
10737  hCurrentPool,
10738  pBlock,
10739  currRequest.offset,
10740  alignment,
10741  size,
10742  suballocType,
10743  mapped,
10744  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10745  VMA_HEAVY_ASSERT(pBlock->Validate());
10746  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10747  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10748  {
10749  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10750  }
10751  if(IsCorruptionDetectionEnabled())
10752  {
10753  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10754  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10755  }
10756  return VK_SUCCESS;
10757  }
10758  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10759 }
10760 
10761 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10762 {
10763  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10764  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10765  allocInfo.allocationSize = blockSize;
10766  VkDeviceMemory mem = VK_NULL_HANDLE;
10767  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10768  if(res < 0)
10769  {
10770  return res;
10771  }
10772 
10773  // New VkDeviceMemory successfully created.
10774 
10775  // Create new Allocation for it.
10776  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10777  pBlock->Init(
10778  m_hAllocator,
10779  m_MemoryTypeIndex,
10780  mem,
10781  allocInfo.allocationSize,
10782  m_NextBlockId++,
10783  m_Algorithm);
10784 
10785  m_Blocks.push_back(pBlock);
10786  if(pNewBlockIndex != VMA_NULL)
10787  {
10788  *pNewBlockIndex = m_Blocks.size() - 1;
10789  }
10790 
10791  return VK_SUCCESS;
10792 }
10793 
10794 #if VMA_STATS_STRING_ENABLED
10795 
10796 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10797 {
10798  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10799 
10800  json.BeginObject();
10801 
10802  if(m_IsCustomPool)
10803  {
10804  json.WriteString("MemoryTypeIndex");
10805  json.WriteNumber(m_MemoryTypeIndex);
10806 
10807  json.WriteString("BlockSize");
10808  json.WriteNumber(m_PreferredBlockSize);
10809 
10810  json.WriteString("BlockCount");
10811  json.BeginObject(true);
10812  if(m_MinBlockCount > 0)
10813  {
10814  json.WriteString("Min");
10815  json.WriteNumber((uint64_t)m_MinBlockCount);
10816  }
10817  if(m_MaxBlockCount < SIZE_MAX)
10818  {
10819  json.WriteString("Max");
10820  json.WriteNumber((uint64_t)m_MaxBlockCount);
10821  }
10822  json.WriteString("Cur");
10823  json.WriteNumber((uint64_t)m_Blocks.size());
10824  json.EndObject();
10825 
10826  if(m_FrameInUseCount > 0)
10827  {
10828  json.WriteString("FrameInUseCount");
10829  json.WriteNumber(m_FrameInUseCount);
10830  }
10831 
10832  if(m_Algorithm != 0)
10833  {
10834  json.WriteString("Algorithm");
10835  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10836  }
10837  }
10838  else
10839  {
10840  json.WriteString("PreferredBlockSize");
10841  json.WriteNumber(m_PreferredBlockSize);
10842  }
10843 
10844  json.WriteString("Blocks");
10845  json.BeginObject();
10846  for(size_t i = 0; i < m_Blocks.size(); ++i)
10847  {
10848  json.BeginString();
10849  json.ContinueString(m_Blocks[i]->GetId());
10850  json.EndString();
10851 
10852  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
10853  }
10854  json.EndObject();
10855 
10856  json.EndObject();
10857 }
10858 
10859 #endif // #if VMA_STATS_STRING_ENABLED
10860 
10861 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10862  VmaAllocator hAllocator,
10863  uint32_t currentFrameIndex)
10864 {
10865  if(m_pDefragmentator == VMA_NULL)
10866  {
10867  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10868  hAllocator,
10869  this,
10870  currentFrameIndex);
10871  }
10872 
10873  return m_pDefragmentator;
10874 }
10875 
10876 VkResult VmaBlockVector::Defragment(
10877  VmaDefragmentationStats* pDefragmentationStats,
10878  VkDeviceSize& maxBytesToMove,
10879  uint32_t& maxAllocationsToMove)
10880 {
10881  if(m_pDefragmentator == VMA_NULL)
10882  {
10883  return VK_SUCCESS;
10884  }
10885 
10886  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10887 
10888  // Defragment.
10889  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
10890 
10891  // Accumulate statistics.
10892  if(pDefragmentationStats != VMA_NULL)
10893  {
10894  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
10895  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
10896  pDefragmentationStats->bytesMoved += bytesMoved;
10897  pDefragmentationStats->allocationsMoved += allocationsMoved;
10898  VMA_ASSERT(bytesMoved <= maxBytesToMove);
10899  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
10900  maxBytesToMove -= bytesMoved;
10901  maxAllocationsToMove -= allocationsMoved;
10902  }
10903 
10904  // Free empty blocks.
10905  m_HasEmptyBlock = false;
10906  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10907  {
10908  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
10909  if(pBlock->m_pMetadata->IsEmpty())
10910  {
10911  if(m_Blocks.size() > m_MinBlockCount)
10912  {
10913  if(pDefragmentationStats != VMA_NULL)
10914  {
10915  ++pDefragmentationStats->deviceMemoryBlocksFreed;
10916  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
10917  }
10918 
10919  VmaVectorRemove(m_Blocks, blockIndex);
10920  pBlock->Destroy(m_hAllocator);
10921  vma_delete(m_hAllocator, pBlock);
10922  }
10923  else
10924  {
10925  m_HasEmptyBlock = true;
10926  }
10927  }
10928  }
10929 
10930  return result;
10931 }
10932 
10933 void VmaBlockVector::DestroyDefragmentator()
10934 {
10935  if(m_pDefragmentator != VMA_NULL)
10936  {
10937  vma_delete(m_hAllocator, m_pDefragmentator);
10938  m_pDefragmentator = VMA_NULL;
10939  }
10940 }
10941 
10942 void VmaBlockVector::MakePoolAllocationsLost(
10943  uint32_t currentFrameIndex,
10944  size_t* pLostAllocationCount)
10945 {
10946  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10947  size_t lostAllocationCount = 0;
10948  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10949  {
10950  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10951  VMA_ASSERT(pBlock);
10952  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10953  }
10954  if(pLostAllocationCount != VMA_NULL)
10955  {
10956  *pLostAllocationCount = lostAllocationCount;
10957  }
10958 }
10959 
10960 VkResult VmaBlockVector::CheckCorruption()
10961 {
10962  if(!IsCorruptionDetectionEnabled())
10963  {
10964  return VK_ERROR_FEATURE_NOT_PRESENT;
10965  }
10966 
10967  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10968  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10969  {
10970  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10971  VMA_ASSERT(pBlock);
10972  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10973  if(res != VK_SUCCESS)
10974  {
10975  return res;
10976  }
10977  }
10978  return VK_SUCCESS;
10979 }
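
This is reached through the public vmaCheckCorruption()/vmaCheckPoolCorruption() entry points. A hedged usage sketch (allocator assumed created elsewhere; UINT32_MAX means "all memory types"):

VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_ERROR_FEATURE_NOT_PRESENT: corruption detection not enabled (see the
// VMA_DEBUG_* setup above) or no eligible HOST_VISIBLE | HOST_COHERENT types.
// Any other non-success result indicates a detected problem.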
10980 
10981 void VmaBlockVector::AddStats(VmaStats* pStats)
10982 {
10983  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10984  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
10985 
10986  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10987 
10988  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10989  {
10990  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10991  VMA_ASSERT(pBlock);
10992  VMA_HEAVY_ASSERT(pBlock->Validate());
10993  VmaStatInfo allocationStatInfo;
10994  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
10995  VmaAddStatInfo(pStats->total, allocationStatInfo);
10996  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
10997  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
10998  }
10999 }
11000 
11001 //////////////////////////////////////////////////////////////////////////////
11002 // VmaDefragmentator members definition
11003 
11004 VmaDefragmentator::VmaDefragmentator(
11005  VmaAllocator hAllocator,
11006  VmaBlockVector* pBlockVector,
11007  uint32_t currentFrameIndex) :
11008  m_hAllocator(hAllocator),
11009  m_pBlockVector(pBlockVector),
11010  m_CurrentFrameIndex(currentFrameIndex),
11011  m_BytesMoved(0),
11012  m_AllocationsMoved(0),
11013  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11014  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11015 {
11016  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11017 }
11018 
11019 VmaDefragmentator::~VmaDefragmentator()
11020 {
11021  for(size_t i = m_Blocks.size(); i--; )
11022  {
11023  vma_delete(m_hAllocator, m_Blocks[i]);
11024  }
11025 }
11026 
11027 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11028 {
11029  AllocationInfo allocInfo;
11030  allocInfo.m_hAllocation = hAlloc;
11031  allocInfo.m_pChanged = pChanged;
11032  m_Allocations.push_back(allocInfo);
11033 }
11034 
11035 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11036 {
11037  // It has already been mapped for defragmentation.
11038  if(m_pMappedDataForDefragmentation)
11039  {
11040  *ppMappedData = m_pMappedDataForDefragmentation;
11041  return VK_SUCCESS;
11042  }
11043 
11044  // It is originally mapped.
11045  if(m_pBlock->GetMappedData())
11046  {
11047  *ppMappedData = m_pBlock->GetMappedData();
11048  return VK_SUCCESS;
11049  }
11050 
11051  // Map on first usage.
11052  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11053  *ppMappedData = m_pMappedDataForDefragmentation;
11054  return res;
11055 }
11056 
11057 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11058 {
11059  if(m_pMappedDataForDefragmentation != VMA_NULL)
11060  {
11061  m_pBlock->Unmap(hAllocator, 1);
11062  }
11063 }
11064 
11065 VkResult VmaDefragmentator::DefragmentRound(
11066  VkDeviceSize maxBytesToMove,
11067  uint32_t maxAllocationsToMove)
11068 {
11069  if(m_Blocks.empty())
11070  {
11071  return VK_SUCCESS;
11072  }
11073 
11074  size_t srcBlockIndex = m_Blocks.size() - 1;
11075  size_t srcAllocIndex = SIZE_MAX;
11076  for(;;)
11077  {
11078  // 1. Find next allocation to move.
11079  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11080  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11081  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11082  {
11083  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11084  {
11085  // Finished: no more allocations to process.
11086  if(srcBlockIndex == 0)
11087  {
11088  return VK_SUCCESS;
11089  }
11090  else
11091  {
11092  --srcBlockIndex;
11093  srcAllocIndex = SIZE_MAX;
11094  }
11095  }
11096  else
11097  {
11098  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11099  }
11100  }
11101 
11102  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11103  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11104 
11105  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11106  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11107  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11108  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11109 
11110  // 2. Try to find new place for this allocation in preceding or current block.
11111  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11112  {
11113  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11114  VmaAllocationRequest dstAllocRequest;
11115  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11116  m_CurrentFrameIndex,
11117  m_pBlockVector->GetFrameInUseCount(),
11118  m_pBlockVector->GetBufferImageGranularity(),
11119  size,
11120  alignment,
11121  false, // upperAddress
11122  suballocType,
11123  false, // canMakeOtherLost
11124  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
11125  &dstAllocRequest) &&
11126  MoveMakesSense(
11127  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11128  {
11129  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11130 
11131  // Reached limit on number of allocations or bytes to move.
11132  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11133  (m_BytesMoved + size > maxBytesToMove))
11134  {
11135  return VK_INCOMPLETE;
11136  }
11137 
11138  void* pDstMappedData = VMA_NULL;
11139  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11140  if(res != VK_SUCCESS)
11141  {
11142  return res;
11143  }
11144 
11145  void* pSrcMappedData = VMA_NULL;
11146  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11147  if(res != VK_SUCCESS)
11148  {
11149  return res;
11150  }
11151 
11152  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11153  memcpy(
11154  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11155  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11156  static_cast<size_t>(size));
11157 
11158  if(VMA_DEBUG_MARGIN > 0)
11159  {
11160  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11161  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11162  }
11163 
11164  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11165  dstAllocRequest,
11166  suballocType,
11167  size,
11168  false, // upperAddress
11169  allocInfo.m_hAllocation);
11170  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11171 
11172  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11173 
11174  if(allocInfo.m_pChanged != VMA_NULL)
11175  {
11176  *allocInfo.m_pChanged = VK_TRUE;
11177  }
11178 
11179  ++m_AllocationsMoved;
11180  m_BytesMoved += size;
11181 
11182  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11183 
11184  break;
11185  }
11186  }
11187 
11188  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
11189 
11190  if(srcAllocIndex > 0)
11191  {
11192  --srcAllocIndex;
11193  }
11194  else
11195  {
11196  if(srcBlockIndex > 0)
11197  {
11198  --srcBlockIndex;
11199  srcAllocIndex = SIZE_MAX;
11200  }
11201  else
11202  {
11203  return VK_SUCCESS;
11204  }
11205  }
11206  }
11207 }
11208 
11209 VkResult VmaDefragmentator::Defragment(
11210  VkDeviceSize maxBytesToMove,
11211  uint32_t maxAllocationsToMove)
11212 {
11213  if(m_Allocations.empty())
11214  {
11215  return VK_SUCCESS;
11216  }
11217 
11218  // Create block info for each block.
11219  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11220  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11221  {
11222  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11223  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11224  m_Blocks.push_back(pBlockInfo);
11225  }
11226 
11227  // Sort them by m_pBlock pointer value.
11228  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11229 
11230  // Move allocation infos from m_Allocations to appropriate m_Blocks[blockIndex].m_Allocations.
11231  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
11232  {
11233  AllocationInfo& allocInfo = m_Allocations[blockIndex];
11234  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
11235  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11236  {
11237  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11238  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11239  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11240  {
11241  (*it)->m_Allocations.push_back(allocInfo);
11242  }
11243  else
11244  {
11245  VMA_ASSERT(0);
11246  }
11247  }
11248  }
11249  m_Allocations.clear();
11250 
11251  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11252  {
11253  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11254  pBlockInfo->CalcHasNonMovableAllocations();
11255  pBlockInfo->SortAllocationsBySizeDescecnding();
11256  }
11257 
11258  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11259  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11260 
11261  // Execute defragmentation rounds (the main part).
11262  VkResult result = VK_SUCCESS;
11263  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11264  {
11265  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11266  }
11267 
11268  // Unmap blocks that were mapped for defragmentation.
11269  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11270  {
11271  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11272  }
11273 
11274  return result;
11275 }
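
Defragment() above is driven by the public vmaDefragment() call. A hedged usage sketch (allocator and the allocations vector assumed created elsewhere; limits illustrative):

#include <vector>

void DefragmentSketch(VmaAllocator allocator, std::vector<VmaAllocation>& allocations)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
    defragInfo.maxAllocationsToMove = 100;

    VmaDefragmentationStats stats = {};
    std::vector<VkBool32> changed(allocations.size());
    vmaDefragment(allocator, allocations.data(), allocations.size(),
                  changed.data(), &defragInfo, &stats);

    // Every allocation with changed[i] == VK_TRUE was moved: any buffer or
    // image bound to it must be recreated and rebound by the caller.
}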
11276 
11277 bool VmaDefragmentator::MoveMakesSense(
11278  size_t dstBlockIndex, VkDeviceSize dstOffset,
11279  size_t srcBlockIndex, VkDeviceSize srcOffset)
11280 {
11281  if(dstBlockIndex < srcBlockIndex)
11282  {
11283  return true;
11284  }
11285  if(dstBlockIndex > srcBlockIndex)
11286  {
11287  return false;
11288  }
11289  if(dstOffset < srcOffset)
11290  {
11291  return true;
11292  }
11293  return false;
11294 }
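
MoveMakesSense() is simply a lexicographic "less than" on the pair (blockIndex, offset): a move is worthwhile only if it lands strictly earlier in that order, which is what packs data toward the front blocks. An equivalent formulation (sketch, not library code):

#include <cstddef>
#include <tuple>

static bool MoveMakesSenseSketch(size_t dstBlock, VkDeviceSize dstOffset,
                                 size_t srcBlock, VkDeviceSize srcOffset)
{
    return std::tie(dstBlock, dstOffset) < std::tie(srcBlock, srcOffset);
}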
11295 
11296 //////////////////////////////////////////////////////////////////////////////
11297 // VmaRecorder
11298 
11299 #if VMA_RECORDING_ENABLED
11300 
11301 VmaRecorder::VmaRecorder() :
11302  m_UseMutex(true),
11303  m_Flags(0),
11304  m_File(VMA_NULL),
11305  m_Freq(INT64_MAX),
11306  m_StartCounter(INT64_MAX)
11307 {
11308 }
11309 
11310 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11311 {
11312  m_UseMutex = useMutex;
11313  m_Flags = settings.flags;
11314 
11315  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11316  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11317 
11318  // Open file for writing.
11319  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11320  if(err != 0)
11321  {
11322  return VK_ERROR_INITIALIZATION_FAILED;
11323  }
11324 
11325  // Write header.
11326  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11327  fprintf(m_File, "%s\n", "1,3");
11328 
11329  return VK_SUCCESS;
11330 }
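
Init() writes a two-line header - the format name, then file-format version "1,3" - and every Record* method below appends one CSV line of the form threadId,time,frameIndex,functionName,params. A hypothetical excerpt of a recording (all values illustrative):

Vulkan Memory Allocator,Calls recording
1,3
12345,0.002,0,vmaCreateAllocator
12345,0.105,1,vmaCreatePool,7,0,0,0,0,0,000001C8A2E0F000
12345,8.950,240,vmaDestroyAllocator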
11331 
11332 VmaRecorder::~VmaRecorder()
11333 {
11334  if(m_File != VMA_NULL)
11335  {
11336  fclose(m_File);
11337  }
11338 }
11339 
11340 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11341 {
11342  CallParams callParams;
11343  GetBasicParams(callParams);
11344 
11345  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11346  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11347  Flush();
11348 }
11349 
11350 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11351 {
11352  CallParams callParams;
11353  GetBasicParams(callParams);
11354 
11355  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11356  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11357  Flush();
11358 }
11359 
11360 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11361 {
11362  CallParams callParams;
11363  GetBasicParams(callParams);
11364 
11365  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11366  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11367  createInfo.memoryTypeIndex,
11368  createInfo.flags,
11369  createInfo.blockSize,
11370  createInfo.minBlockCount,
11371  createInfo.maxBlockCount,
11372  createInfo.frameInUseCount,
11373  pool);
11374  Flush();
11375 }
11376 
11377 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11378 {
11379  CallParams callParams;
11380  GetBasicParams(callParams);
11381 
11382  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11383  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11384  pool);
11385  Flush();
11386 }
11387 
11388 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11389  const VkMemoryRequirements& vkMemReq,
11390  const VmaAllocationCreateInfo& createInfo,
11391  VmaAllocation allocation)
11392 {
11393  CallParams callParams;
11394  GetBasicParams(callParams);
11395 
11396  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11397  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11398  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11399  vkMemReq.size,
11400  vkMemReq.alignment,
11401  vkMemReq.memoryTypeBits,
11402  createInfo.flags,
11403  createInfo.usage,
11404  createInfo.requiredFlags,
11405  createInfo.preferredFlags,
11406  createInfo.memoryTypeBits,
11407  createInfo.pool,
11408  allocation,
11409  userDataStr.GetString());
11410  Flush();
11411 }
11412 
11413 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11414  const VkMemoryRequirements& vkMemReq,
11415  bool requiresDedicatedAllocation,
11416  bool prefersDedicatedAllocation,
11417  const VmaAllocationCreateInfo& createInfo,
11418  VmaAllocation allocation)
11419 {
11420  CallParams callParams;
11421  GetBasicParams(callParams);
11422 
11423  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11424  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11425  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11426  vkMemReq.size,
11427  vkMemReq.alignment,
11428  vkMemReq.memoryTypeBits,
11429  requiresDedicatedAllocation ? 1 : 0,
11430  prefersDedicatedAllocation ? 1 : 0,
11431  createInfo.flags,
11432  createInfo.usage,
11433  createInfo.requiredFlags,
11434  createInfo.preferredFlags,
11435  createInfo.memoryTypeBits,
11436  createInfo.pool,
11437  allocation,
11438  userDataStr.GetString());
11439  Flush();
11440 }
11441 
11442 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11443  const VkMemoryRequirements& vkMemReq,
11444  bool requiresDedicatedAllocation,
11445  bool prefersDedicatedAllocation,
11446  const VmaAllocationCreateInfo& createInfo,
11447  VmaAllocation allocation)
11448 {
11449  CallParams callParams;
11450  GetBasicParams(callParams);
11451 
11452  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11453  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11454  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11455  vkMemReq.size,
11456  vkMemReq.alignment,
11457  vkMemReq.memoryTypeBits,
11458  requiresDedicatedAllocation ? 1 : 0,
11459  prefersDedicatedAllocation ? 1 : 0,
11460  createInfo.flags,
11461  createInfo.usage,
11462  createInfo.requiredFlags,
11463  createInfo.preferredFlags,
11464  createInfo.memoryTypeBits,
11465  createInfo.pool,
11466  allocation,
11467  userDataStr.GetString());
11468  Flush();
11469 }
11470 
11471 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11472  VmaAllocation allocation)
11473 {
11474  CallParams callParams;
11475  GetBasicParams(callParams);
11476 
11477  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11478  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11479  allocation);
11480  Flush();
11481 }
11482 
11483 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11484  VmaAllocation allocation,
11485  const void* pUserData)
11486 {
11487  CallParams callParams;
11488  GetBasicParams(callParams);
11489 
11490  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11491  UserDataString userDataStr(
11492  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11493  pUserData);
11494  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11495  allocation,
11496  userDataStr.GetString());
11497  Flush();
11498 }
11499 
11500 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11501  VmaAllocation allocation)
11502 {
11503  CallParams callParams;
11504  GetBasicParams(callParams);
11505 
11506  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11507  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11508  allocation);
11509  Flush();
11510 }
11511 
11512 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11513  VmaAllocation allocation)
11514 {
11515  CallParams callParams;
11516  GetBasicParams(callParams);
11517 
11518  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11519  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11520  allocation);
11521  Flush();
11522 }
11523 
11524 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11525  VmaAllocation allocation)
11526 {
11527  CallParams callParams;
11528  GetBasicParams(callParams);
11529 
11530  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11531  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11532  allocation);
11533  Flush();
11534 }
11535 
11536 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11537  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11538 {
11539  CallParams callParams;
11540  GetBasicParams(callParams);
11541 
11542  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11543  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11544  allocation,
11545  offset,
11546  size);
11547  Flush();
11548 }
11549 
11550 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11551  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11552 {
11553  CallParams callParams;
11554  GetBasicParams(callParams);
11555 
11556  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11557  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11558  allocation,
11559  offset,
11560  size);
11561  Flush();
11562 }
11563 
11564 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11565  const VkBufferCreateInfo& bufCreateInfo,
11566  const VmaAllocationCreateInfo& allocCreateInfo,
11567  VmaAllocation allocation)
11568 {
11569  CallParams callParams;
11570  GetBasicParams(callParams);
11571 
11572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11573  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11574  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11575  bufCreateInfo.flags,
11576  bufCreateInfo.size,
11577  bufCreateInfo.usage,
11578  bufCreateInfo.sharingMode,
11579  allocCreateInfo.flags,
11580  allocCreateInfo.usage,
11581  allocCreateInfo.requiredFlags,
11582  allocCreateInfo.preferredFlags,
11583  allocCreateInfo.memoryTypeBits,
11584  allocCreateInfo.pool,
11585  allocation,
11586  userDataStr.GetString());
11587  Flush();
11588 }
11589 
11590 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11591  const VkImageCreateInfo& imageCreateInfo,
11592  const VmaAllocationCreateInfo& allocCreateInfo,
11593  VmaAllocation allocation)
11594 {
11595  CallParams callParams;
11596  GetBasicParams(callParams);
11597 
11598  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11599  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11600  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11601  imageCreateInfo.flags,
11602  imageCreateInfo.imageType,
11603  imageCreateInfo.format,
11604  imageCreateInfo.extent.width,
11605  imageCreateInfo.extent.height,
11606  imageCreateInfo.extent.depth,
11607  imageCreateInfo.mipLevels,
11608  imageCreateInfo.arrayLayers,
11609  imageCreateInfo.samples,
11610  imageCreateInfo.tiling,
11611  imageCreateInfo.usage,
11612  imageCreateInfo.sharingMode,
11613  imageCreateInfo.initialLayout,
11614  allocCreateInfo.flags,
11615  allocCreateInfo.usage,
11616  allocCreateInfo.requiredFlags,
11617  allocCreateInfo.preferredFlags,
11618  allocCreateInfo.memoryTypeBits,
11619  allocCreateInfo.pool,
11620  allocation,
11621  userDataStr.GetString());
11622  Flush();
11623 }
11624 
11625 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11626  VmaAllocation allocation)
11627 {
11628  CallParams callParams;
11629  GetBasicParams(callParams);
11630 
11631  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11632  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11633  allocation);
11634  Flush();
11635 }
11636 
11637 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11638  VmaAllocation allocation)
11639 {
11640  CallParams callParams;
11641  GetBasicParams(callParams);
11642 
11643  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11644  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11645  allocation);
11646  Flush();
11647 }
11648 
11649 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11650  VmaAllocation allocation)
11651 {
11652  CallParams callParams;
11653  GetBasicParams(callParams);
11654 
11655  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11656  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11657  allocation);
11658  Flush();
11659 }
11660 
11661 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11662  VmaAllocation allocation)
11663 {
11664  CallParams callParams;
11665  GetBasicParams(callParams);
11666 
11667  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11668  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11669  allocation);
11670  Flush();
11671 }
11672 
11673 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11674  VmaPool pool)
11675 {
11676  CallParams callParams;
11677  GetBasicParams(callParams);
11678 
11679  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11680  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11681  pool);
11682  Flush();
11683 }
11684 
11685 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11686 {
11687  if(pUserData != VMA_NULL)
11688  {
11689  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11690  {
11691  m_Str = (const char*)pUserData;
11692  }
11693  else
11694  {
11695  sprintf_s(m_PtrStr, "%p", pUserData);
11696  m_Str = m_PtrStr;
11697  }
11698  }
11699  else
11700  {
11701  m_Str = "";
11702  }
11703 }
11704 
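// Illustrative examples of how UserDataString formats pUserData (values
// hypothetical):
// - With VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, pUserData is treated
//   as a null-terminated string and recorded verbatim, e.g. "MyVertexBuffer".
// - Otherwise the pointer value itself is recorded, e.g. "000001A2B3C4D5E6".
// - A null pUserData is recorded as an empty string.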
11705 void VmaRecorder::WriteConfiguration(
11706  const VkPhysicalDeviceProperties& devProps,
11707  const VkPhysicalDeviceMemoryProperties& memProps,
11708  bool dedicatedAllocationExtensionEnabled)
11709 {
11710  fprintf(m_File, "Config,Begin\n");
11711 
11712  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11713  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11714  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11715  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11716  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11717  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11718 
11719  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11720  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11721  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11722 
11723  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11724  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11725  {
11726  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11727  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11728  }
11729  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11730  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11731  {
11732  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11733  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11734  }
11735 
11736  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11737 
11738  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11739  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11740  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11741  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11742  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11743  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11744  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11745  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11746  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11747 
11748  fprintf(m_File, "Config,End\n");
11749 }
11750 
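// A Config section emitted by WriteConfiguration() looks like the following
// sketch (all values hypothetical):
/*
Config,Begin
PhysicalDevice,apiVersion,4198400
PhysicalDevice,vendorID,4098
PhysicalDeviceLimits,nonCoherentAtomSize,64
PhysicalDeviceMemory,HeapCount,2
PhysicalDeviceMemory,Heap,0,size,8589934592
Extension,VK_KHR_dedicated_allocation,1
Config,End
*/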
11751 void VmaRecorder::GetBasicParams(CallParams& outParams)
11752 {
11753  outParams.threadId = GetCurrentThreadId();
11754 
11755  LARGE_INTEGER counter;
11756  QueryPerformanceCounter(&counter);
11757  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11758 }
11759 
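// Worked example of the timestamp math above (numbers hypothetical): with a
// performance-counter frequency m_Freq of 10,000,000 ticks per second and
// counter.QuadPart - m_StartCounter == 25,000,000 ticks, the recorded time is
// 25,000,000 / 10,000,000 = 2.5 seconds since VmaRecorder::Init().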
11760 void VmaRecorder::Flush()
11761 {
11762  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11763  {
11764  fflush(m_File);
11765  }
11766 }
11767 
11768 #endif // #if VMA_RECORDING_ENABLED
11769 
11770 ////////////////////////////////////////////////////////////////////////////////
11771 // VmaAllocator_T
11772 
11773 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11774  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11775  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11776  m_hDevice(pCreateInfo->device),
11777  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11778  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11779  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11780  m_PreferredLargeHeapBlockSize(0),
11781  m_PhysicalDevice(pCreateInfo->physicalDevice),
11782  m_CurrentFrameIndex(0),
11783  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11784  m_NextPoolId(0)
11785 #if VMA_RECORDING_ENABLED
11786  ,m_pRecorder(VMA_NULL)
11787 #endif
11788 {
11789  if(VMA_DEBUG_DETECT_CORRUPTION)
11790  {
11791  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11792  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11793  }
11794 
11795  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11796 
11797 #if !(VMA_DEDICATED_ALLOCATION)
11798  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11799  {
11800  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11801  }
11802 #endif
11803 
11804  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
11805  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11806  memset(&m_MemProps, 0, sizeof(m_MemProps));
11807 
11808  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11809  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11810 
11811  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11812  {
11813  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11814  }
11815 
11816  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11817  {
11818  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11819  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11820  }
11821 
11822  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11823 
11824  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11825  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11826 
11827  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11828  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11829  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11830  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11831 
11832  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11833  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11834 
11835  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11836  {
11837  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11838  {
11839  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11840  if(limit != VK_WHOLE_SIZE)
11841  {
11842  m_HeapSizeLimit[heapIndex] = limit;
11843  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11844  {
11845  m_MemProps.memoryHeaps[heapIndex].size = limit;
11846  }
11847  }
11848  }
11849  }
11850 
11851  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11852  {
11853  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11854 
11855  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11856  this,
11857  memTypeIndex,
11858  preferredBlockSize,
11859  0,
11860  SIZE_MAX,
11861  GetBufferImageGranularity(),
11862  pCreateInfo->frameInUseCount,
11863  false, // isCustomPool
11864  false, // explicitBlockSize
11865  false); // linearAlgorithm
11866  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
11867  // because minBlockCount is 0.
11868  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11869 
11870  }
11871 }
11872 
11873 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11874 {
11875  VkResult res = VK_SUCCESS;
11876 
11877  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11878  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11879  {
11880 #if VMA_RECORDING_ENABLED
11881  m_pRecorder = vma_new(this, VmaRecorder)();
11882  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11883  if(res != VK_SUCCESS)
11884  {
11885  return res;
11886  }
11887  m_pRecorder->WriteConfiguration(
11888  m_PhysicalDeviceProperties,
11889  m_MemProps,
11890  m_UseKhrDedicatedAllocation);
11891  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11892 #else
11893  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11894  return VK_ERROR_FEATURE_NOT_PRESENT;
11895 #endif
11896  }
11897 
11898  return res;
11899 }
11900 
11901 VmaAllocator_T::~VmaAllocator_T()
11902 {
11903 #if VMA_RECORDING_ENABLED
11904  if(m_pRecorder != VMA_NULL)
11905  {
11906  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11907  vma_delete(this, m_pRecorder);
11908  }
11909 #endif
11910 
11911  VMA_ASSERT(m_Pools.empty());
11912 
11913  for(size_t i = GetMemoryTypeCount(); i--; )
11914  {
11915  vma_delete(this, m_pDedicatedAllocations[i]);
11916  vma_delete(this, m_pBlockVectors[i]);
11917  }
11918 }
11919 
11920 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
11921 {
11922 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11923  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
11924  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
11925  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
11926  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
11927  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
11928  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
11929  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
11930  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
11931  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
11932  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
11933  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
11934  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
11935  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
11936  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
11937  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
11938  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
11939 #if VMA_DEDICATED_ALLOCATION
11940  if(m_UseKhrDedicatedAllocation)
11941  {
11942  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
11943  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
11944  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11945  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11946  }
11947 #endif // #if VMA_DEDICATED_ALLOCATION
11948 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11949 
11950 #define VMA_COPY_IF_NOT_NULL(funcName) \
11951  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11952 
11953  if(pVulkanFunctions != VMA_NULL)
11954  {
11955  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11956  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11957  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11958  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11959  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11960  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11961  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11962  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11963  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11964  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11965  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11966  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11967  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
11968  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
11969  VMA_COPY_IF_NOT_NULL(vkCreateImage);
11970  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
11971 #if VMA_DEDICATED_ALLOCATION
11972  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
11973  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
11974 #endif
11975  }
11976 
11977 #undef VMA_COPY_IF_NOT_NULL
11978 
11979  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
11980  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
11981  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
11982  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
11983  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
11984  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
11985  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
11986  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
11987  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
11988  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
11989  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
11990  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
11991  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
11992  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
11993  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
11994  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
11995  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
11996  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
11997 #if VMA_DEDICATED_ALLOCATION
11998  if(m_UseKhrDedicatedAllocation)
11999  {
12000  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12001  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12002  }
12003 #endif
12004 }
12005 
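// Example (illustrative sketch): supplying the function pointers manually,
// e.g. when VMA_STATIC_VULKAN_FUNCTIONS is defined to 0 and Vulkan is loaded
// dynamically. Only two members are shown; each remaining member is filled
// the same way, and every non-null member overrides the corresponding
// default. The myLoaded* pointers are assumptions standing in for pointers
// obtained via vkGetInstanceProcAddr / vkGetDeviceProcAddr.
/*
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = myLoadedVkGetPhysicalDeviceProperties;
vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
// ...remaining members...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/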
12006 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12007 {
12008  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12009  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12010  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12011  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12012 }
12013 
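// Worked example: with the default macros (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB), a 512 MiB heap counts as
// small and gets 512 MiB / 8 = 64 MiB preferred blocks, while an 8 GiB heap
// gets the preferred large-heap block size of 256 MiB.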
12014 VkResult VmaAllocator_T::AllocateMemoryOfType(
12015  VkDeviceSize size,
12016  VkDeviceSize alignment,
12017  bool dedicatedAllocation,
12018  VkBuffer dedicatedBuffer,
12019  VkImage dedicatedImage,
12020  const VmaAllocationCreateInfo& createInfo,
12021  uint32_t memTypeIndex,
12022  VmaSuballocationType suballocType,
12023  VmaAllocation* pAllocation)
12024 {
12025  VMA_ASSERT(pAllocation != VMA_NULL);
12026  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12027 
12028  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12029 
12030  // If memory type is not HOST_VISIBLE, disable MAPPED.
12031  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12032  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12033  {
12034  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12035  }
12036 
12037  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12038  VMA_ASSERT(blockVector);
12039 
12040  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12041  bool preferDedicatedMemory =
12042  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12043  dedicatedAllocation ||
12044  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
12045  size > preferredBlockSize / 2;
12046 
12047  if(preferDedicatedMemory &&
12048  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12049  finalCreateInfo.pool == VK_NULL_HANDLE)
12050  {
12051  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12052  }
12053 
12054  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12055  {
12056  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12057  {
12058  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12059  }
12060  else
12061  {
12062  return AllocateDedicatedMemory(
12063  size,
12064  suballocType,
12065  memTypeIndex,
12066  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12067  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12068  finalCreateInfo.pUserData,
12069  dedicatedBuffer,
12070  dedicatedImage,
12071  pAllocation);
12072  }
12073  }
12074  else
12075  {
12076  VkResult res = blockVector->Allocate(
12077  VK_NULL_HANDLE, // hCurrentPool
12078  m_CurrentFrameIndex.load(),
12079  size,
12080  alignment,
12081  finalCreateInfo,
12082  suballocType,
12083  pAllocation);
12084  if(res == VK_SUCCESS)
12085  {
12086  return res;
12087  }
12088 
12089  // Block vector allocation failed: try dedicated memory.
12090  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12091  {
12092  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12093  }
12094  else
12095  {
12096  res = AllocateDedicatedMemory(
12097  size,
12098  suballocType,
12099  memTypeIndex,
12100  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12101  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12102  finalCreateInfo.pUserData,
12103  dedicatedBuffer,
12104  dedicatedImage,
12105  pAllocation);
12106  if(res == VK_SUCCESS)
12107  {
12108  // Succeeded: AllocateDedicatedMemory already filled pAllocation, nothing more to do here.
12109  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12110  return VK_SUCCESS;
12111  }
12112  else
12113  {
12114  // Everything failed: Return error code.
12115  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12116  return res;
12117  }
12118  }
12119  }
12120 }
12121 
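// Worked example of the dedicated-memory heuristic above: with a preferred
// block size of 256 MiB, a 200 MiB request exceeds 256 MiB / 2 = 128 MiB, so
// VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is added and the request gets
// its own VkDeviceMemory rather than a suballocation, unless it targets a
// custom pool or carries VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT.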
12122 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12123  VkDeviceSize size,
12124  VmaSuballocationType suballocType,
12125  uint32_t memTypeIndex,
12126  bool map,
12127  bool isUserDataString,
12128  void* pUserData,
12129  VkBuffer dedicatedBuffer,
12130  VkImage dedicatedImage,
12131  VmaAllocation* pAllocation)
12132 {
12133  VMA_ASSERT(pAllocation);
12134 
12135  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12136  allocInfo.memoryTypeIndex = memTypeIndex;
12137  allocInfo.allocationSize = size;
12138 
12139 #if VMA_DEDICATED_ALLOCATION
12140  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12141  if(m_UseKhrDedicatedAllocation)
12142  {
12143  if(dedicatedBuffer != VK_NULL_HANDLE)
12144  {
12145  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12146  dedicatedAllocInfo.buffer = dedicatedBuffer;
12147  allocInfo.pNext = &dedicatedAllocInfo;
12148  }
12149  else if(dedicatedImage != VK_NULL_HANDLE)
12150  {
12151  dedicatedAllocInfo.image = dedicatedImage;
12152  allocInfo.pNext = &dedicatedAllocInfo;
12153  }
12154  }
12155 #endif // #if VMA_DEDICATED_ALLOCATION
12156 
12157  // Allocate VkDeviceMemory.
12158  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12159  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12160  if(res < 0)
12161  {
12162  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12163  return res;
12164  }
12165 
12166  void* pMappedData = VMA_NULL;
12167  if(map)
12168  {
12169  res = (*m_VulkanFunctions.vkMapMemory)(
12170  m_hDevice,
12171  hMemory,
12172  0,
12173  VK_WHOLE_SIZE,
12174  0,
12175  &pMappedData);
12176  if(res < 0)
12177  {
12178  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12179  FreeVulkanMemory(memTypeIndex, size, hMemory);
12180  return res;
12181  }
12182  }
12183 
12184  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12185  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12186  (*pAllocation)->SetUserData(this, pUserData);
12187  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12188  {
12189  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12190  }
12191 
12192  // Register it in m_pDedicatedAllocations.
12193  {
12194  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12195  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12196  VMA_ASSERT(pDedicatedAllocations);
12197  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12198  }
12199 
12200  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12201 
12202  return VK_SUCCESS;
12203 }
12204 
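// Example (illustrative sketch): requesting a dedicated, persistently mapped
// allocation through the public API. bufCreateInfo is assumed to be a filled
// VkBufferCreateInfo and allocator an existing VmaAllocator.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
    VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, &allocInfo);
// On success, allocInfo.pMappedData points at the persistently mapped memory.
*/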
12205 void VmaAllocator_T::GetBufferMemoryRequirements(
12206  VkBuffer hBuffer,
12207  VkMemoryRequirements& memReq,
12208  bool& requiresDedicatedAllocation,
12209  bool& prefersDedicatedAllocation) const
12210 {
12211 #if VMA_DEDICATED_ALLOCATION
12212  if(m_UseKhrDedicatedAllocation)
12213  {
12214  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12215  memReqInfo.buffer = hBuffer;
12216 
12217  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12218 
12219  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12220  memReq2.pNext = &memDedicatedReq;
12221 
12222  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12223 
12224  memReq = memReq2.memoryRequirements;
12225  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12226  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12227  }
12228  else
12229 #endif // #if VMA_DEDICATED_ALLOCATION
12230  {
12231  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12232  requiresDedicatedAllocation = false;
12233  prefersDedicatedAllocation = false;
12234  }
12235 }
12236 
12237 void VmaAllocator_T::GetImageMemoryRequirements(
12238  VkImage hImage,
12239  VkMemoryRequirements& memReq,
12240  bool& requiresDedicatedAllocation,
12241  bool& prefersDedicatedAllocation) const
12242 {
12243 #if VMA_DEDICATED_ALLOCATION
12244  if(m_UseKhrDedicatedAllocation)
12245  {
12246  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12247  memReqInfo.image = hImage;
12248 
12249  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12250 
12251  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12252  memReq2.pNext = &memDedicatedReq;
12253 
12254  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12255 
12256  memReq = memReq2.memoryRequirements;
12257  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12258  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12259  }
12260  else
12261 #endif // #if VMA_DEDICATED_ALLOCATION
12262  {
12263  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12264  requiresDedicatedAllocation = false;
12265  prefersDedicatedAllocation = false;
12266  }
12267 }
12268 
12269 VkResult VmaAllocator_T::AllocateMemory(
12270  const VkMemoryRequirements& vkMemReq,
12271  bool requiresDedicatedAllocation,
12272  bool prefersDedicatedAllocation,
12273  VkBuffer dedicatedBuffer,
12274  VkImage dedicatedImage,
12275  const VmaAllocationCreateInfo& createInfo,
12276  VmaSuballocationType suballocType,
12277  VmaAllocation* pAllocation)
12278 {
12279  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12280 
12281  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12282  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12283  {
12284  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12285  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12286  }
12287  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12288  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12289  {
12290  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12291  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12292  }
12293  if(requiresDedicatedAllocation)
12294  {
12295  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12296  {
12297  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12298  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12299  }
12300  if(createInfo.pool != VK_NULL_HANDLE)
12301  {
12302  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12303  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12304  }
12305  }
12306  if((createInfo.pool != VK_NULL_HANDLE) &&
12307  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12308  {
12309  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12310  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12311  }
12312 
12313  if(createInfo.pool != VK_NULL_HANDLE)
12314  {
12315  const VkDeviceSize alignmentForPool = VMA_MAX(
12316  vkMemReq.alignment,
12317  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12318  return createInfo.pool->m_BlockVector.Allocate(
12319  createInfo.pool,
12320  m_CurrentFrameIndex.load(),
12321  vkMemReq.size,
12322  alignmentForPool,
12323  createInfo,
12324  suballocType,
12325  pAllocation);
12326  }
12327  else
12328  {
12329  // Bit mask of Vulkan memory types acceptable for this allocation.
12330  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12331  uint32_t memTypeIndex = UINT32_MAX;
12332  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12333  if(res == VK_SUCCESS)
12334  {
12335  VkDeviceSize alignmentForMemType = VMA_MAX(
12336  vkMemReq.alignment,
12337  GetMemoryTypeMinAlignment(memTypeIndex));
12338 
12339  res = AllocateMemoryOfType(
12340  vkMemReq.size,
12341  alignmentForMemType,
12342  requiresDedicatedAllocation || prefersDedicatedAllocation,
12343  dedicatedBuffer,
12344  dedicatedImage,
12345  createInfo,
12346  memTypeIndex,
12347  suballocType,
12348  pAllocation);
12349  // Succeeded on first try.
12350  if(res == VK_SUCCESS)
12351  {
12352  return res;
12353  }
12354  // Allocation from this memory type failed. Try other compatible memory types.
12355  else
12356  {
12357  for(;;)
12358  {
12359  // Remove old memTypeIndex from list of possibilities.
12360  memoryTypeBits &= ~(1u << memTypeIndex);
12361  // Find alternative memTypeIndex.
12362  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12363  if(res == VK_SUCCESS)
12364  {
12365  alignmentForMemType = VMA_MAX(
12366  vkMemReq.alignment,
12367  GetMemoryTypeMinAlignment(memTypeIndex));
12368 
12369  res = AllocateMemoryOfType(
12370  vkMemReq.size,
12371  alignmentForMemType,
12372  requiresDedicatedAllocation || prefersDedicatedAllocation,
12373  dedicatedBuffer,
12374  dedicatedImage,
12375  createInfo,
12376  memTypeIndex,
12377  suballocType,
12378  pAllocation);
12379  // Allocation from this alternative memory type succeeded.
12380  if(res == VK_SUCCESS)
12381  {
12382  return res;
12383  }
12384  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12385  }
12386  // No other matching memory type index could be found.
12387  else
12388  {
12389  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12390  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12391  }
12392  }
12393  }
12394  }
12395  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12396  else
12397  return res;
12398  }
12399 }
12400 
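// Worked example of the fallback loop above (indices hypothetical): if
// vkMemReq.memoryTypeBits is 0b1011 and type 1 is chosen first but fails to
// allocate, the mask becomes 0b1011 & ~(1u << 1) = 0b1001, so the next
// vmaFindMemoryTypeIndex() call can only pick type 0 or type 3. Once no
// compatible type remains, VK_ERROR_OUT_OF_DEVICE_MEMORY is returned.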
12401 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12402 {
12403  VMA_ASSERT(allocation);
12404 
12405  if(TouchAllocation(allocation))
12406  {
12407  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12408  {
12409  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12410  }
12411 
12412  switch(allocation->GetType())
12413  {
12414  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12415  {
12416  VmaBlockVector* pBlockVector = VMA_NULL;
12417  VmaPool hPool = allocation->GetPool();
12418  if(hPool != VK_NULL_HANDLE)
12419  {
12420  pBlockVector = &hPool->m_BlockVector;
12421  }
12422  else
12423  {
12424  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12425  pBlockVector = m_pBlockVectors[memTypeIndex];
12426  }
12427  pBlockVector->Free(allocation);
12428  }
12429  break;
12430  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12431  FreeDedicatedMemory(allocation);
12432  break;
12433  default:
12434  VMA_ASSERT(0);
12435  }
12436  }
12437 
12438  allocation->SetUserData(this, VMA_NULL);
12439  vma_delete(this, allocation);
12440 }
12441 
12442 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12443 {
12444  // Initialize.
12445  InitStatInfo(pStats->total);
12446  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12447  InitStatInfo(pStats->memoryType[i]);
12448  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12449  InitStatInfo(pStats->memoryHeap[i]);
12450 
12451  // Process default pools.
12452  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12453  {
12454  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12455  VMA_ASSERT(pBlockVector);
12456  pBlockVector->AddStats(pStats);
12457  }
12458 
12459  // Process custom pools.
12460  {
12461  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12462  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12463  {
12464  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12465  }
12466  }
12467 
12468  // Process dedicated allocations.
12469  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12470  {
12471  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12472  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12473  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12474  VMA_ASSERT(pDedicatedAllocVector);
12475  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12476  {
12477  VmaStatInfo allocationStatInfo;
12478  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12479  VmaAddStatInfo(pStats->total, allocationStatInfo);
12480  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12481  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12482  }
12483  }
12484 
12485  // Postprocess.
12486  VmaPostprocessCalcStatInfo(pStats->total);
12487  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12488  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12489  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12490  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12491 }
12492 
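// Example (illustrative sketch): gathering statistics through the public API
// and reading the aggregated totals computed by this function.
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("Used: %llu B, unused: %llu B, allocations: %u\n",
    stats.total.usedBytes, stats.total.unusedBytes, stats.total.allocationCount);
*/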
12493 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12494 
12495 VkResult VmaAllocator_T::Defragment(
12496  VmaAllocation* pAllocations,
12497  size_t allocationCount,
12498  VkBool32* pAllocationsChanged,
12499  const VmaDefragmentationInfo* pDefragmentationInfo,
12500  VmaDefragmentationStats* pDefragmentationStats)
12501 {
12502  if(pAllocationsChanged != VMA_NULL)
12503  {
12504  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12505  }
12506  if(pDefragmentationStats != VMA_NULL)
12507  {
12508  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12509  }
12510 
12511  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12512 
12513  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12514 
12515  const size_t poolCount = m_Pools.size();
12516 
12517  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12518  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12519  {
12520  VmaAllocation hAlloc = pAllocations[allocIndex];
12521  VMA_ASSERT(hAlloc);
12522  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12523  // DedicatedAlloc cannot be defragmented.
12524  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12525  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12526  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12527  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12528  // Lost allocation cannot be defragmented.
12529  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12530  {
12531  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12532 
12533  const VmaPool hAllocPool = hAlloc->GetPool();
12534  // This allocation belongs to custom pool.
12535  if(hAllocPool != VK_NULL_HANDLE)
12536  {
12537  // Pools with linear or buddy algorithm are not defragmented.
12538  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12539  {
12540  pAllocBlockVector = &hAllocPool->m_BlockVector;
12541  }
12542  }
12543  // This allocation belongs to general pool.
12544  else
12545  {
12546  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12547  }
12548 
12549  if(pAllocBlockVector != VMA_NULL)
12550  {
12551  VmaDefragmentator* const pDefragmentator =
12552  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12553  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12554  &pAllocationsChanged[allocIndex] : VMA_NULL;
12555  pDefragmentator->AddAllocation(hAlloc, pChanged);
12556  }
12557  }
12558  }
12559 
12560  VkResult result = VK_SUCCESS;
12561 
12562  // ======== Main processing.
12563 
12564  VkDeviceSize maxBytesToMove = SIZE_MAX;
12565  uint32_t maxAllocationsToMove = UINT32_MAX;
12566  if(pDefragmentationInfo != VMA_NULL)
12567  {
12568  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12569  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12570  }
12571 
12572  // Process standard memory.
12573  for(uint32_t memTypeIndex = 0;
12574  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12575  ++memTypeIndex)
12576  {
12577  // Only HOST_VISIBLE memory types can be defragmented.
12578  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12579  {
12580  result = m_pBlockVectors[memTypeIndex]->Defragment(
12581  pDefragmentationStats,
12582  maxBytesToMove,
12583  maxAllocationsToMove);
12584  }
12585  }
12586 
12587  // Process custom pools.
12588  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12589  {
12590  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12591  pDefragmentationStats,
12592  maxBytesToMove,
12593  maxAllocationsToMove);
12594  }
12595 
12596  // ======== Destroy defragmentators.
12597 
12598  // Process custom pools.
12599  for(size_t poolIndex = poolCount; poolIndex--; )
12600  {
12601  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12602  }
12603 
12604  // Process standard memory.
12605  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12606  {
12607  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12608  {
12609  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12610  }
12611  }
12612 
12613  return result;
12614 }
12615 
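// Example (illustrative sketch): invoking defragmentation through the public
// API. allocations is assumed to be a std::vector<VmaAllocation> owned by the
// caller.
/*
std::vector<VkBool32> allocationsChanged(allocations.size());

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE; // no byte limit
defragInfo.maxAllocationsToMove = UINT32_MAX; // no move-count limit

VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
    allocationsChanged.data(), &defragInfo, &defragStats);
// Any buffer or image bound to an allocation with allocationsChanged[i] ==
// VK_TRUE must be destroyed, recreated, and rebound by the caller.
*/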
12616 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12617 {
12618  if(hAllocation->CanBecomeLost())
12619  {
12620  /*
12621  Warning: This is a carefully designed algorithm.
12622  Do not modify unless you really know what you're doing :)
12623  */
12624  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12625  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12626  for(;;)
12627  {
12628  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12629  {
12630  pAllocationInfo->memoryType = UINT32_MAX;
12631  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12632  pAllocationInfo->offset = 0;
12633  pAllocationInfo->size = hAllocation->GetSize();
12634  pAllocationInfo->pMappedData = VMA_NULL;
12635  pAllocationInfo->pUserData = hAllocation->GetUserData();
12636  return;
12637  }
12638  else if(localLastUseFrameIndex == localCurrFrameIndex)
12639  {
12640  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12641  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12642  pAllocationInfo->offset = hAllocation->GetOffset();
12643  pAllocationInfo->size = hAllocation->GetSize();
12644  pAllocationInfo->pMappedData = VMA_NULL;
12645  pAllocationInfo->pUserData = hAllocation->GetUserData();
12646  return;
12647  }
12648  else // Last use time earlier than current time.
12649  {
12650  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12651  {
12652  localLastUseFrameIndex = localCurrFrameIndex;
12653  }
12654  }
12655  }
12656  }
12657  else
12658  {
12659 #if VMA_STATS_STRING_ENABLED
12660  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12661  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12662  for(;;)
12663  {
12664  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12665  if(localLastUseFrameIndex == localCurrFrameIndex)
12666  {
12667  break;
12668  }
12669  else // Last use time earlier than current time.
12670  {
12671  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12672  {
12673  localLastUseFrameIndex = localCurrFrameIndex;
12674  }
12675  }
12676  }
12677 #endif
12678 
12679  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12680  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12681  pAllocationInfo->offset = hAllocation->GetOffset();
12682  pAllocationInfo->size = hAllocation->GetSize();
12683  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12684  pAllocationInfo->pUserData = hAllocation->GetUserData();
12685  }
12686 }
12687 
12688 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12689 {
12690  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12691  if(hAllocation->CanBecomeLost())
12692  {
12693  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12694  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12695  for(;;)
12696  {
12697  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12698  {
12699  return false;
12700  }
12701  else if(localLastUseFrameIndex == localCurrFrameIndex)
12702  {
12703  return true;
12704  }
12705  else // Last use time earlier than current time.
12706  {
12707  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12708  {
12709  localLastUseFrameIndex = localCurrFrameIndex;
12710  }
12711  }
12712  }
12713  }
12714  else
12715  {
12716 #if VMA_STATS_STRING_ENABLED
12717  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12718  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12719  for(;;)
12720  {
12721  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12722  if(localLastUseFrameIndex == localCurrFrameIndex)
12723  {
12724  break;
12725  }
12726  else // Last use time earlier than current time.
12727  {
12728  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12729  {
12730  localLastUseFrameIndex = localCurrFrameIndex;
12731  }
12732  }
12733  }
12734 #endif
12735 
12736  return true;
12737  }
12738 }
12739 
12740 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12741 {
12742  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12743 
12744  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12745 
12746  if(newCreateInfo.maxBlockCount == 0)
12747  {
12748  newCreateInfo.maxBlockCount = SIZE_MAX;
12749  }
12750  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12751  {
12752  return VK_ERROR_INITIALIZATION_FAILED;
12753  }
12754 
12755  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12756 
12757  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12758 
12759  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12760  if(res != VK_SUCCESS)
12761  {
12762  vma_delete(this, *pPool);
12763  *pPool = VMA_NULL;
12764  return res;
12765  }
12766 
12767  // Add to m_Pools.
12768  {
12769  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12770  (*pPool)->SetId(m_NextPoolId++);
12771  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12772  }
12773 
12774  return VK_SUCCESS;
12775 }
12776 
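// Example (illustrative sketch): creating a custom pool through the public
// API. memTypeIndex is assumed to come from vmaFindMemoryTypeIndex().
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
poolCreateInfo.minBlockCount = 1;
poolCreateInfo.maxBlockCount = 0; // 0 means no limit; becomes SIZE_MAX above

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/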
12777 void VmaAllocator_T::DestroyPool(VmaPool pool)
12778 {
12779  // Remove from m_Pools.
12780  {
12781  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12782  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12783  VMA_ASSERT(success && "Pool not found in Allocator.");
12784  }
12785 
12786  vma_delete(this, pool);
12787 }
12788 
12789 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12790 {
12791  pool->m_BlockVector.GetPoolStats(pPoolStats);
12792 }
12793 
12794 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12795 {
12796  m_CurrentFrameIndex.store(frameIndex);
12797 }
12798 
12799 void VmaAllocator_T::MakePoolAllocationsLost(
12800  VmaPool hPool,
12801  size_t* pLostAllocationCount)
12802 {
12803  hPool->m_BlockVector.MakePoolAllocationsLost(
12804  m_CurrentFrameIndex.load(),
12805  pLostAllocationCount);
12806 }
12807 
12808 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12809 {
12810  return hPool->m_BlockVector.CheckCorruption();
12811 }
12812 
12813 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12814 {
12815  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12816 
12817  // Process default pools.
12818  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12819  {
12820  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12821  {
12822  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12823  VMA_ASSERT(pBlockVector);
12824  VkResult localRes = pBlockVector->CheckCorruption();
12825  switch(localRes)
12826  {
12827  case VK_ERROR_FEATURE_NOT_PRESENT:
12828  break;
12829  case VK_SUCCESS:
12830  finalRes = VK_SUCCESS;
12831  break;
12832  default:
12833  return localRes;
12834  }
12835  }
12836  }
12837 
12838  // Process custom pools.
12839  {
12840  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12841  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12842  {
12843  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12844  {
12845  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12846  switch(localRes)
12847  {
12848  case VK_ERROR_FEATURE_NOT_PRESENT:
12849  break;
12850  case VK_SUCCESS:
12851  finalRes = VK_SUCCESS;
12852  break;
12853  default:
12854  return localRes;
12855  }
12856  }
12857  }
12858  }
12859 
12860  return finalRes;
12861 }
12862 
12863 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12864 {
12865  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12866  (*pAllocation)->InitLost();
12867 }
12868 
12869 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
12870 {
12871  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
12872 
12873  VkResult res;
12874  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12875  {
12876  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12877  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
12878  {
12879  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12880  if(res == VK_SUCCESS)
12881  {
12882  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
12883  }
12884  }
12885  else
12886  {
12887  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
12888  }
12889  }
12890  else
12891  {
12892  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12893  }
12894 
12895  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
12896  {
12897  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
12898  }
12899 
12900  return res;
12901 }
12902 
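// Worked example of the heap-limit bookkeeping above (numbers hypothetical):
// if VmaAllocatorCreateInfo::pHeapSizeLimit capped a heap at 256 MiB, a
// 64 MiB allocation succeeds and leaves m_HeapSizeLimit at 192 MiB, while a
// later 256 MiB request fails with VK_ERROR_OUT_OF_DEVICE_MEMORY without
// calling vkAllocateMemory at all. FreeVulkanMemory() below adds the freed
// size back to the limit.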
12903 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
12904 {
12905  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
12906  {
12907  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
12908  }
12909 
12910  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
12911 
12912  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
12913  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12914  {
12915  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12916  m_HeapSizeLimit[heapIndex] += size;
12917  }
12918 }
12919 
12920 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
12921 {
12922  if(hAllocation->CanBecomeLost())
12923  {
12924  return VK_ERROR_MEMORY_MAP_FAILED;
12925  }
12926 
12927  switch(hAllocation->GetType())
12928  {
12929  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12930  {
12931  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12932  char *pBytes = VMA_NULL;
12933  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
12934  if(res == VK_SUCCESS)
12935  {
12936  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
12937  hAllocation->BlockAllocMap();
12938  }
12939  return res;
12940  }
12941  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12942  return hAllocation->DedicatedAllocMap(this, ppData);
12943  default:
12944  VMA_ASSERT(0);
12945  return VK_ERROR_MEMORY_MAP_FAILED;
12946  }
12947 }
12948 
12949 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12950 {
12951  switch(hAllocation->GetType())
12952  {
12953  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12954  {
12955  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12956  hAllocation->BlockAllocUnmap();
12957  pBlock->Unmap(this, 1);
12958  }
12959  break;
12960  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12961  hAllocation->DedicatedAllocUnmap(this);
12962  break;
12963  default:
12964  VMA_ASSERT(0);
12965  }
12966 }
12967 
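// Example (illustrative sketch): mapping an allocation through the public
// API. For a block allocation the returned pointer already includes the
// allocation's offset inside the shared VkDeviceMemory, as computed in Map()
// above. srcData and srcDataSize are assumed to exist.
/*
void* pData = VMA_NULL;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if(res == VK_SUCCESS)
{
    memcpy(pData, srcData, (size_t)srcDataSize);
    vmaUnmapMemory(allocator, allocation);
}
*/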
12968 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12969 {
12970  VkResult res = VK_SUCCESS;
12971  switch(hAllocation->GetType())
12972  {
12973  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12974  res = GetVulkanFunctions().vkBindBufferMemory(
12975  m_hDevice,
12976  hBuffer,
12977  hAllocation->GetMemory(),
12978  0); //memoryOffset
12979  break;
12980  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12981  {
12982  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12983  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12984  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12985  break;
12986  }
12987  default:
12988  VMA_ASSERT(0);
12989  }
12990  return res;
12991 }
12992 
12993 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12994 {
12995  VkResult res = VK_SUCCESS;
12996  switch(hAllocation->GetType())
12997  {
12998  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12999  res = GetVulkanFunctions().vkBindImageMemory(
13000  m_hDevice,
13001  hImage,
13002  hAllocation->GetMemory(),
13003  0); //memoryOffset
13004  break;
13005  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13006  {
13007  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13008  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13009  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13010  break;
13011  }
13012  default:
13013  VMA_ASSERT(0);
13014  }
13015  return res;
13016 }
13017 
13018 void VmaAllocator_T::FlushOrInvalidateAllocation(
13019  VmaAllocation hAllocation,
13020  VkDeviceSize offset, VkDeviceSize size,
13021  VMA_CACHE_OPERATION op)
13022 {
13023  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13024  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13025  {
13026  const VkDeviceSize allocationSize = hAllocation->GetSize();
13027  VMA_ASSERT(offset <= allocationSize);
13028 
13029  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13030 
13031  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13032  memRange.memory = hAllocation->GetMemory();
13033 
13034  switch(hAllocation->GetType())
13035  {
13036  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13037  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13038  if(size == VK_WHOLE_SIZE)
13039  {
13040  memRange.size = allocationSize - memRange.offset;
13041  }
13042  else
13043  {
13044  VMA_ASSERT(offset + size <= allocationSize);
13045  memRange.size = VMA_MIN(
13046  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13047  allocationSize - memRange.offset);
13048  }
13049  break;
13050 
13051  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13052  {
13053  // 1. Still within this allocation.
13054  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13055  if(size == VK_WHOLE_SIZE)
13056  {
13057  size = allocationSize - offset;
13058  }
13059  else
13060  {
13061  VMA_ASSERT(offset + size <= allocationSize);
13062  }
13063  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13064 
13065  // 2. Adjust to whole block.
13066  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13067  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13068  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13069  memRange.offset += allocationOffset;
13070  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13071 
13072  break;
13073  }
13074 
13075  default:
13076  VMA_ASSERT(0);
13077  }
13078 
13079  switch(op)
13080  {
13081  case VMA_CACHE_FLUSH:
13082  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13083  break;
13084  case VMA_CACHE_INVALIDATE:
13085  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13086  break;
13087  default:
13088  VMA_ASSERT(0);
13089  }
13090  }
13091  // else: Just ignore this call.
13092 }
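// Editor's note (not part of the original source): ranges passed to
// vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges must be aligned to
// VkPhysicalDeviceLimits::nonCoherentAtomSize, which is what the
// VmaAlignDown/VmaAlignUp arithmetic above guarantees. A worked example,
// assuming nonCoherentAtomSize = 64, a block allocation at offset 192 within
// its block, and a request with offset = 10, size = 100:
//
//   memRange.offset = VmaAlignDown(10, 64)           = 0
//   memRange.size   = VmaAlignUp(100 + (10 - 0), 64) = 128
//   memRange.offset += 192  ->  192   (still a multiple of 64)
//   memRange.size is then clamped to blockSize - memRange.offset.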
13093 
13094 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13095 {
13096  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13097 
13098  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13099  {
13100  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13101  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13102  VMA_ASSERT(pDedicatedAllocations);
13103  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13104  VMA_ASSERT(success);
13105  }
13106 
13107  VkDeviceMemory hMemory = allocation->GetMemory();
13108 
13109  if(allocation->GetMappedData() != VMA_NULL)
13110  {
13111  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13112  }
13113 
13114  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13115 
13116  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13117 }
13118 
13119 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13120 {
13121  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13122  !hAllocation->CanBecomeLost() &&
13123  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13124  {
13125  void* pData = VMA_NULL;
13126  VkResult res = Map(hAllocation, &pData);
13127  if(res == VK_SUCCESS)
13128  {
13129  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13130  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13131  Unmap(hAllocation);
13132  }
13133  else
13134  {
13135  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13136  }
13137  }
13138 }
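// Editor's note (not part of the original source): FillAllocation is a debug
// aid active only when VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled. It maps
// the allocation, fills it with the given byte pattern, flushes it, and unmaps
// it again, which makes reads from uninitialized or stale memory easier to
// spot. Lost allocations and non-HOST_VISIBLE memory types are skipped because
// they cannot be mapped.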
13139 
13140 #if VMA_STATS_STRING_ENABLED
13141 
13142 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13143 {
13144  bool dedicatedAllocationsStarted = false;
13145  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13146  {
13147  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13148  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13149  VMA_ASSERT(pDedicatedAllocVector);
13150  if(pDedicatedAllocVector->empty() == false)
13151  {
13152  if(dedicatedAllocationsStarted == false)
13153  {
13154  dedicatedAllocationsStarted = true;
13155  json.WriteString("DedicatedAllocations");
13156  json.BeginObject();
13157  }
13158 
13159  json.BeginString("Type ");
13160  json.ContinueString(memTypeIndex);
13161  json.EndString();
13162 
13163  json.BeginArray();
13164 
13165  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13166  {
13167  json.BeginObject(true);
13168  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13169  hAlloc->PrintParameters(json);
13170  json.EndObject();
13171  }
13172 
13173  json.EndArray();
13174  }
13175  }
13176  if(dedicatedAllocationsStarted)
13177  {
13178  json.EndObject();
13179  }
13180 
13181  {
13182  bool allocationsStarted = false;
13183  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13184  {
13185  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13186  {
13187  if(allocationsStarted == false)
13188  {
13189  allocationsStarted = true;
13190  json.WriteString("DefaultPools");
13191  json.BeginObject();
13192  }
13193 
13194  json.BeginString("Type ");
13195  json.ContinueString(memTypeIndex);
13196  json.EndString();
13197 
13198  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13199  }
13200  }
13201  if(allocationsStarted)
13202  {
13203  json.EndObject();
13204  }
13205  }
13206 
13207  // Custom pools
13208  {
13209  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13210  const size_t poolCount = m_Pools.size();
13211  if(poolCount > 0)
13212  {
13213  json.WriteString("Pools");
13214  json.BeginObject();
13215  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13216  {
13217  json.BeginString();
13218  json.ContinueString(m_Pools[poolIndex]->GetId());
13219  json.EndString();
13220 
13221  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13222  }
13223  json.EndObject();
13224  }
13225  }
13226 }
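// Editor's note (not part of the original source): PrintDetailedMap emits up
// to three optional sections into the enclosing JSON object:
// "DedicatedAllocations" (per memory type), "DefaultPools" (the allocator's
// own block vectors, per memory type), and "Pools" (custom pools, keyed by
// pool ID). Sections with no content are omitted entirely.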
13227 
13228 #endif // #if VMA_STATS_STRING_ENABLED
13229 
13230 ////////////////////////////////////////////////////////////////////////////////
13231 // Public interface
13232 
13233 VkResult vmaCreateAllocator(
13234  const VmaAllocatorCreateInfo* pCreateInfo,
13235  VmaAllocator* pAllocator)
13236 {
13237  VMA_ASSERT(pCreateInfo && pAllocator);
13238  VMA_DEBUG_LOG("vmaCreateAllocator");
13239  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13240  return (*pAllocator)->Init(pCreateInfo);
13241 }
13242 
13243 void vmaDestroyAllocator(
13244  VmaAllocator allocator)
13245 {
13246  if(allocator != VK_NULL_HANDLE)
13247  {
13248  VMA_DEBUG_LOG("vmaDestroyAllocator");
13249  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13250  vma_delete(&allocationCallbacks, allocator);
13251  }
13252 }
13253 
13254 void vmaGetPhysicalDeviceProperties(
13255  VmaAllocator allocator,
13256  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13257 {
13258  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13259  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13260 }
13261 
13262 void vmaGetMemoryProperties(
13263  VmaAllocator allocator,
13264  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13265 {
13266  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13267  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13268 }
13269 
13270 void vmaGetMemoryTypeProperties(
13271  VmaAllocator allocator,
13272  uint32_t memoryTypeIndex,
13273  VkMemoryPropertyFlags* pFlags)
13274 {
13275  VMA_ASSERT(allocator && pFlags);
13276  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13277  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13278 }
13279 
13280 void vmaSetCurrentFrameIndex(
13281  VmaAllocator allocator,
13282  uint32_t frameIndex)
13283 {
13284  VMA_ASSERT(allocator);
13285  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13286 
13287  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13288 
13289  allocator->SetCurrentFrameIndex(frameIndex);
13290 }
13291 
13292 void vmaCalculateStats(
13293  VmaAllocator allocator,
13294  VmaStats* pStats)
13295 {
13296  VMA_ASSERT(allocator && pStats);
13297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13298  allocator->CalculateStats(pStats);
13299 }
13300 
13301 #if VMA_STATS_STRING_ENABLED
13302 
13303 void vmaBuildStatsString(
13304  VmaAllocator allocator,
13305  char** ppStatsString,
13306  VkBool32 detailedMap)
13307 {
13308  VMA_ASSERT(allocator && ppStatsString);
13309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13310 
13311  VmaStringBuilder sb(allocator);
13312  {
13313  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13314  json.BeginObject();
13315 
13316  VmaStats stats;
13317  allocator->CalculateStats(&stats);
13318 
13319  json.WriteString("Total");
13320  VmaPrintStatInfo(json, stats.total);
13321 
13322  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13323  {
13324  json.BeginString("Heap ");
13325  json.ContinueString(heapIndex);
13326  json.EndString();
13327  json.BeginObject();
13328 
13329  json.WriteString("Size");
13330  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13331 
13332  json.WriteString("Flags");
13333  json.BeginArray(true);
13334  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13335  {
13336  json.WriteString("DEVICE_LOCAL");
13337  }
13338  json.EndArray();
13339 
13340  if(stats.memoryHeap[heapIndex].blockCount > 0)
13341  {
13342  json.WriteString("Stats");
13343  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13344  }
13345 
13346  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13347  {
13348  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13349  {
13350  json.BeginString("Type ");
13351  json.ContinueString(typeIndex);
13352  json.EndString();
13353 
13354  json.BeginObject();
13355 
13356  json.WriteString("Flags");
13357  json.BeginArray(true);
13358  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13359  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13360  {
13361  json.WriteString("DEVICE_LOCAL");
13362  }
13363  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13364  {
13365  json.WriteString("HOST_VISIBLE");
13366  }
13367  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13368  {
13369  json.WriteString("HOST_COHERENT");
13370  }
13371  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13372  {
13373  json.WriteString("HOST_CACHED");
13374  }
13375  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13376  {
13377  json.WriteString("LAZILY_ALLOCATED");
13378  }
13379  json.EndArray();
13380 
13381  if(stats.memoryType[typeIndex].blockCount > 0)
13382  {
13383  json.WriteString("Stats");
13384  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13385  }
13386 
13387  json.EndObject();
13388  }
13389  }
13390 
13391  json.EndObject();
13392  }
13393  if(detailedMap == VK_TRUE)
13394  {
13395  allocator->PrintDetailedMap(json);
13396  }
13397 
13398  json.EndObject();
13399  }
13400 
13401  const size_t len = sb.GetLength();
13402  char* const pChars = vma_new_array(allocator, char, len + 1);
13403  if(len > 0)
13404  {
13405  memcpy(pChars, sb.GetData(), len);
13406  }
13407  pChars[len] = '\0';
13408  *ppStatsString = pChars;
13409 }
13410 
13411 void vmaFreeStatsString(
13412  VmaAllocator allocator,
13413  char* pStatsString)
13414 {
13415  if(pStatsString != VMA_NULL)
13416  {
13417  VMA_ASSERT(allocator);
13418  size_t len = strlen(pStatsString);
13419  vma_delete_array(allocator, pStatsString, len + 1);
13420  }
13421 }
13422 
13423 #endif // #if VMA_STATS_STRING_ENABLED
13424 
13425 /*
13426 This function is not protected by any mutex because it just reads immutable data.
13427 */
13428 VkResult vmaFindMemoryTypeIndex(
13429  VmaAllocator allocator,
13430  uint32_t memoryTypeBits,
13431  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13432  uint32_t* pMemoryTypeIndex)
13433 {
13434  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13435  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13436  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13437 
13438  if(pAllocationCreateInfo->memoryTypeBits != 0)
13439  {
13440  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13441  }
13442 
13443  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13444  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13445 
13446  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13447  if(mapped)
13448  {
13449  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13450  }
13451 
13452  // Convert usage to requiredFlags and preferredFlags.
13453  switch(pAllocationCreateInfo->usage)
13454  {
13455  case VMA_MEMORY_USAGE_UNKNOWN:
13456  break;
13457  case VMA_MEMORY_USAGE_GPU_ONLY:
13458  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13459  {
13460  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13461  }
13462  break;
13463  case VMA_MEMORY_USAGE_CPU_ONLY:
13464  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13465  break;
13466  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13467  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13468  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13469  {
13470  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13471  }
13472  break;
13473  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13474  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13475  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13476  break;
13477  default:
13478  break;
13479  }
13480 
13481  *pMemoryTypeIndex = UINT32_MAX;
13482  uint32_t minCost = UINT32_MAX;
13483  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13484  memTypeIndex < allocator->GetMemoryTypeCount();
13485  ++memTypeIndex, memTypeBit <<= 1)
13486  {
13487  // This memory type is acceptable according to memoryTypeBits bitmask.
13488  if((memTypeBit & memoryTypeBits) != 0)
13489  {
13490  const VkMemoryPropertyFlags currFlags =
13491  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13492  // This memory type contains requiredFlags.
13493  if((requiredFlags & ~currFlags) == 0)
13494  {
13495  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13496  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13497  // Remember memory type with lowest cost.
13498  if(currCost < minCost)
13499  {
13500  *pMemoryTypeIndex = memTypeIndex;
13501  if(currCost == 0)
13502  {
13503  return VK_SUCCESS;
13504  }
13505  minCost = currCost;
13506  }
13507  }
13508  }
13509  }
13510  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13511 }
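// Editor's note (not part of the original source): the loop above implements a
// simple cost model: every memory type permitted by memoryTypeBits that
// contains all requiredFlags is scored by how many preferredFlags it lacks,
// and the lowest-cost type wins (a cost of 0 returns immediately). A minimal
// usage sketch, assuming an `allocator` already exists:
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

// UINT32_MAX = consider every memory type; normally this bitmask comes from
// VkMemoryRequirements::memoryTypeBits.
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
#endif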
13512 
13513 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13514  VmaAllocator allocator,
13515  const VkBufferCreateInfo* pBufferCreateInfo,
13516  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13517  uint32_t* pMemoryTypeIndex)
13518 {
13519  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13520  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13521  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13522  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13523 
13524  const VkDevice hDev = allocator->m_hDevice;
13525  VkBuffer hBuffer = VK_NULL_HANDLE;
13526  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13527  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13528  if(res == VK_SUCCESS)
13529  {
13530  VkMemoryRequirements memReq = {};
13531  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13532  hDev, hBuffer, &memReq);
13533 
13534  res = vmaFindMemoryTypeIndex(
13535  allocator,
13536  memReq.memoryTypeBits,
13537  pAllocationCreateInfo,
13538  pMemoryTypeIndex);
13539 
13540  allocator->GetVulkanFunctions().vkDestroyBuffer(
13541  hDev, hBuffer, allocator->GetAllocationCallbacks());
13542  }
13543  return res;
13544 }
13545 
13546 VkResult vmaFindMemoryTypeIndexForImageInfo(
13547  VmaAllocator allocator,
13548  const VkImageCreateInfo* pImageCreateInfo,
13549  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13550  uint32_t* pMemoryTypeIndex)
13551 {
13552  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13553  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13554  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13555  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13556 
13557  const VkDevice hDev = allocator->m_hDevice;
13558  VkImage hImage = VK_NULL_HANDLE;
13559  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13560  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13561  if(res == VK_SUCCESS)
13562  {
13563  VkMemoryRequirements memReq = {};
13564  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13565  hDev, hImage, &memReq);
13566 
13567  res = vmaFindMemoryTypeIndex(
13568  allocator,
13569  memReq.memoryTypeBits,
13570  pAllocationCreateInfo,
13571  pMemoryTypeIndex);
13572 
13573  allocator->GetVulkanFunctions().vkDestroyImage(
13574  hDev, hImage, allocator->GetAllocationCallbacks());
13575  }
13576  return res;
13577 }
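// Editor's note (not part of the original source): both
// vmaFindMemoryTypeIndexForBufferInfo and vmaFindMemoryTypeIndexForImageInfo
// obtain memoryTypeBits by creating a temporary VkBuffer/VkImage, querying its
// memory requirements, and destroying it again, so the answer is exact for the
// given create-info at the cost of one transient object per call.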
13578 
13579 VkResult vmaCreatePool(
13580  VmaAllocator allocator,
13581  const VmaPoolCreateInfo* pCreateInfo,
13582  VmaPool* pPool)
13583 {
13584  VMA_ASSERT(allocator && pCreateInfo && pPool);
13585 
13586  VMA_DEBUG_LOG("vmaCreatePool");
13587 
13588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13589 
13590  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13591 
13592 #if VMA_RECORDING_ENABLED
13593  if(allocator->GetRecorder() != VMA_NULL)
13594  {
13595  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13596  }
13597 #endif
13598 
13599  return res;
13600 }
13601 
13602 void vmaDestroyPool(
13603  VmaAllocator allocator,
13604  VmaPool pool)
13605 {
13606  VMA_ASSERT(allocator);
13607 
13608  if(pool == VK_NULL_HANDLE)
13609  {
13610  return;
13611  }
13612 
13613  VMA_DEBUG_LOG("vmaDestroyPool");
13614 
13615  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13616 
13617 #if VMA_RECORDING_ENABLED
13618  if(allocator->GetRecorder() != VMA_NULL)
13619  {
13620  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13621  }
13622 #endif
13623 
13624  allocator->DestroyPool(pool);
13625 }
13626 
13627 void vmaGetPoolStats(
13628  VmaAllocator allocator,
13629  VmaPool pool,
13630  VmaPoolStats* pPoolStats)
13631 {
13632  VMA_ASSERT(allocator && pool && pPoolStats);
13633 
13634  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13635 
13636  allocator->GetPoolStats(pool, pPoolStats);
13637 }
13638 
13639 void vmaMakePoolAllocationsLost(
13640  VmaAllocator allocator,
13641  VmaPool pool,
13642  size_t* pLostAllocationCount)
13643 {
13644  VMA_ASSERT(allocator && pool);
13645 
13646  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13647 
13648 #if VMA_RECORDING_ENABLED
13649  if(allocator->GetRecorder() != VMA_NULL)
13650  {
13651  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13652  }
13653 #endif
13654 
13655  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13656 }
13657 
13658 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13659 {
13660  VMA_ASSERT(allocator && pool);
13661 
13662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13663 
13664  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13665 
13666  return allocator->CheckPoolCorruption(pool);
13667 }
13668 
13669 VkResult vmaAllocateMemory(
13670  VmaAllocator allocator,
13671  const VkMemoryRequirements* pVkMemoryRequirements,
13672  const VmaAllocationCreateInfo* pCreateInfo,
13673  VmaAllocation* pAllocation,
13674  VmaAllocationInfo* pAllocationInfo)
13675 {
13676  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13677 
13678  VMA_DEBUG_LOG("vmaAllocateMemory");
13679 
13680  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13681 
13682  VkResult result = allocator->AllocateMemory(
13683  *pVkMemoryRequirements,
13684  false, // requiresDedicatedAllocation
13685  false, // prefersDedicatedAllocation
13686  VK_NULL_HANDLE, // dedicatedBuffer
13687  VK_NULL_HANDLE, // dedicatedImage
13688  *pCreateInfo,
13689  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13690  pAllocation);
13691 
13692 #if VMA_RECORDING_ENABLED
13693  if(allocator->GetRecorder() != VMA_NULL)
13694  {
13695  allocator->GetRecorder()->RecordAllocateMemory(
13696  allocator->GetCurrentFrameIndex(),
13697  *pVkMemoryRequirements,
13698  *pCreateInfo,
13699  *pAllocation);
13700  }
13701 #endif
13702 
13703  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13704  {
13705  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13706  }
13707 
13708  return result;
13709 }
13710 
13711 VkResult vmaAllocateMemoryForBuffer(
13712  VmaAllocator allocator,
13713  VkBuffer buffer,
13714  const VmaAllocationCreateInfo* pCreateInfo,
13715  VmaAllocation* pAllocation,
13716  VmaAllocationInfo* pAllocationInfo)
13717 {
13718  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13719 
13720  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13721 
13722  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13723 
13724  VkMemoryRequirements vkMemReq = {};
13725  bool requiresDedicatedAllocation = false;
13726  bool prefersDedicatedAllocation = false;
13727  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13728  requiresDedicatedAllocation,
13729  prefersDedicatedAllocation);
13730 
13731  VkResult result = allocator->AllocateMemory(
13732  vkMemReq,
13733  requiresDedicatedAllocation,
13734  prefersDedicatedAllocation,
13735  buffer, // dedicatedBuffer
13736  VK_NULL_HANDLE, // dedicatedImage
13737  *pCreateInfo,
13738  VMA_SUBALLOCATION_TYPE_BUFFER,
13739  pAllocation);
13740 
13741 #if VMA_RECORDING_ENABLED
13742  if(allocator->GetRecorder() != VMA_NULL)
13743  {
13744  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13745  allocator->GetCurrentFrameIndex(),
13746  vkMemReq,
13747  requiresDedicatedAllocation,
13748  prefersDedicatedAllocation,
13749  *pCreateInfo,
13750  *pAllocation);
13751  }
13752 #endif
13753 
13754  if(pAllocationInfo && result == VK_SUCCESS)
13755  {
13756  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13757  }
13758 
13759  return result;
13760 }
13761 
13762 VkResult vmaAllocateMemoryForImage(
13763  VmaAllocator allocator,
13764  VkImage image,
13765  const VmaAllocationCreateInfo* pCreateInfo,
13766  VmaAllocation* pAllocation,
13767  VmaAllocationInfo* pAllocationInfo)
13768 {
13769  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13770 
13771  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13772 
13773  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13774 
13775  VkMemoryRequirements vkMemReq = {};
13776  bool requiresDedicatedAllocation = false;
13777  bool prefersDedicatedAllocation = false;
13778  allocator->GetImageMemoryRequirements(image, vkMemReq,
13779  requiresDedicatedAllocation, prefersDedicatedAllocation);
13780 
13781  VkResult result = allocator->AllocateMemory(
13782  vkMemReq,
13783  requiresDedicatedAllocation,
13784  prefersDedicatedAllocation,
13785  VK_NULL_HANDLE, // dedicatedBuffer
13786  image, // dedicatedImage
13787  *pCreateInfo,
13788  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13789  pAllocation);
13790 
13791 #if VMA_RECORDING_ENABLED
13792  if(allocator->GetRecorder() != VMA_NULL)
13793  {
13794  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13795  allocator->GetCurrentFrameIndex(),
13796  vkMemReq,
13797  requiresDedicatedAllocation,
13798  prefersDedicatedAllocation,
13799  *pCreateInfo,
13800  *pAllocation);
13801  }
13802 #endif
13803 
13804  if(pAllocationInfo && result == VK_SUCCESS)
13805  {
13806  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13807  }
13808 
13809  return result;
13810 }
13811 
13812 void vmaFreeMemory(
13813  VmaAllocator allocator,
13814  VmaAllocation allocation)
13815 {
13816  VMA_ASSERT(allocator);
13817 
13818  if(allocation == VK_NULL_HANDLE)
13819  {
13820  return;
13821  }
13822 
13823  VMA_DEBUG_LOG("vmaFreeMemory");
13824 
13825  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13826 
13827 #if VMA_RECORDING_ENABLED
13828  if(allocator->GetRecorder() != VMA_NULL)
13829  {
13830  allocator->GetRecorder()->RecordFreeMemory(
13831  allocator->GetCurrentFrameIndex(),
13832  allocation);
13833  }
13834 #endif
13835 
13836  allocator->FreeMemory(allocation);
13837 }
13838 
13839 void vmaGetAllocationInfo(
13840  VmaAllocator allocator,
13841  VmaAllocation allocation,
13842  VmaAllocationInfo* pAllocationInfo)
13843 {
13844  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13845 
13846  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13847 
13848 #if VMA_RECORDING_ENABLED
13849  if(allocator->GetRecorder() != VMA_NULL)
13850  {
13851  allocator->GetRecorder()->RecordGetAllocationInfo(
13852  allocator->GetCurrentFrameIndex(),
13853  allocation);
13854  }
13855 #endif
13856 
13857  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13858 }
13859 
13860 VkBool32 vmaTouchAllocation(
13861  VmaAllocator allocator,
13862  VmaAllocation allocation)
13863 {
13864  VMA_ASSERT(allocator && allocation);
13865 
13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordTouchAllocation(
13872  allocator->GetCurrentFrameIndex(),
13873  allocation);
13874  }
13875 #endif
13876 
13877  return allocator->TouchAllocation(allocation);
13878 }
13879 
13880 void vmaSetAllocationUserData(
13881  VmaAllocator allocator,
13882  VmaAllocation allocation,
13883  void* pUserData)
13884 {
13885  VMA_ASSERT(allocator && allocation);
13886 
13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13888 
13889  allocation->SetUserData(allocator, pUserData);
13890 
13891 #if VMA_RECORDING_ENABLED
13892  if(allocator->GetRecorder() != VMA_NULL)
13893  {
13894  allocator->GetRecorder()->RecordSetAllocationUserData(
13895  allocator->GetCurrentFrameIndex(),
13896  allocation,
13897  pUserData);
13898  }
13899 #endif
13900 }
13901 
13902 void vmaCreateLostAllocation(
13903  VmaAllocator allocator,
13904  VmaAllocation* pAllocation)
13905 {
13906  VMA_ASSERT(allocator && pAllocation);
13907 
13908  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13909 
13910  allocator->CreateLostAllocation(pAllocation);
13911 
13912 #if VMA_RECORDING_ENABLED
13913  if(allocator->GetRecorder() != VMA_NULL)
13914  {
13915  allocator->GetRecorder()->RecordCreateLostAllocation(
13916  allocator->GetCurrentFrameIndex(),
13917  *pAllocation);
13918  }
13919 #endif
13920 }
13921 
13922 VkResult vmaMapMemory(
13923  VmaAllocator allocator,
13924  VmaAllocation allocation,
13925  void** ppData)
13926 {
13927  VMA_ASSERT(allocator && allocation && ppData);
13928 
13929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13930 
13931  VkResult res = allocator->Map(allocation, ppData);
13932 
13933 #if VMA_RECORDING_ENABLED
13934  if(allocator->GetRecorder() != VMA_NULL)
13935  {
13936  allocator->GetRecorder()->RecordMapMemory(
13937  allocator->GetCurrentFrameIndex(),
13938  allocation);
13939  }
13940 #endif
13941 
13942  return res;
13943 }
13944 
13945 void vmaUnmapMemory(
13946  VmaAllocator allocator,
13947  VmaAllocation allocation)
13948 {
13949  VMA_ASSERT(allocator && allocation);
13950 
13951  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13952 
13953 #if VMA_RECORDING_ENABLED
13954  if(allocator->GetRecorder() != VMA_NULL)
13955  {
13956  allocator->GetRecorder()->RecordUnmapMemory(
13957  allocator->GetCurrentFrameIndex(),
13958  allocation);
13959  }
13960 #endif
13961 
13962  allocator->Unmap(allocation);
13963 }
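// Editor's note (not part of the original source): a typical
// map/write/flush/unmap round trip, assuming `allocator` and a HOST_VISIBLE
// `allocation` exist. The explicit flush only matters for memory types without
// VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; on coherent types
// FlushOrInvalidateAllocation turns it into a no-op:
#if 0
void* pData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize); // srcData/srcSize: caller's staging data
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
#endif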
13964 
13965 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13966 {
13967  VMA_ASSERT(allocator && allocation);
13968 
13969  VMA_DEBUG_LOG("vmaFlushAllocation");
13970 
13971  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13972 
13973  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13974 
13975 #if VMA_RECORDING_ENABLED
13976  if(allocator->GetRecorder() != VMA_NULL)
13977  {
13978  allocator->GetRecorder()->RecordFlushAllocation(
13979  allocator->GetCurrentFrameIndex(),
13980  allocation, offset, size);
13981  }
13982 #endif
13983 }
13984 
13985 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13986 {
13987  VMA_ASSERT(allocator && allocation);
13988 
13989  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13990 
13991  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13992 
13993  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13994 
13995 #if VMA_RECORDING_ENABLED
13996  if(allocator->GetRecorder() != VMA_NULL)
13997  {
13998  allocator->GetRecorder()->RecordInvalidateAllocation(
13999  allocator->GetCurrentFrameIndex(),
14000  allocation, offset, size);
14001  }
14002 #endif
14003 }
14004 
14005 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14006 {
14007  VMA_ASSERT(allocator);
14008 
14009  VMA_DEBUG_LOG("vmaCheckCorruption");
14010 
14011  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14012 
14013  return allocator->CheckCorruption(memoryTypeBits);
14014 }
14015 
14016 VkResult vmaDefragment(
14017  VmaAllocator allocator,
14018  VmaAllocation* pAllocations,
14019  size_t allocationCount,
14020  VkBool32* pAllocationsChanged,
14021  const VmaDefragmentationInfo *pDefragmentationInfo,
14022  VmaDefragmentationStats* pDefragmentationStats)
14023 {
14024  VMA_ASSERT(allocator && pAllocations);
14025 
14026  VMA_DEBUG_LOG("vmaDefragment");
14027 
14028  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14029 
14030  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14031 }
14032 
14033 VkResult vmaBindBufferMemory(
14034  VmaAllocator allocator,
14035  VmaAllocation allocation,
14036  VkBuffer buffer)
14037 {
14038  VMA_ASSERT(allocator && allocation && buffer);
14039 
14040  VMA_DEBUG_LOG("vmaBindBufferMemory");
14041 
14042  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14043 
14044  return allocator->BindBufferMemory(allocation, buffer);
14045 }
14046 
14047 VkResult vmaBindImageMemory(
14048  VmaAllocator allocator,
14049  VmaAllocation allocation,
14050  VkImage image)
14051 {
14052  VMA_ASSERT(allocator && allocation && image);
14053 
14054  VMA_DEBUG_LOG("vmaBindImageMemory");
14055 
14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14057 
14058  return allocator->BindImageMemory(allocation, image);
14059 }
14060 
14061 VkResult vmaCreateBuffer(
14062  VmaAllocator allocator,
14063  const VkBufferCreateInfo* pBufferCreateInfo,
14064  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14065  VkBuffer* pBuffer,
14066  VmaAllocation* pAllocation,
14067  VmaAllocationInfo* pAllocationInfo)
14068 {
14069  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14070 
14071  VMA_DEBUG_LOG("vmaCreateBuffer");
14072 
14073  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14074 
14075  *pBuffer = VK_NULL_HANDLE;
14076  *pAllocation = VK_NULL_HANDLE;
14077 
14078  // 1. Create VkBuffer.
14079  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14080  allocator->m_hDevice,
14081  pBufferCreateInfo,
14082  allocator->GetAllocationCallbacks(),
14083  pBuffer);
14084  if(res >= 0)
14085  {
14086  // 2. vkGetBufferMemoryRequirements.
14087  VkMemoryRequirements vkMemReq = {};
14088  bool requiresDedicatedAllocation = false;
14089  bool prefersDedicatedAllocation = false;
14090  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14091  requiresDedicatedAllocation, prefersDedicatedAllocation);
14092 
14093  // Make sure alignment requirements for specific buffer usages reported
14094  // in Physical Device Properties are included in the alignment reported by the memory requirements.
14095  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14096  {
14097  VMA_ASSERT(vkMemReq.alignment %
14098  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14099  }
14100  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14101  {
14102  VMA_ASSERT(vkMemReq.alignment %
14103  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14104  }
14105  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14106  {
14107  VMA_ASSERT(vkMemReq.alignment %
14108  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14109  }
14110 
14111  // 3. Allocate memory using allocator.
14112  res = allocator->AllocateMemory(
14113  vkMemReq,
14114  requiresDedicatedAllocation,
14115  prefersDedicatedAllocation,
14116  *pBuffer, // dedicatedBuffer
14117  VK_NULL_HANDLE, // dedicatedImage
14118  *pAllocationCreateInfo,
14119  VMA_SUBALLOCATION_TYPE_BUFFER,
14120  pAllocation);
14121 
14122 #if VMA_RECORDING_ENABLED
14123  if(allocator->GetRecorder() != VMA_NULL)
14124  {
14125  allocator->GetRecorder()->RecordCreateBuffer(
14126  allocator->GetCurrentFrameIndex(),
14127  *pBufferCreateInfo,
14128  *pAllocationCreateInfo,
14129  *pAllocation);
14130  }
14131 #endif
14132 
14133  if(res >= 0)
14134  {
14135  // 4. Bind buffer with memory.
14136  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14137  if(res >= 0)
14138  {
14139  // All steps succeeded.
14140  #if VMA_STATS_STRING_ENABLED
14141  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14142  #endif
14143  if(pAllocationInfo != VMA_NULL)
14144  {
14145  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14146  }
14147 
14148  return VK_SUCCESS;
14149  }
14150  allocator->FreeMemory(*pAllocation);
14151  *pAllocation = VK_NULL_HANDLE;
14152  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14153  *pBuffer = VK_NULL_HANDLE;
14154  return res;
14155  }
14156  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14157  *pBuffer = VK_NULL_HANDLE;
14158  return res;
14159  }
14160  return res;
14161 }
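// Editor's note (not part of the original source): vmaCreateBuffer bundles the
// whole sequence above - buffer creation, memory type selection, allocation,
// and binding - and rolls everything back if any step fails. A minimal usage
// sketch, assuming an `allocator` already exists:
#if 0
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
#endif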
14162 
14163 void vmaDestroyBuffer(
14164  VmaAllocator allocator,
14165  VkBuffer buffer,
14166  VmaAllocation allocation)
14167 {
14168  VMA_ASSERT(allocator);
14169 
14170  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14171  {
14172  return;
14173  }
14174 
14175  VMA_DEBUG_LOG("vmaDestroyBuffer");
14176 
14177  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14178 
14179 #if VMA_RECORDING_ENABLED
14180  if(allocator->GetRecorder() != VMA_NULL)
14181  {
14182  allocator->GetRecorder()->RecordDestroyBuffer(
14183  allocator->GetCurrentFrameIndex(),
14184  allocation);
14185  }
14186 #endif
14187 
14188  if(buffer != VK_NULL_HANDLE)
14189  {
14190  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14191  }
14192 
14193  if(allocation != VK_NULL_HANDLE)
14194  {
14195  allocator->FreeMemory(allocation);
14196  }
14197 }
14198 
14199 VkResult vmaCreateImage(
14200  VmaAllocator allocator,
14201  const VkImageCreateInfo* pImageCreateInfo,
14202  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14203  VkImage* pImage,
14204  VmaAllocation* pAllocation,
14205  VmaAllocationInfo* pAllocationInfo)
14206 {
14207  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14208 
14209  VMA_DEBUG_LOG("vmaCreateImage");
14210 
14211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14212 
14213  *pImage = VK_NULL_HANDLE;
14214  *pAllocation = VK_NULL_HANDLE;
14215 
14216  // 1. Create VkImage.
14217  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14218  allocator->m_hDevice,
14219  pImageCreateInfo,
14220  allocator->GetAllocationCallbacks(),
14221  pImage);
14222  if(res >= 0)
14223  {
14224  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14225  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14226  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14227 
14228  // 2. Allocate memory using allocator.
14229  VkMemoryRequirements vkMemReq = {};
14230  bool requiresDedicatedAllocation = false;
14231  bool prefersDedicatedAllocation = false;
14232  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14233  requiresDedicatedAllocation, prefersDedicatedAllocation);
14234 
14235  res = allocator->AllocateMemory(
14236  vkMemReq,
14237  requiresDedicatedAllocation,
14238  prefersDedicatedAllocation,
14239  VK_NULL_HANDLE, // dedicatedBuffer
14240  *pImage, // dedicatedImage
14241  *pAllocationCreateInfo,
14242  suballocType,
14243  pAllocation);
14244 
14245 #if VMA_RECORDING_ENABLED
14246  if(allocator->GetRecorder() != VMA_NULL)
14247  {
14248  allocator->GetRecorder()->RecordCreateImage(
14249  allocator->GetCurrentFrameIndex(),
14250  *pImageCreateInfo,
14251  *pAllocationCreateInfo,
14252  *pAllocation);
14253  }
14254 #endif
14255 
14256  if(res >= 0)
14257  {
14258  // 3. Bind image with memory.
14259  res = allocator->BindImageMemory(*pAllocation, *pImage);
14260  if(res >= 0)
14261  {
14262  // All steps succeeded.
14263  #if VMA_STATS_STRING_ENABLED
14264  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14265  #endif
14266  if(pAllocationInfo != VMA_NULL)
14267  {
14268  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14269  }
14270 
14271  return VK_SUCCESS;
14272  }
14273  allocator->FreeMemory(*pAllocation);
14274  *pAllocation = VK_NULL_HANDLE;
14275  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14276  *pImage = VK_NULL_HANDLE;
14277  return res;
14278  }
14279  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14280  *pImage = VK_NULL_HANDLE;
14281  return res;
14282  }
14283  return res;
14284 }
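// Editor's note (not part of the original source): vmaCreateImage mirrors
// vmaCreateBuffer, with one extra decision: the suballocation type is derived
// from VkImageCreateInfo::tiling (OPTIMAL vs. LINEAR) so the allocator can
// keep linear and optimal resources apart when suballocating, which is what
// bufferImageGranularity requires.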
14285 
14286 void vmaDestroyImage(
14287  VmaAllocator allocator,
14288  VkImage image,
14289  VmaAllocation allocation)
14290 {
14291  VMA_ASSERT(allocator);
14292 
14293  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14294  {
14295  return;
14296  }
14297 
14298  VMA_DEBUG_LOG("vmaDestroyImage");
14299 
14300  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14301 
14302 #if VMA_RECORDING_ENABLED
14303  if(allocator->GetRecorder() != VMA_NULL)
14304  {
14305  allocator->GetRecorder()->RecordDestroyImage(
14306  allocator->GetCurrentFrameIndex(),
14307  allocation);
14308  }
14309 #endif
14310 
14311  if(image != VK_NULL_HANDLE)
14312  {
14313  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14314  }
14315  if(allocation != VK_NULL_HANDLE)
14316  {
14317  allocator->FreeMemory(allocation);
14318  }
14319 }
14320 
14321 #endif // #ifdef VMA_IMPLEMENTATION