Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1477 /*
1478 Define this macro to 0/1 to disable/enable support for recording functionality,
1479 available through VmaAllocatorCreateInfo::pRecordSettings.
1480 */
1481 #ifndef VMA_RECORDING_ENABLED
1482  #ifdef _WIN32
1483  #define VMA_RECORDING_ENABLED 1
1484  #else
1485  #define VMA_RECORDING_ENABLED 0
1486  #endif
1487 #endif
1488 
1489 #ifndef NOMINMAX
1490  #define NOMINMAX // For windows.h
1491 #endif
1492 
1493 #include <vulkan/vulkan.h>
1494 
1495 #if VMA_RECORDING_ENABLED
1496  #include <windows.h>
1497 #endif
1498 
1499 #if !defined(VMA_DEDICATED_ALLOCATION)
1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1501  #define VMA_DEDICATED_ALLOCATION 1
1502  #else
1503  #define VMA_DEDICATED_ALLOCATION 0
1504  #endif
1505 #endif
1506 
1516 VK_DEFINE_HANDLE(VmaAllocator)
1517 
1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1520  VmaAllocator allocator,
1521  uint32_t memoryType,
1522  VkDeviceMemory memory,
1523  VkDeviceSize size);
1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1526  VmaAllocator allocator,
1527  uint32_t memoryType,
1528  VkDeviceMemory memory,
1529  VkDeviceSize size);
1530 
1544 
1574 
1577 typedef VkFlags VmaAllocatorCreateFlags;
1578 
1583 typedef struct VmaVulkanFunctions {
1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1586  PFN_vkAllocateMemory vkAllocateMemory;
1587  PFN_vkFreeMemory vkFreeMemory;
1588  PFN_vkMapMemory vkMapMemory;
1589  PFN_vkUnmapMemory vkUnmapMemory;
1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1592  PFN_vkBindBufferMemory vkBindBufferMemory;
1593  PFN_vkBindImageMemory vkBindImageMemory;
1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1596  PFN_vkCreateBuffer vkCreateBuffer;
1597  PFN_vkDestroyBuffer vkDestroyBuffer;
1598  PFN_vkCreateImage vkCreateImage;
1599  PFN_vkDestroyImage vkDestroyImage;
1600 #if VMA_DEDICATED_ALLOCATION
1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1603 #endif
1605 
1607 typedef enum VmaRecordFlagBits {
1614 
1617 typedef VkFlags VmaRecordFlags;
1618 
1620 typedef struct VmaRecordSettings
1621 {
1631  const char* pFilePath;
1633 
1636 {
1640 
1641  VkPhysicalDevice physicalDevice;
1643 
1644  VkDevice device;
1646 
1649 
1650  const VkAllocationCallbacks* pAllocationCallbacks;
1652 
1691  const VkDeviceSize* pHeapSizeLimit;
1712 
1714 VkResult vmaCreateAllocator(
1715  const VmaAllocatorCreateInfo* pCreateInfo,
1716  VmaAllocator* pAllocator);
1717 
1719 void vmaDestroyAllocator(
1720  VmaAllocator allocator);
1721 
1727  VmaAllocator allocator,
1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1729 
1735  VmaAllocator allocator,
1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1737 
1745  VmaAllocator allocator,
1746  uint32_t memoryTypeIndex,
1747  VkMemoryPropertyFlags* pFlags);
1748 
1758  VmaAllocator allocator,
1759  uint32_t frameIndex);
1760 
1763 typedef struct VmaStatInfo
1764 {
1766  uint32_t blockCount;
1772  VkDeviceSize usedBytes;
1774  VkDeviceSize unusedBytes;
1777 } VmaStatInfo;
1778 
1780 typedef struct VmaStats
1781 {
1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1785 } VmaStats;
1786 
1788 void vmaCalculateStats(
1789  VmaAllocator allocator,
1790  VmaStats* pStats);
1791 
1792 #define VMA_STATS_STRING_ENABLED 1
1793 
1794 #if VMA_STATS_STRING_ENABLED
1795 
1797 
1799 void vmaBuildStatsString(
1800  VmaAllocator allocator,
1801  char** ppStatsString,
1802  VkBool32 detailedMap);
1803 
1804 void vmaFreeStatsString(
1805  VmaAllocator allocator,
1806  char* pStatsString);
1807 
1808 #endif // #if VMA_STATS_STRING_ENABLED
1809 
1818 VK_DEFINE_HANDLE(VmaPool)
1819 
1820 typedef enum VmaMemoryUsage
1821 {
1870 } VmaMemoryUsage;
1871 
1886 
1941 
1954 
1964 
1971 
1975 
1977 {
1990  VkMemoryPropertyFlags requiredFlags;
1995  VkMemoryPropertyFlags preferredFlags;
2003  uint32_t memoryTypeBits;
2016  void* pUserData;
2018 
2035 VkResult vmaFindMemoryTypeIndex(
2036  VmaAllocator allocator,
2037  uint32_t memoryTypeBits,
2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2039  uint32_t* pMemoryTypeIndex);
2040 
2054  VmaAllocator allocator,
2055  const VkBufferCreateInfo* pBufferCreateInfo,
2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2057  uint32_t* pMemoryTypeIndex);
2058 
2072  VmaAllocator allocator,
2073  const VkImageCreateInfo* pImageCreateInfo,
2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2075  uint32_t* pMemoryTypeIndex);
2076 
2097 
2114 
2125 
2131 
2134 typedef VkFlags VmaPoolCreateFlags;
2135 
2138 typedef struct VmaPoolCreateInfo {
2153  VkDeviceSize blockSize;
2182 
2185 typedef struct VmaPoolStats {
2188  VkDeviceSize size;
2191  VkDeviceSize unusedSize;
2204  VkDeviceSize unusedRangeSizeMax;
2207  size_t blockCount;
2208 } VmaPoolStats;
2209 
2216 VkResult vmaCreatePool(
2217  VmaAllocator allocator,
2218  const VmaPoolCreateInfo* pCreateInfo,
2219  VmaPool* pPool);
2220 
2223 void vmaDestroyPool(
2224  VmaAllocator allocator,
2225  VmaPool pool);
2226 
2233 void vmaGetPoolStats(
2234  VmaAllocator allocator,
2235  VmaPool pool,
2236  VmaPoolStats* pPoolStats);
2237 
2245  VmaAllocator allocator,
2246  VmaPool pool,
2247  size_t* pLostAllocationCount);
2248 
2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2264 
2289 VK_DEFINE_HANDLE(VmaAllocation)
2290 
2291 
2293 typedef struct VmaAllocationInfo {
2298  uint32_t memoryType;
2307  VkDeviceMemory deviceMemory;
2312  VkDeviceSize offset;
2317  VkDeviceSize size;
2331  void* pUserData;
2333 
2344 VkResult vmaAllocateMemory(
2345  VmaAllocator allocator,
2346  const VkMemoryRequirements* pVkMemoryRequirements,
2347  const VmaAllocationCreateInfo* pCreateInfo,
2348  VmaAllocation* pAllocation,
2349  VmaAllocationInfo* pAllocationInfo);
2350 
2358  VmaAllocator allocator,
2359  VkBuffer buffer,
2360  const VmaAllocationCreateInfo* pCreateInfo,
2361  VmaAllocation* pAllocation,
2362  VmaAllocationInfo* pAllocationInfo);
2363 
2365 VkResult vmaAllocateMemoryForImage(
2366  VmaAllocator allocator,
2367  VkImage image,
2368  const VmaAllocationCreateInfo* pCreateInfo,
2369  VmaAllocation* pAllocation,
2370  VmaAllocationInfo* pAllocationInfo);
2371 
2373 void vmaFreeMemory(
2374  VmaAllocator allocator,
2375  VmaAllocation allocation);
2376 
2394  VmaAllocator allocator,
2395  VmaAllocation allocation,
2396  VmaAllocationInfo* pAllocationInfo);
2397 
2412 VkBool32 vmaTouchAllocation(
2413  VmaAllocator allocator,
2414  VmaAllocation allocation);
2415 
2430  VmaAllocator allocator,
2431  VmaAllocation allocation,
2432  void* pUserData);
2433 
2445  VmaAllocator allocator,
2446  VmaAllocation* pAllocation);
2447 
2482 VkResult vmaMapMemory(
2483  VmaAllocator allocator,
2484  VmaAllocation allocation,
2485  void** ppData);
2486 
2491 void vmaUnmapMemory(
2492  VmaAllocator allocator,
2493  VmaAllocation allocation);
2494 
2507 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2508 
2521 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2522 
2539 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2540 
2542 typedef struct VmaDefragmentationInfo {
2547  VkDeviceSize maxBytesToMove;
2554 
2556 typedef struct VmaDefragmentationStats {
2558  VkDeviceSize bytesMoved;
2560  VkDeviceSize bytesFreed;
2566 
2605 VkResult vmaDefragment(
2606  VmaAllocator allocator,
2607  VmaAllocation* pAllocations,
2608  size_t allocationCount,
2609  VkBool32* pAllocationsChanged,
2610  const VmaDefragmentationInfo *pDefragmentationInfo,
2611  VmaDefragmentationStats* pDefragmentationStats);
2612 
2625 VkResult vmaBindBufferMemory(
2626  VmaAllocator allocator,
2627  VmaAllocation allocation,
2628  VkBuffer buffer);
2629 
2642 VkResult vmaBindImageMemory(
2643  VmaAllocator allocator,
2644  VmaAllocation allocation,
2645  VkImage image);
2646 
2673 VkResult vmaCreateBuffer(
2674  VmaAllocator allocator,
2675  const VkBufferCreateInfo* pBufferCreateInfo,
2676  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2677  VkBuffer* pBuffer,
2678  VmaAllocation* pAllocation,
2679  VmaAllocationInfo* pAllocationInfo);
2680 
2692 void vmaDestroyBuffer(
2693  VmaAllocator allocator,
2694  VkBuffer buffer,
2695  VmaAllocation allocation);
2696 
2698 VkResult vmaCreateImage(
2699  VmaAllocator allocator,
2700  const VkImageCreateInfo* pImageCreateInfo,
2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2702  VkImage* pImage,
2703  VmaAllocation* pAllocation,
2704  VmaAllocationInfo* pAllocationInfo);
2705 
2717 void vmaDestroyImage(
2718  VmaAllocator allocator,
2719  VkImage image,
2720  VmaAllocation allocation);
2721 
2722 #ifdef __cplusplus
2723 }
2724 #endif
2725 
2726 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2727 
2728 // For Visual Studio IntelliSense.
2729 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2730 #define VMA_IMPLEMENTATION
2731 #endif
2732 
2733 #ifdef VMA_IMPLEMENTATION
2734 #undef VMA_IMPLEMENTATION
2735 
2736 #include <cstdint>
2737 #include <cstdlib>
2738 #include <cstring>
2739 
2740 /*******************************************************************************
2741 CONFIGURATION SECTION
2742 
2743 Define some of these macros before each #include of this header or change them
2744 here if you need other than default behavior depending on your environment.
2745 */
2746 
2747 /*
2748 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2749 internally, like:
2750 
2751  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2752 
2753 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2754 VmaAllocatorCreateInfo::pVulkanFunctions.
2755 */
2756 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2757 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2758 #endif
2759 
2760 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2761 //#define VMA_USE_STL_CONTAINERS 1
2762 
2763 /* Set this macro to 1 to make the library including and using STL containers:
2764 std::pair, std::vector, std::list, std::unordered_map.
2765 
2766 Set it to 0 or undefined to make the library using its own implementation of
2767 the containers.
2768 */
2769 #if VMA_USE_STL_CONTAINERS
2770  #define VMA_USE_STL_VECTOR 1
2771  #define VMA_USE_STL_UNORDERED_MAP 1
2772  #define VMA_USE_STL_LIST 1
2773 #endif
2774 
2775 #if VMA_USE_STL_VECTOR
2776  #include <vector>
2777 #endif
2778 
2779 #if VMA_USE_STL_UNORDERED_MAP
2780  #include <unordered_map>
2781 #endif
2782 
2783 #if VMA_USE_STL_LIST
2784  #include <list>
2785 #endif
2786 
2787 /*
2788 Following headers are used in this CONFIGURATION section only, so feel free to
2789 remove them if not needed.
2790 */
2791 #include <cassert> // for assert
2792 #include <algorithm> // for min, max
2793 #include <mutex> // for std::mutex
2794 #include <atomic> // for std::atomic
2795 
2796 #ifndef VMA_NULL
2797  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2798  #define VMA_NULL nullptr
2799 #endif
2800 
2801 #if defined(__APPLE__) || defined(__ANDROID__)
2802 #include <cstdlib>
2803 void *aligned_alloc(size_t alignment, size_t size)
2804 {
2805  // alignment must be >= sizeof(void*)
2806  if(alignment < sizeof(void*))
2807  {
2808  alignment = sizeof(void*);
2809  }
2810 
2811  void *pointer;
2812  if(posix_memalign(&pointer, alignment, size) == 0)
2813  return pointer;
2814  return VMA_NULL;
2815 }
2816 #endif
2817 
2818 // If your compiler is not compatible with C++11 and definition of
2819 // aligned_alloc() function is missing, uncommenting the following line may help:
2820 
2821 //#include <malloc.h>
2822 
2823 // Normal assert to check for programmer's errors, especially in Debug configuration.
2824 #ifndef VMA_ASSERT
2825  #ifdef _DEBUG
2826  #define VMA_ASSERT(expr) assert(expr)
2827  #else
2828  #define VMA_ASSERT(expr)
2829  #endif
2830 #endif
2831 
2832 // Assert that will be called very often, like inside data structures e.g. operator[].
2833 // Making it non-empty can make program slow.
2834 #ifndef VMA_HEAVY_ASSERT
2835  #ifdef _DEBUG
2836  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2837  #else
2838  #define VMA_HEAVY_ASSERT(expr)
2839  #endif
2840 #endif
2841 
2842 #ifndef VMA_ALIGN_OF
2843  #define VMA_ALIGN_OF(type) (__alignof(type))
2844 #endif
2845 
2846 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2847  #if defined(_WIN32)
2848  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2849  #else
2850  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2851  #endif
2852 #endif
2853 
2854 #ifndef VMA_SYSTEM_FREE
2855  #if defined(_WIN32)
2856  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2857  #else
2858  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2859  #endif
2860 #endif
2861 
2862 #ifndef VMA_MIN
2863  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2864 #endif
2865 
2866 #ifndef VMA_MAX
2867  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2868 #endif
2869 
2870 #ifndef VMA_SWAP
2871  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2872 #endif
2873 
2874 #ifndef VMA_SORT
2875  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2876 #endif
2877 
2878 #ifndef VMA_DEBUG_LOG
2879  #define VMA_DEBUG_LOG(format, ...)
2880  /*
2881  #define VMA_DEBUG_LOG(format, ...) do { \
2882  printf(format, __VA_ARGS__); \
2883  printf("\n"); \
2884  } while(false)
2885  */
2886 #endif
2887 
2888 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2889 #if VMA_STATS_STRING_ENABLED
// Writes the decimal representation of (num) into outStr,
// truncated to at most strLen-1 characters plus a terminating NUL.
static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
{
    snprintf(outStr, strLen, "%u", (unsigned int)num);
}
// Writes the decimal representation of (num) into outStr,
// truncated to at most strLen-1 characters plus a terminating NUL.
static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
{
    snprintf(outStr, strLen, "%llu", (unsigned long long)num);
}
// Writes a textual representation of pointer (ptr) into outStr
// (format is implementation-defined, as with printf "%p").
static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
{
    snprintf(outStr, strLen, "%p", ptr);
}
2902 #endif
2903 
2904 #ifndef VMA_MUTEX
// Default mutex implementation: a thin wrapper over std::mutex, used when the
// user does not provide their own VMA_MUTEX type.
class VmaMutex
{
public:
    VmaMutex() { }
    ~VmaMutex() { }
    // Blocks until the mutex is acquired.
    void Lock() { m_Mutex.lock(); }
    // Releases the mutex; must be called by the thread that locked it.
    void Unlock() { m_Mutex.unlock(); }
private:
    std::mutex m_Mutex;
};
2915  #define VMA_MUTEX VmaMutex
2916 #endif
2917 
2918 /*
2919 If providing your own implementation, you need to implement a subset of std::atomic:
2920 
2921 - Constructor(uint32_t desired)
2922 - uint32_t load() const
2923 - void store(uint32_t desired)
2924 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2925 */
2926 #ifndef VMA_ATOMIC_UINT32
2927  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2928 #endif
2929 
2930 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2931 
2935  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2936 #endif
2937 
2938 #ifndef VMA_DEBUG_ALIGNMENT
2939 
2943  #define VMA_DEBUG_ALIGNMENT (1)
2944 #endif
2945 
2946 #ifndef VMA_DEBUG_MARGIN
2947 
2951  #define VMA_DEBUG_MARGIN (0)
2952 #endif
2953 
2954 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2955 
2959  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2960 #endif
2961 
2962 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2963 
2968  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2969 #endif
2970 
2971 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2972 
2976  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2977 #endif
2978 
2979 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2980 
2984  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2985 #endif
2986 
2987 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2988  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2990 #endif
2991 
2992 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2993  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2995 #endif
2996 
2997 #ifndef VMA_CLASS_NO_COPY
2998  #define VMA_CLASS_NO_COPY(className) \
2999  private: \
3000  className(const className&) = delete; \
3001  className& operator=(const className&) = delete;
3002 #endif
3003 
3004 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3005 
3006 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3007 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3008 
3009 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3010 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3011 
3012 /*******************************************************************************
3013 END OF CONFIGURATION
3014 */
3015 
3016 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3017  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3018 
// Returns the number of bits set to 1 in (v), using the classic SWAR
// (SIMD-within-a-register) parallel bit count.
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t count = v - ((v >> 1) & 0x55555555);                // sums of 2-bit groups
    count = ((count >> 2) & 0x33333333) + (count & 0x33333333);  // sums of 4-bit groups
    count = ((count >> 4) + count) & 0x0F0F0F0F;                 // sums of 8-bit groups
    count = ((count >> 8) + count) & 0x00FF00FF;                 // sums of 16-bit groups
    return ((count >> 16) + count) & 0x0000FFFF;                 // final 32-bit sum
}
3029 
// Aligns given value up to the nearest multiple of (align).
// For example: VmaAlignUp(11, 8) = 16.
// Use unsigned integer types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T bumped = val + align - 1;
    return bumped - bumped % align;
}
// Aligns given value down to nearest multiple of align value.
// For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
3044 
// Division with mathematical rounding to the nearest integer
// (for nonnegative operands).
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    const T half = y / (T)2;
    return (x + half) / y;
}
3051 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so clearing its lowest set bit
    // yields zero.
    const T withLowestBitCleared = x & (x - 1);
    return withLowestBitCleared == 0;
}
3062 
// Returns the smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    // Smear the highest set bit into every lower position,
    // then add 1 to reach the next power of two.
    for(uint32_t shift = 1; shift <= 16; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
// 64-bit overload: returns the smallest power of 2 greater than or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    // Smear the highest set bit into every lower position (needs one more
    // doubling step than the 32-bit version), then add 1.
    for(uint32_t shift = 1; shift <= 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v + 1;
}
3087 
// Returns the largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit into every lower position...
    for(uint32_t shift = 1; shift <= 16; shift <<= 1)
    {
        v |= v >> shift;
    }
    // ...then keep only the most significant bit.
    return v ^ (v >> 1);
}
// 64-bit overload: returns the largest power of 2 less than or equal to v.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Smear the highest set bit into every lower position (one more doubling
    // step than the 32-bit version), then keep only the most significant bit.
    for(uint32_t shift = 1; shift <= 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    return v ^ (v >> 1);
}
3110 
3111 static inline bool VmaStrIsEmpty(const char* pStr)
3112 {
3113  return pStr == VMA_NULL || *pStr == '\0';
3114 }
3115 
3116 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3117 {
3118  switch(algorithm)
3119  {
3121  return "Linear";
3123  return "Buddy";
3124  case 0:
3125  return "Default";
3126  default:
3127  VMA_ASSERT(0);
3128  return "";
3129  }
3130 }
3131 
3132 #ifndef VMA_SORT
3133 
// Lomuto-style partition step used by VmaQuickSort below.
// Partitions [beg, end) around the last element (the pivot) and returns an
// iterator to the pivot's final, sorted position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element of the range.
    Iterator insertIndex = beg; // End of the "less than pivot" prefix built so far.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot to its final position, between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
3156 
// Recursive quicksort over [beg, end). Used as the fallback implementation of
// VMA_SORT when the user did not provide one (see #define VMA_SORT below).
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // Pivot ends up at 'it'; recurse on the two sub-ranges around it.
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
3167 
3168 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3169 
3170 #endif // #ifndef VMA_SORT
3171 
3172 /*
3173 Returns true if two memory blocks occupy overlapping pages.
3174 ResourceA must be in less memory offset than ResourceB.
3175 
3176 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3177 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3178 */
3179 static inline bool VmaBlocksOnSamePage(
3180  VkDeviceSize resourceAOffset,
3181  VkDeviceSize resourceASize,
3182  VkDeviceSize resourceBOffset,
3183  VkDeviceSize pageSize)
3184 {
3185  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3186  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3187  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3188  VkDeviceSize resourceBStart = resourceBOffset;
3189  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3190  return resourceAEndPage == resourceBStartPage;
3191 }
3192 
// Internal classification of a suballocation's contents. Used by
// VmaIsBufferImageGranularityConflict() to decide when neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Contents unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3203 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so that suballocType1 <= suballocType2; the switch below
    // then only has to handle each unordered pair once.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown contents: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal never conflicts.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
3244 
3245 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3246 {
3247  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3248  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3249  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3250  {
3251  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3252  }
3253 }
3254 
3255 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3256 {
3257  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3258  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3259  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3260  {
3261  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3262  {
3263  return false;
3264  }
3265  }
3266  return true;
3267 }
3268 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// When useMutex is false the lock is a no-op, which lets callers skip
// synchronization in single-threaded configurations.
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when synchronization is disabled.
};
3294 
3295 #if VMA_DEBUG_GLOBAL_MUTEX
3296  static VMA_MUTEX gDebugGlobalMutex;
3297  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3298 #else
3299  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3300 #endif
3301 
3302 // Minimum size of a free suballocation to register it in the free suballocation collection.
3303 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3304 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp) - i.e. a lower bound.

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        // Overflow-safe midpoint.
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1; // Element at mid is less than key: search above it.
        }
        else
        {
            hi = mid; // Element at mid is not less: it may be the answer.
        }
    }
    return beg + lo;
}
3332 
3334 // Memory allocation
3335 
3336 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3337 {
3338  if((pAllocationCallbacks != VMA_NULL) &&
3339  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3340  {
3341  return (*pAllocationCallbacks->pfnAllocation)(
3342  pAllocationCallbacks->pUserData,
3343  size,
3344  alignment,
3345  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3346  }
3347  else
3348  {
3349  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3350  }
3351 }
3352 
3353 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3354 {
3355  if((pAllocationCallbacks != VMA_NULL) &&
3356  (pAllocationCallbacks->pfnFree != VMA_NULL))
3357  {
3358  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3359  }
3360  else
3361  {
3362  VMA_SYSTEM_FREE(ptr);
3363  }
3364 }
3365 
// Allocates raw, suitably aligned storage for a single object of type T.
// Does NOT construct the object - see the vma_new() macro below.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3371 
// Allocates raw, suitably aligned storage for an array of (count) objects of
// type T. Does NOT construct the elements - see the vma_new_array() macro below.
// NOTE(review): sizeof(T) * count is not checked for overflow; presumably all
// callers pass internally-bounded counts - confirm before exposing to external input.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3377 
// Placement-new helpers: allocate raw storage through the allocation
// callbacks, then default-construct the object(s) in it.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3381 
3382 template<typename T>
3383 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3384 {
3385  ptr->~T();
3386  VmaFree(pAllocationCallbacks, ptr);
3387 }
3388 
// Destroys and frees an array created with vma_new_array().
// Safe to call with ptr == VMA_NULL.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy elements back-to-front, mirroring construction order.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
3401 
// STL-compatible allocator that routes all allocations through the
// user-provided VkAllocationCallbacks (or the system allocator when null).
// Used with the library's containers when VMA_USE_STL_* is enabled.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they share callbacks, i.e. memory obtained
    // from one can be freed by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
3429 
3430 #if VMA_USE_STL_VECTOR
3431 
3432 #define VmaVector std::vector
3433 
// Inserts (item) into (vec) at position (index), shifting later elements.
// std::vector variant, used when VMA_USE_STL_VECTOR is enabled.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
3439 
// Removes the element at position (index) from (vec), shifting later elements.
// std::vector variant, used when VMA_USE_STL_VECTOR is enabled.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
3445 
3446 #else // #if VMA_USE_STL_VECTOR
3447 
3448 /* Class with interface compatible with subset of std::vector.
3449 T must be POD because constructors and destructors are not called and memcpy is
3450 used for these objects. */
3451 template<typename T, typename AllocatorT>
3452 class VmaVector
3453 {
3454 public:
3455  typedef T value_type;
3456 
3457  VmaVector(const AllocatorT& allocator) :
3458  m_Allocator(allocator),
3459  m_pArray(VMA_NULL),
3460  m_Count(0),
3461  m_Capacity(0)
3462  {
3463  }
3464 
3465  VmaVector(size_t count, const AllocatorT& allocator) :
3466  m_Allocator(allocator),
3467  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3468  m_Count(count),
3469  m_Capacity(count)
3470  {
3471  }
3472 
3473  VmaVector(const VmaVector<T, AllocatorT>& src) :
3474  m_Allocator(src.m_Allocator),
3475  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3476  m_Count(src.m_Count),
3477  m_Capacity(src.m_Count)
3478  {
3479  if(m_Count != 0)
3480  {
3481  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3482  }
3483  }
3484 
3485  ~VmaVector()
3486  {
3487  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3488  }
3489 
3490  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3491  {
3492  if(&rhs != this)
3493  {
3494  resize(rhs.m_Count);
3495  if(m_Count != 0)
3496  {
3497  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3498  }
3499  }
3500  return *this;
3501  }
3502 
3503  bool empty() const { return m_Count == 0; }
3504  size_t size() const { return m_Count; }
3505  T* data() { return m_pArray; }
3506  const T* data() const { return m_pArray; }
3507 
3508  T& operator[](size_t index)
3509  {
3510  VMA_HEAVY_ASSERT(index < m_Count);
3511  return m_pArray[index];
3512  }
3513  const T& operator[](size_t index) const
3514  {
3515  VMA_HEAVY_ASSERT(index < m_Count);
3516  return m_pArray[index];
3517  }
3518 
3519  T& front()
3520  {
3521  VMA_HEAVY_ASSERT(m_Count > 0);
3522  return m_pArray[0];
3523  }
3524  const T& front() const
3525  {
3526  VMA_HEAVY_ASSERT(m_Count > 0);
3527  return m_pArray[0];
3528  }
3529  T& back()
3530  {
3531  VMA_HEAVY_ASSERT(m_Count > 0);
3532  return m_pArray[m_Count - 1];
3533  }
3534  const T& back() const
3535  {
3536  VMA_HEAVY_ASSERT(m_Count > 0);
3537  return m_pArray[m_Count - 1];
3538  }
3539 
3540  void reserve(size_t newCapacity, bool freeMemory = false)
3541  {
3542  newCapacity = VMA_MAX(newCapacity, m_Count);
3543 
3544  if((newCapacity < m_Capacity) && !freeMemory)
3545  {
3546  newCapacity = m_Capacity;
3547  }
3548 
3549  if(newCapacity != m_Capacity)
3550  {
3551  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3552  if(m_Count != 0)
3553  {
3554  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3555  }
3556  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3557  m_Capacity = newCapacity;
3558  m_pArray = newArray;
3559  }
3560  }
3561 
3562  void resize(size_t newCount, bool freeMemory = false)
3563  {
3564  size_t newCapacity = m_Capacity;
3565  if(newCount > m_Capacity)
3566  {
3567  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3568  }
3569  else if(freeMemory)
3570  {
3571  newCapacity = newCount;
3572  }
3573 
3574  if(newCapacity != m_Capacity)
3575  {
3576  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3577  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3578  if(elementsToCopy != 0)
3579  {
3580  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3581  }
3582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3583  m_Capacity = newCapacity;
3584  m_pArray = newArray;
3585  }
3586 
3587  m_Count = newCount;
3588  }
3589 
3590  void clear(bool freeMemory = false)
3591  {
3592  resize(0, freeMemory);
3593  }
3594 
3595  void insert(size_t index, const T& src)
3596  {
3597  VMA_HEAVY_ASSERT(index <= m_Count);
3598  const size_t oldCount = size();
3599  resize(oldCount + 1);
3600  if(index < oldCount)
3601  {
3602  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3603  }
3604  m_pArray[index] = src;
3605  }
3606 
3607  void remove(size_t index)
3608  {
3609  VMA_HEAVY_ASSERT(index < m_Count);
3610  const size_t oldCount = size();
3611  if(index < oldCount - 1)
3612  {
3613  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3614  }
3615  resize(oldCount - 1);
3616  }
3617 
3618  void push_back(const T& src)
3619  {
3620  const size_t newIndex = size();
3621  resize(newIndex + 1);
3622  m_pArray[newIndex] = src;
3623  }
3624 
3625  void pop_back()
3626  {
3627  VMA_HEAVY_ASSERT(m_Count > 0);
3628  resize(size() - 1);
3629  }
3630 
3631  void push_front(const T& src)
3632  {
3633  insert(0, src);
3634  }
3635 
3636  void pop_front()
3637  {
3638  VMA_HEAVY_ASSERT(m_Count > 0);
3639  remove(0);
3640  }
3641 
3642  typedef T* iterator;
3643 
3644  iterator begin() { return m_pArray; }
3645  iterator end() { return m_pArray + m_Count; }
3646 
3647 private:
3648  AllocatorT m_Allocator;
3649  T* m_pArray;
3650  size_t m_Count;
3651  size_t m_Capacity;
3652 };
3653 
// Free-function shim so insertion code compiles against both VmaVector and
// std::vector (see the VMA_USE_STL_VECTOR branch above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
3659 
// Free-function shim so removal code compiles against both VmaVector and
// std::vector (see the VMA_USE_STL_VECTOR branch above).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
3665 
3666 #endif // #if VMA_USE_STL_VECTOR
3667 
// Inserts value into a vector kept sorted by CmpLess and returns the index
// at which it was inserted (lower-bound position).
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBegin = vector.data();
    typename VectorT::value_type* const pEnd = pBegin + vector.size();
    const size_t insertIndex = VmaBinaryFindFirstNotLess(pBegin, pEnd, value, CmpLess()) - pBegin;
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3679 
// Removes the first element equivalent to value from a sorted vector.
// Returns true if an element was found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Lower bound found an element; it matches only if neither orders before
    // the other (equivalence under CmpLess).
    const bool found =
        (it != vector.end()) &&
        !comparator(*it, value) &&
        !comparator(value, *it);
    if(found)
    {
        const size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
    }
    return found;
}
3697 
3698 template<typename CmpLess, typename IterT, typename KeyT>
3699 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3700 {
3701  CmpLess comparator;
3702  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3703  beg, end, value, comparator);
3704  if(it == end ||
3705  (!comparator(*it, value) && !comparator(value, *it)))
3706  {
3707  return it;
3708  }
3709  return end;
3710 }
3711 
3713 // class VmaPoolAllocator
3714 
3715 /*
3716 Allocator for objects of type T using a list of arrays (pools) to speed up
3717 allocation. Number of elements that can be allocated is not bounded because
3718 allocator can create multiple blocks.
3719 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any items still handed out become invalid.
    void Clear();
    // Returns an uninitialized item slot; creates a new block when all are full.
    T* Alloc();
    // Returns ptr to its owning block's free list. ptr must originate from
    // this allocator (asserted otherwise).
    void Free(T* ptr);

private:
    // Each slot is either a live T or, while free, a link in the block's
    // singly-linked free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of m_ItemsPerBlock items plus its free-list head.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
3750 
// Stores the callbacks and block size; item blocks are created lazily on the
// first Alloc() that finds no free slot.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3759 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all blocks; outstanding items must not be used afterwards.
    Clear();
}
3765 
3766 template<typename T>
3767 void VmaPoolAllocator<T>::Clear()
3768 {
3769  for(size_t i = m_ItemBlocks.size(); i--; )
3770  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3771  m_ItemBlocks.clear();
3772 }
3773 
3774 template<typename T>
3775 T* VmaPoolAllocator<T>::Alloc()
3776 {
3777  for(size_t i = m_ItemBlocks.size(); i--; )
3778  {
3779  ItemBlock& block = m_ItemBlocks[i];
3780  // This block has some free items: Use first one.
3781  if(block.FirstFreeIndex != UINT32_MAX)
3782  {
3783  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3784  block.FirstFreeIndex = pItem->NextFreeIndex;
3785  return &pItem->Value;
3786  }
3787  }
3788 
3789  // No block has free item: Create new one and use it.
3790  ItemBlock& newBlock = CreateNewBlock();
3791  Item* const pItem = &newBlock.pItems[0];
3792  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3793  return &pItem->Value;
3794 }
3795 
3796 template<typename T>
3797 void VmaPoolAllocator<T>::Free(T* ptr)
3798 {
3799  // Search all memory blocks to find ptr.
3800  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3801  {
3802  ItemBlock& block = m_ItemBlocks[i];
3803 
3804  // Casting to union.
3805  Item* pItemPtr;
3806  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3807 
3808  // Check if pItemPtr is in address range of this block.
3809  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3810  {
3811  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3812  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3813  block.FirstFreeIndex = index;
3814  return;
3815  }
3816  }
3817  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3818 }
3819 
// Allocates one more block and links all its items into a free list.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // (Writing through newBlock.pItems after push_back is fine: the pushed
    // copy shares the same heap array via the pItems pointer.)
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
3834 
3836 // class VmaRawList, VmaList
3837 
3838 #if VMA_USE_STL_LIST
3839 
3840 #define VmaList std::list
3841 
3842 #else // #if VMA_USE_STL_LIST
3843 
// Node of the doubly linked list below; stores the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
3851 
3852 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items; front/back/count are reset.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() are only meaningful when the list is not empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Versions without a value leave the new item's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool all nodes come from.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
3896 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3906 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases the node memory wholesale.
}
3913 
3914 template<typename T>
3915 void VmaRawList<T>::Clear()
3916 {
3917  if(IsEmpty() == false)
3918  {
3919  ItemType* pItem = m_pBack;
3920  while(pItem != VMA_NULL)
3921  {
3922  ItemType* const pPrevItem = pItem->pPrev;
3923  m_ItemAllocator.Free(pItem);
3924  pItem = pPrevItem;
3925  }
3926  m_pFront = VMA_NULL;
3927  m_pBack = VMA_NULL;
3928  m_Count = 0;
3929  }
3930 }
3931 
3932 template<typename T>
3933 VmaListItem<T>* VmaRawList<T>::PushBack()
3934 {
3935  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3936  pNewItem->pNext = VMA_NULL;
3937  if(IsEmpty())
3938  {
3939  pNewItem->pPrev = VMA_NULL;
3940  m_pFront = pNewItem;
3941  m_pBack = pNewItem;
3942  m_Count = 1;
3943  }
3944  else
3945  {
3946  pNewItem->pPrev = m_pBack;
3947  m_pBack->pNext = pNewItem;
3948  m_pBack = pNewItem;
3949  ++m_Count;
3950  }
3951  return pNewItem;
3952 }
3953 
3954 template<typename T>
3955 VmaListItem<T>* VmaRawList<T>::PushFront()
3956 {
3957  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3958  pNewItem->pPrev = VMA_NULL;
3959  if(IsEmpty())
3960  {
3961  pNewItem->pNext = VMA_NULL;
3962  m_pFront = pNewItem;
3963  m_pBack = pNewItem;
3964  m_Count = 1;
3965  }
3966  else
3967  {
3968  pNewItem->pNext = m_pFront;
3969  m_pFront->pPrev = pNewItem;
3970  m_pFront = pNewItem;
3971  ++m_Count;
3972  }
3973  return pNewItem;
3974 }
3975 
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    // Append a node, then copy the payload into it.
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
3983 
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    // Prepend a node, then copy the payload into it.
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
3991 
// Removes and frees the back item. List must not be empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
    // NOTE(review): when the last item is popped, m_pFront still points at
    // the freed node; the next Push* overwrites it, but Front() must not be
    // read while the list is empty - confirm callers obey this.
}
4006 
// Removes and frees the front item. List must not be empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
    // NOTE(review): mirrors PopBack - m_pBack may briefly dangle once the
    // list becomes empty; Back() must not be read while empty.
}
4021 
// Unlinks pItem from anywhere in the list and returns it to the pool.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor link, or the front pointer when removing the head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor link, or the back pointer when removing the tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4051 
// Inserts a new (uninitialized) item before pItem; a null pItem appends.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the head: new item becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4077 
// Inserts a new (uninitialized) item after pItem; a null pItem prepends.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the tail: new item becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4103 
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    // Insert the node, then copy the payload into it.
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
4111 
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    // Insert the node, then copy the payload into it.
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
4119 
/* std::list-like wrapper over VmaRawList with bidirectional iterators.
AllocatorT is expected to expose m_pCallbacks (VmaStlAllocator-compatible). */
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator; a null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is invalid.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it`; inserting before end() appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4304 
4305 #endif // #if VMA_USE_STL_LIST
4306 
4308 // class VmaMap
4309 
4310 // Unused in this version.
4311 #if 0
4312 
4313 #if VMA_USE_STL_UNORDERED_MAP
4314 
4315 #define VmaPair std::pair
4316 
4317 #define VMA_MAP_TYPE(KeyT, ValueT) \
4318  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4319 
4320 #else // #if VMA_USE_STL_UNORDERED_MAP
4321 
// Minimal std::pair substitute for the map below.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4331 
4332 /* Class compatible with subset of interface of std::unordered_map.
4333 KeyT, ValueT must be POD because they will be stored in VmaVector.
4334 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into m_Vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair keeping m_Vector sorted by key.
    void insert(const PairType& pair);
    // Returns iterator to a pair with the given key, or end().
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by `first` (the key); lookups use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4354 
4355 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4356 
// Orders pairs (or a pair against a bare key) by the key member only.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4369 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search the sorted vector for the lower-bound index, then insert.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4380 
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Lower-bound search by key, then verify an exact match.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4398 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Convert the pointer-iterator back to an index for the vector helper.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4404 
4405 #endif // #if VMA_USE_STL_UNORDERED_MAP
4406 
4407 #endif // #if 0
4408 
4410 
4411 class VmaDeviceMemoryBlock;
4412 
4413 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4414 
/* Internal representation of a single allocation (the VmaAllocation handle).
After construction it is in TYPE_NONE state until one of the Init* methods
selects BLOCK (suballocation of a VmaDeviceMemoryBlock) or DEDICATED (owns
its own VkDeviceMemory); the union below holds the state for the active type. */
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount marking a persistently mapped allocation.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData is an owned string.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // A nonzero map refcount here means a vmaMapMemory leak by the user.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns a TYPE_NONE allocation into a block suballocation.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the allocation directly in the "lost" state (no block).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Re-points an existing block allocation at another block/offset.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Only valid for BLOCK-type allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX; // "min over zero ranges" sentinel
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Only the member matching m_Type is active.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4631 
4632 /*
4633 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4634 allocated memory block or free.
4635 */
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of this region from the start of the memory block.
    VkDeviceSize size;         // Size of this region in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; null items represent free space.
    VmaSuballocationType type; // Kind of resource stored here (VmaSuballocationType), or free.
};
4643 
4644 // Comparator for offsets.
4645 struct VmaSuballocationOffsetLess
4646 {
4647  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4648  {
4649  return lhs.offset < rhs.offset;
4650  }
4651 };
4652 struct VmaSuballocationOffsetGreater
4653 {
4654  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4655  {
4656  return lhs.offset > rhs.offset;
4657  }
4658 };
4659 
// Sequence of suballocations describing one memory block, using VMA's internal
// list container and CPU-allocation callbacks.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes (1 MiB).
// Used by VmaAllocationRequest::CalcCost() to weigh making existing
// allocations lost against wasted free space.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4664 
4665 /*
4666 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4667 
4668 If canMakeOtherLost was false:
4669 - item points to a FREE suballocation.
4670 - itemsToMakeLostCount is 0.
4671 
4672 If canMakeOtherLost was true:
4673 - item points to first of sequence of suballocations, which are either FREE,
4674  or point to VmaAllocations that can become lost.
4675 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4676  the requested allocation to succeed.
4677 */
4678 struct VmaAllocationRequest
4679 {
4680  VkDeviceSize offset;
4681  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4682  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4683  VmaSuballocationList::iterator item;
4684  size_t itemsToMakeLostCount;
4685  void* customData;
4686 
4687  VkDeviceSize CalcCost() const
4688  {
4689  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4690  }
4691 };
4692 
4693 /*
4694 Data structure used for bookkeeping of allocations and unused ranges of memory
4695 in a single VkDeviceMemory block.
4696 */
// Abstract interface for bookkeeping of allocations and unused ranges inside a
// single VkDeviceMemory block. Concrete strategies below: Generic, Linear, Buddy.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Always call once after construction. Derived classes extend this.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    // Total size of the memory block described by this metadata.
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // NOTE(review): presumably makes lost the allocations referenced by
    // pAllocationRequest so that the request can proceed — confirm in implementations.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes' PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
4778 
// Used inside Validate() implementations: asserts `cond` (debug builds) and
// makes the enclosing bool-returning function return false when it fails.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
4783 
// Default metadata implementation: keeps every region of the block (taken and
// free) in a suballocation list, plus a by-size index of larger free regions.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // m_Suballocations holds both taken and free items, so subtract the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of FREE items currently in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all FREE items; kept in sync incrementally.
    VkDeviceSize m_SumFreeSize;
    // All suballocations of the block, taken and free.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4874 
4875 /*
4876 Allocations and their references in internal data structure look like this:
4877 
4878 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4879 
4880  0 +-------+
4881  | |
4882  | |
4883  | |
4884  +-------+
4885  | Alloc | 1st[m_1stNullItemsBeginCount]
4886  +-------+
4887  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4888  +-------+
4889  | ... |
4890  +-------+
4891  | Alloc | 1st[1st.size() - 1]
4892  +-------+
4893  | |
4894  | |
4895  | |
4896 GetSize() +-------+
4897 
4898 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4899 
4900  0 +-------+
4901  | Alloc | 2nd[0]
4902  +-------+
4903  | Alloc | 2nd[1]
4904  +-------+
4905  | ... |
4906  +-------+
4907  | Alloc | 2nd[2nd.size() - 1]
4908  +-------+
4909  | |
4910  | |
4911  | |
4912  +-------+
4913  | Alloc | 1st[m_1stNullItemsBeginCount]
4914  +-------+
4915  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4916  +-------+
4917  | ... |
4918  +-------+
4919  | Alloc | 1st[1st.size() - 1]
4920  +-------+
4921  | |
4922 GetSize() +-------+
4923 
4924 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4925 
4926  0 +-------+
4927  | |
4928  | |
4929  | |
4930  +-------+
4931  | Alloc | 1st[m_1stNullItemsBeginCount]
4932  +-------+
4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4934  +-------+
4935  | ... |
4936  +-------+
4937  | Alloc | 1st[1st.size() - 1]
4938  +-------+
4939  | |
4940  | |
4941  | |
4942  +-------+
4943  | Alloc | 2nd[2nd.size() - 1]
4944  +-------+
4945  | ... |
4946  +-------+
4947  | Alloc | 2nd[1]
4948  +-------+
4949  | Alloc | 2nd[0]
4950 GetSize() +-------+
4951 
4952 */
// Metadata for the linear allocation algorithm: two suballocation vectors used
// in ping-pong fashion support stack, double-stack, and ring-buffer behavior
// (see the ASCII diagrams above for the three SECOND_VECTOR modes).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Physical storage; which one is "1st" flips via m_1stVectorIndex.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
5051 
5052 /*
5053 - GetSize() is the original size of allocated memory block.
5054 - m_UsableSize is this size aligned down to a power of two.
5055  All allocations and calculations happen relative to m_UsableSize.
5056 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
5058 
5059 Node at level 0 has size = m_UsableSize.
5060 Each next level contains nodes with size 2 times smaller than current level.
5061 m_LevelCount is the maximum number of levels to use in the current object.
5062 */
// Metadata for the buddy allocation algorithm: a binary tree of power-of-two
// nodes over m_UsableSize, with per-level free lists (see comment above).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reports the unusable tail (size not covered by the power-of-two tree) as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is one whole free region.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not implemented for the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public variants forward to the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled by ValidateNode() and checked against the cached counters.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. The union's active member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;       // TYPE_FREE: links in m_FreeList at this node's level.
            struct
            {
                VmaAllocation alloc;
            } allocation; // TYPE_ALLOCATION: the allocation stored here.
            struct
            {
                Node* leftChild;
            } split;      // TYPE_SPLIT: left child; right child is its buddy.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Level 0 is the whole usable block; each level halves the node size.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5199 
5200 /*
5201 Represents a single block of device memory (`VkDeviceMemory`) with all the
5202 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5203 
5204 Thread-safety: This class must be externally synchronized.
5205 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations within this block. Concrete type depends on
    // the algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. `count` is the number of map references to add/remove
    // (mapping is reference-counted; see m_MapCount).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Corruption-detection helpers: write/check guard values around an allocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
5268 
// Strict weak ordering of arbitrary pointers.
// Compares via uintptr_t: applying raw `<` to pointers into unrelated objects
// yields an unspecified result in C++ ([expr.rel]), and this comparator is
// used to order heterogeneous pointers. The integer comparison gives a
// well-defined total order on every platform, matching std::less semantics.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
5276 
class VmaDefragmentator; // Forward declaration; full definition appears below.
5278 
5279 /*
5280 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5281 Vulkan memory type.
5282 
5283 Synchronized internally with a mutex.
5284 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // NOTE(review): presumably pre-creates blocks up to m_MinBlockCount — confirm in implementation.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    // One of the VmaBlockMetadata_* algorithms; passed through to blocks.
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related state; this struct is synchronized internally.
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5397 
// Implementation of the VmaPool handle: a custom pool is just a block vector
// with its own configuration, plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be set only once, while it is still 0 (asserted).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
5420 
// Moves allocations between the blocks of one VmaBlockVector to reduce
// fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals exposed via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with the caller's optional
    // output flag (see AddAllocation()).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by descending allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // The block has non-movable allocations when it holds more allocations
        // than were registered for defragmentation in this BlockInfo.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // Sorts m_Allocations largest-first. (Typo in the name is kept so
        // existing callers keep compiling.)
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: lets a sorted vector of BlockInfo* be searched
    // by a raw VmaDeviceMemoryBlock* as well.
    // NOTE(review): raw `<` on unrelated pointers is formally unspecified in
    // C++, though it behaves as expected on common platforms.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged may be null;
    // NOTE(review): presumably it receives whether the allocation was moved — confirm.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5550 
5551 #if VMA_RECORDING_ENABLED
5552 
// Records a log of VMA API calls to a file, as configured through
// VmaAllocatorCreateInfo::pRecordSettings. Compiled in only when
// VMA_RECORDING_ENABLED is nonzero (Windows by default).
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file described by settings. useMutex enables locking of
    // m_FileMutex around writes so multiple threads can record concurrently.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header block describing the physical device, its memory
    // properties, and whether the dedicated-allocation extension is enabled.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per recordable VMA entry point. Each takes the
    // current frame index; per-call thread id and timestamp are captured via
    // GetBasicParams (see CallParams below).
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call context captured at the start of every recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders an allocation's pUserData either as the user's string (when the
    // USER_DATA_AS_STRING flag is used) or as a formatted pointer value.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Scratch buffer for the pointer representation; m_Str points either
        // here or at the user-provided string.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // NOTE(review): presumably QueryPerformanceFrequency/Counter values used to
    // compute CallParams::time — confirm against GetBasicParams implementation.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5648 
5649 #endif // #if VMA_RECORDING_ENABLED
5650 
5651 // Main allocator object.
// Main allocator object.
// Internal implementation behind the public VmaAllocator handle. Owns the
// default block vectors (one per Vulkan memory type), registries of dedicated
// allocations, custom pools, and cached physical-device properties.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // True when the user supplied CPU allocation callbacks; see GetAllocationCallbacks().
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Two-phase construction: the constructor sets up state, Init() performs
    // the work that can fail and reports it via VkResult.
    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user-specified CPU callbacks, or null when defaults apply.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types additionally honor nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // NOTE(review): presumably thin wrappers over vkAllocateMemory/vkFreeMemory
    // that also apply m_HeapSizeLimit and invoke m_DeviceMemoryCallbacks —
    // confirm against the out-of-line implementations.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees and unregisters an allocation previously made as dedicated memory.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
5848 
5850 // Memory allocation #2 after VmaAllocator_T definition
5851 
5852 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5853 {
5854  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5855 }
5856 
5857 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5858 {
5859  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5860 }
5861 
5862 template<typename T>
5863 static T* VmaAllocate(VmaAllocator hAllocator)
5864 {
5865  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5866 }
5867 
5868 template<typename T>
5869 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5870 {
5871  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5872 }
5873 
5874 template<typename T>
5875 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5876 {
5877  if(ptr != VMA_NULL)
5878  {
5879  ptr->~T();
5880  VmaFree(hAllocator, ptr);
5881  }
5882 }
5883 
5884 template<typename T>
5885 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5886 {
5887  if(ptr != VMA_NULL)
5888  {
5889  for(size_t i = count; i--; )
5890  ptr[i].~T();
5891  VmaFree(hAllocator, ptr);
5892  }
5893 }
5894 
5896 // VmaStringBuilder
5897 
5898 #if VMA_STATS_STRING_ENABLED
5899 
// Growable character buffer used to build the statistics JSON text.
// Note: the buffer is NOT NUL-terminated — Add(const char*) copies only the
// string's characters — so pair GetData() with GetLength().
class VmaStringBuilder
{
public:
    // Character storage allocates through the allocator's CPU callbacks.
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5917 
5918 void VmaStringBuilder::Add(const char* pStr)
5919 {
5920  const size_t strLen = strlen(pStr);
5921  if(strLen > 0)
5922  {
5923  const size_t oldCount = m_Data.size();
5924  m_Data.resize(oldCount + strLen);
5925  memcpy(m_Data.data() + oldCount, pStr, strLen);
5926  }
5927 }
5928 
5929 void VmaStringBuilder::AddNumber(uint32_t num)
5930 {
5931  char buf[11];
5932  VmaUint32ToStr(buf, sizeof(buf), num);
5933  Add(buf);
5934 }
5935 
5936 void VmaStringBuilder::AddNumber(uint64_t num)
5937 {
5938  char buf[21];
5939  VmaUint64ToStr(buf, sizeof(buf), num);
5940  Add(buf);
5941 }
5942 
5943 void VmaStringBuilder::AddPointer(const void* ptr)
5944 {
5945  char buf[21];
5946  VmaPtrToStr(buf, sizeof(buf), ptr);
5947  Add(buf);
5948 }
5949 
5950 #endif // #if VMA_STATS_STRING_ENABLED
5951 
5953 // VmaJsonWriter
5954 
5955 #if VMA_STATS_STRING_ENABLED
5956 
// Incremental JSON writer used to produce the statistics string.
// Calls must be properly nested; the destructor asserts that every
// BeginObject/BeginArray was matched and that no string is left open.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // Output is appended to sb; pAllocationCallbacks is used only for the
    // internal nesting stack.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses newlines/indentation for the collection's contents.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string. Inside an object, values at even
    // positions act as keys and must be strings (enforced in BeginValue).
    void WriteString(const char* pStr);
    // Begin/Continue.../EndString compose one quoted string from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, repeated once per nesting level by WriteIndent().
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;    // values (and, for objects, keys) emitted so far.
        bool singleLineMode;    // no newlines/indentation at this level.
    };

    VmaStringBuilder& m_SB;     // destination buffer.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;        // true between BeginString and EndString.

    // Emits the separator (", " / ": ") and indentation preceding a new value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
6005 
// Indentation unit written once per nesting level (see WriteIndent).
const char* const VmaJsonWriter::INDENT = " ";
6007 
// Constructs a writer that appends JSON text to sb. The nesting stack
// allocates through the provided CPU allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
6014 
VmaJsonWriter::~VmaJsonWriter()
{
    // The writer must not be destroyed mid-string or with unclosed collections.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
6020 
6021 void VmaJsonWriter::BeginObject(bool singleLine)
6022 {
6023  VMA_ASSERT(!m_InsideString);
6024 
6025  BeginValue(false);
6026  m_SB.Add('{');
6027 
6028  StackItem item;
6029  item.type = COLLECTION_TYPE_OBJECT;
6030  item.valueCount = 0;
6031  item.singleLineMode = singleLine;
6032  m_Stack.push_back(item);
6033 }
6034 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace sits one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
6045 
6046 void VmaJsonWriter::BeginArray(bool singleLine)
6047 {
6048  VMA_ASSERT(!m_InsideString);
6049 
6050  BeginValue(false);
6051  m_SB.Add('[');
6052 
6053  StackItem item;
6054  item.type = COLLECTION_TYPE_ARRAY;
6055  item.valueCount = 0;
6056  item.singleLineMode = singleLine;
6057  m_Stack.push_back(item);
6058 }
6059 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket sits one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
6070 
// Writes a complete quoted, escaped string in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
6076 
6077 void VmaJsonWriter::BeginString(const char* pStr)
6078 {
6079  VMA_ASSERT(!m_InsideString);
6080 
6081  BeginValue(true);
6082  m_SB.Add('"');
6083  m_InsideString = true;
6084  if(pStr != VMA_NULL && pStr[0] != '\0')
6085  {
6086  ContinueString(pStr);
6087  }
6088 }
6089 
6090 void VmaJsonWriter::ContinueString(const char* pStr)
6091 {
6092  VMA_ASSERT(m_InsideString);
6093 
6094  const size_t strLen = strlen(pStr);
6095  for(size_t i = 0; i < strLen; ++i)
6096  {
6097  char ch = pStr[i];
6098  if(ch == '\\')
6099  {
6100  m_SB.Add("\\\\");
6101  }
6102  else if(ch == '"')
6103  {
6104  m_SB.Add("\\\"");
6105  }
6106  else if(ch >= 32)
6107  {
6108  m_SB.Add(ch);
6109  }
6110  else switch(ch)
6111  {
6112  case '\b':
6113  m_SB.Add("\\b");
6114  break;
6115  case '\f':
6116  m_SB.Add("\\f");
6117  break;
6118  case '\n':
6119  m_SB.Add("\\n");
6120  break;
6121  case '\r':
6122  m_SB.Add("\\r");
6123  break;
6124  case '\t':
6125  m_SB.Add("\\t");
6126  break;
6127  default:
6128  VMA_ASSERT(0 && "Character not currently supported.");
6129  break;
6130  }
6131  }
6132 }
6133 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
6139 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
6145 
// Appends a formatted pointer value to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
6151 
6152 void VmaJsonWriter::EndString(const char* pStr)
6153 {
6154  VMA_ASSERT(m_InsideString);
6155  if(pStr != VMA_NULL && pStr[0] != '\0')
6156  {
6157  ContinueString(pStr);
6158  }
6159  m_SB.Add('"');
6160  m_InsideString = false;
6161 }
6162 
// Writes an unquoted numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6169 
// Writes an unquoted numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6176 
6177 void VmaJsonWriter::WriteBool(bool b)
6178 {
6179  VMA_ASSERT(!m_InsideString);
6180  BeginValue(false);
6181  m_SB.Add(b ? "true" : "false");
6182 }
6183 
// Writes the literal "null".
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
6190 
6191 void VmaJsonWriter::BeginValue(bool isString)
6192 {
6193  if(!m_Stack.empty())
6194  {
6195  StackItem& currItem = m_Stack.back();
6196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6197  currItem.valueCount % 2 == 0)
6198  {
6199  VMA_ASSERT(isString);
6200  }
6201 
6202  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6203  currItem.valueCount % 2 != 0)
6204  {
6205  m_SB.Add(": ");
6206  }
6207  else if(currItem.valueCount > 0)
6208  {
6209  m_SB.Add(", ");
6210  WriteIndent();
6211  }
6212  else
6213  {
6214  WriteIndent();
6215  }
6216  ++currItem.valueCount;
6217  }
6218 }
6219 
6220 void VmaJsonWriter::WriteIndent(bool oneLess)
6221 {
6222  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6223  {
6224  m_SB.AddNewLine();
6225 
6226  size_t count = m_Stack.size();
6227  if(count > 0 && oneLess)
6228  {
6229  --count;
6230  }
6231  for(size_t i = 0; i < count; ++i)
6232  {
6233  m_SB.Add(INDENT);
6234  }
6235  }
6236 }
6237 
6238 #endif // #if VMA_STATS_STRING_ENABLED
6239 
6241 
6242 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6243 {
6244  if(IsUserDataString())
6245  {
6246  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6247 
6248  FreeUserDataString(hAllocator);
6249 
6250  if(pUserData != VMA_NULL)
6251  {
6252  const char* const newStrSrc = (char*)pUserData;
6253  const size_t newStrLen = strlen(newStrSrc);
6254  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6255  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6256  m_pUserData = newStrDst;
6257  }
6258  }
6259  else
6260  {
6261  m_pUserData = pUserData;
6262  }
6263 }
6264 
6265 void VmaAllocation_T::ChangeBlockAllocation(
6266  VmaAllocator hAllocator,
6267  VmaDeviceMemoryBlock* block,
6268  VkDeviceSize offset)
6269 {
6270  VMA_ASSERT(block != VMA_NULL);
6271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6272 
6273  // Move mapping reference counter from old block to new block.
6274  if(block != m_BlockAllocation.m_Block)
6275  {
6276  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6277  if(IsPersistentMap())
6278  ++mapRefCount;
6279  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6280  block->Map(hAllocator, mapRefCount, VMA_NULL);
6281  }
6282 
6283  m_BlockAllocation.m_Block = block;
6284  m_BlockAllocation.m_Offset = offset;
6285 }
6286 
6287 VkDeviceSize VmaAllocation_T::GetOffset() const
6288 {
6289  switch(m_Type)
6290  {
6291  case ALLOCATION_TYPE_BLOCK:
6292  return m_BlockAllocation.m_Offset;
6293  case ALLOCATION_TYPE_DEDICATED:
6294  return 0;
6295  default:
6296  VMA_ASSERT(0);
6297  return 0;
6298  }
6299 }
6300 
6301 VkDeviceMemory VmaAllocation_T::GetMemory() const
6302 {
6303  switch(m_Type)
6304  {
6305  case ALLOCATION_TYPE_BLOCK:
6306  return m_BlockAllocation.m_Block->GetDeviceMemory();
6307  case ALLOCATION_TYPE_DEDICATED:
6308  return m_DedicatedAllocation.m_hMemory;
6309  default:
6310  VMA_ASSERT(0);
6311  return VK_NULL_HANDLE;
6312  }
6313 }
6314 
6315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6316 {
6317  switch(m_Type)
6318  {
6319  case ALLOCATION_TYPE_BLOCK:
6320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6321  case ALLOCATION_TYPE_DEDICATED:
6322  return m_DedicatedAllocation.m_MemoryTypeIndex;
6323  default:
6324  VMA_ASSERT(0);
6325  return UINT32_MAX;
6326  }
6327 }
6328 
6329 void* VmaAllocation_T::GetMappedData() const
6330 {
6331  switch(m_Type)
6332  {
6333  case ALLOCATION_TYPE_BLOCK:
6334  if(m_MapCount != 0)
6335  {
6336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6337  VMA_ASSERT(pBlockData != VMA_NULL);
6338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6339  }
6340  else
6341  {
6342  return VMA_NULL;
6343  }
6344  break;
6345  case ALLOCATION_TYPE_DEDICATED:
6346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6347  return m_DedicatedAllocation.m_pMappedData;
6348  default:
6349  VMA_ASSERT(0);
6350  return VMA_NULL;
6351  }
6352 }
6353 
6354 bool VmaAllocation_T::CanBecomeLost() const
6355 {
6356  switch(m_Type)
6357  {
6358  case ALLOCATION_TYPE_BLOCK:
6359  return m_BlockAllocation.m_CanBecomeLost;
6360  case ALLOCATION_TYPE_DEDICATED:
6361  return false;
6362  default:
6363  VMA_ASSERT(0);
6364  return false;
6365  }
6366 }
6367 
// Pool this block allocation belongs to. Valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
6373 
// Tries to atomically mark this allocation as lost. Returns true on success;
// false when the allocation was used too recently (within frameInUseCount
// frames of currentFrameIndex). Asserts if it is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-exchange retry loop on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — callers should not request this twice.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Potentially still referenced by an in-flight frame.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
6405 
6406 #if VMA_STATS_STRING_ENABLED
6407 
6408 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value (e.g. in PrintParameters), so the order
// here must stay in sync with the enum's declaration.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6417 
// Writes this allocation's key/value pairs into an already-open JSON object.
// The caller owns BeginObject/EndObject around this call.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String mode: m_pUserData is an owned NUL-terminated string.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Raw-pointer mode: emit the pointer value as a quoted string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are only recorded for buffer/image allocations; 0 means none.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6453 
6454 #endif
6455 
6456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6457 {
6458  VMA_ASSERT(IsUserDataString());
6459  if(m_pUserData != VMA_NULL)
6460  {
6461  char* const oldStr = (char*)m_pUserData;
6462  const size_t oldStrLen = strlen(oldStr);
6463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6464  m_pUserData = VMA_NULL;
6465  }
6466 }
6467 
6468 void VmaAllocation_T::BlockAllocMap()
6469 {
6470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6471 
6472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6473  {
6474  ++m_MapCount;
6475  }
6476  else
6477  {
6478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6479  }
6480 }
6481 
6482 void VmaAllocation_T::BlockAllocUnmap()
6483 {
6484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6485 
6486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6487  {
6488  --m_MapCount;
6489  }
6490  else
6491  {
6492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6493  }
6494 }
6495 
6496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6497 {
6498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6499 
6500  if(m_MapCount != 0)
6501  {
6502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6503  {
6504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6505  *ppData = m_DedicatedAllocation.m_pMappedData;
6506  ++m_MapCount;
6507  return VK_SUCCESS;
6508  }
6509  else
6510  {
6511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6512  return VK_ERROR_MEMORY_MAP_FAILED;
6513  }
6514  }
6515  else
6516  {
6517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6518  hAllocator->m_hDevice,
6519  m_DedicatedAllocation.m_hMemory,
6520  0, // offset
6521  VK_WHOLE_SIZE,
6522  0, // flags
6523  ppData);
6524  if(result == VK_SUCCESS)
6525  {
6526  m_DedicatedAllocation.m_pMappedData = *ppData;
6527  m_MapCount = 1;
6528  }
6529  return result;
6530  }
6531 }
6532 
6533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6534 {
6535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6536 
6537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6538  {
6539  --m_MapCount;
6540  if(m_MapCount == 0)
6541  {
6542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6544  hAllocator->m_hDevice,
6545  m_DedicatedAllocation.m_hMemory);
6546  }
6547  }
6548  else
6549  {
6550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6551  }
6552 }
6553 
6554 #if VMA_STATS_STRING_ENABLED
6555 
// Serializes one VmaStatInfo as a JSON object. The min/avg/max sub-objects
// are emitted only when there is more than one allocation / unused range,
// since with a single entry they would just repeat the totals.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
6603 
6604 #endif // #if VMA_STATS_STRING_ENABLED
6605 
// Orders suballocation-list iterators by suballocation size. The second
// overload compares against a bare size, enabling binary search for the
// best-fit free suballocation.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
6621 
6622 
6624 // class VmaBlockMetadata
6625 
// Base metadata starts with zero size; the real size is set later via Init().
// CPU allocation callbacks are cached for use by derived classes.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
6631 
6632 #if VMA_STATS_STRING_ENABLED
6633 
// Opens the JSON object for one block's detailed map and begins the
// "Suballocations" array. Must be closed with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
6656 
// Writes one occupied suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type/Size/UserData etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
6670 
// Writes one free range as a single-line JSON object, using the "FREE"
// suballocation type name.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
6688 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6694 
6695 #endif // #if VMA_STATS_STRING_ENABLED
6696 
6698 // class VmaBlockMetadata_Generic
6699 
// Generic (free-list based) metadata. Starts empty; Init() populates the
// suballocation list with one free range covering the whole block.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
6708 
// Nothing to release explicitly: members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6712 
6713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6714 {
6715  VmaBlockMetadata::Init(size);
6716 
6717  m_FreeCount = 1;
6718  m_SumFreeSize = size;
6719 
6720  VmaSuballocation suballoc = {};
6721  suballoc.offset = 0;
6722  suballoc.size = size;
6723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6724  suballoc.hAllocation = VK_NULL_HANDLE;
6725 
6726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6727  m_Suballocations.push_back(suballoc);
6728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6729  --suballocItem;
6730  m_FreeSuballocationsBySize.push_back(suballocItem);
6731 }
6732 
// Full consistency check of this block's metadata: walks m_Suballocations and
// m_FreeSuballocationsBySize and cross-checks them against the cached totals
// m_FreeCount and m_SumFreeSize. Each VMA_VALIDATE macro returns false from
// this function when its condition fails; returns true when all invariants hold.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Exactly the free suballocations must have a null allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
6814 
6815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6816 {
6817  if(!m_FreeSuballocationsBySize.empty())
6818  {
6819  return m_FreeSuballocationsBySize.back()->size;
6820  }
6821  else
6822  {
6823  return 0;
6824  }
6825 }
6826 
6827 bool VmaBlockMetadata_Generic::IsEmpty() const
6828 {
6829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6830 }
6831 
6832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6833 {
6834  outInfo.blockCount = 1;
6835 
6836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6837  outInfo.allocationCount = rangeCount - m_FreeCount;
6838  outInfo.unusedRangeCount = m_FreeCount;
6839 
6840  outInfo.unusedBytes = m_SumFreeSize;
6841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6842 
6843  outInfo.allocationSizeMin = UINT64_MAX;
6844  outInfo.allocationSizeMax = 0;
6845  outInfo.unusedRangeSizeMin = UINT64_MAX;
6846  outInfo.unusedRangeSizeMax = 0;
6847 
6848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6849  suballocItem != m_Suballocations.cend();
6850  ++suballocItem)
6851  {
6852  const VmaSuballocation& suballoc = *suballocItem;
6853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6854  {
6855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6857  }
6858  else
6859  {
6860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6862  }
6863  }
6864 }
6865 
6866 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6867 {
6868  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6869 
6870  inoutStats.size += GetSize();
6871  inoutStats.unusedSize += m_SumFreeSize;
6872  inoutStats.allocationCount += rangeCount - m_FreeCount;
6873  inoutStats.unusedRangeCount += m_FreeCount;
6874  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6875 }
6876 
6877 #if VMA_STATS_STRING_ENABLED
6878 
6879 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6880 {
6881  PrintDetailedMap_Begin(json,
6882  m_SumFreeSize, // unusedBytes
6883  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6884  m_FreeCount); // unusedRangeCount
6885 
6886  size_t i = 0;
6887  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6888  suballocItem != m_Suballocations.cend();
6889  ++suballocItem, ++i)
6890  {
6891  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6892  {
6893  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6894  }
6895  else
6896  {
6897  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6898  }
6899  }
6900 
6901  PrintDetailedMap_End(json);
6902 }
6903 
6904 #endif // #if VMA_STATS_STRING_ENABLED
6905 
6906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6907  uint32_t currentFrameIndex,
6908  uint32_t frameInUseCount,
6909  VkDeviceSize bufferImageGranularity,
6910  VkDeviceSize allocSize,
6911  VkDeviceSize allocAlignment,
6912  bool upperAddress,
6913  VmaSuballocationType allocType,
6914  bool canMakeOtherLost,
6915  uint32_t strategy,
6916  VmaAllocationRequest* pAllocationRequest)
6917 {
6918  VMA_ASSERT(allocSize > 0);
6919  VMA_ASSERT(!upperAddress);
6920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6922  VMA_HEAVY_ASSERT(Validate());
6923 
6924  // There is not enough total free space in this block to fullfill the request: Early return.
6925  if(canMakeOtherLost == false &&
6926  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6927  {
6928  return false;
6929  }
6930 
6931  // New algorithm, efficiently searching freeSuballocationsBySize.
6932  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6933  if(freeSuballocCount > 0)
6934  {
6936  {
6937  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6938  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6939  m_FreeSuballocationsBySize.data(),
6940  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6941  allocSize + 2 * VMA_DEBUG_MARGIN,
6942  VmaSuballocationItemSizeLess());
6943  size_t index = it - m_FreeSuballocationsBySize.data();
6944  for(; index < freeSuballocCount; ++index)
6945  {
6946  if(CheckAllocation(
6947  currentFrameIndex,
6948  frameInUseCount,
6949  bufferImageGranularity,
6950  allocSize,
6951  allocAlignment,
6952  allocType,
6953  m_FreeSuballocationsBySize[index],
6954  false, // canMakeOtherLost
6955  &pAllocationRequest->offset,
6956  &pAllocationRequest->itemsToMakeLostCount,
6957  &pAllocationRequest->sumFreeSize,
6958  &pAllocationRequest->sumItemSize))
6959  {
6960  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6961  return true;
6962  }
6963  }
6964  }
6965  else // WORST_FIT, FIRST_FIT
6966  {
6967  // Search staring from biggest suballocations.
6968  for(size_t index = freeSuballocCount; index--; )
6969  {
6970  if(CheckAllocation(
6971  currentFrameIndex,
6972  frameInUseCount,
6973  bufferImageGranularity,
6974  allocSize,
6975  allocAlignment,
6976  allocType,
6977  m_FreeSuballocationsBySize[index],
6978  false, // canMakeOtherLost
6979  &pAllocationRequest->offset,
6980  &pAllocationRequest->itemsToMakeLostCount,
6981  &pAllocationRequest->sumFreeSize,
6982  &pAllocationRequest->sumItemSize))
6983  {
6984  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6985  return true;
6986  }
6987  }
6988  }
6989  }
6990 
6991  if(canMakeOtherLost)
6992  {
6993  // Brute-force algorithm. TODO: Come up with something better.
6994 
6995  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6996  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6997 
6998  VmaAllocationRequest tmpAllocRequest = {};
6999  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
7000  suballocIt != m_Suballocations.end();
7001  ++suballocIt)
7002  {
7003  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
7004  suballocIt->hAllocation->CanBecomeLost())
7005  {
7006  if(CheckAllocation(
7007  currentFrameIndex,
7008  frameInUseCount,
7009  bufferImageGranularity,
7010  allocSize,
7011  allocAlignment,
7012  allocType,
7013  suballocIt,
7014  canMakeOtherLost,
7015  &tmpAllocRequest.offset,
7016  &tmpAllocRequest.itemsToMakeLostCount,
7017  &tmpAllocRequest.sumFreeSize,
7018  &tmpAllocRequest.sumItemSize))
7019  {
7020  tmpAllocRequest.item = suballocIt;
7021 
7022  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7024  {
7025  *pAllocationRequest = tmpAllocRequest;
7026  }
7027  }
7028  }
7029  }
7030 
7031  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7032  {
7033  return true;
7034  }
7035  }
7036 
7037  return false;
7038 }
7039 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// merging each freed range into its neighbors as it goes. On success,
// pAllocationRequest->item points at the resulting free suballocation and true
// is returned; returns false if any allocation refused to become lost.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step past the current (merged) free range to reach the next used item.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges adjacent free ranges and returns the
            // iterator of the merged free suballocation.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
7071 
7072 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7073 {
7074  uint32_t lostAllocationCount = 0;
7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7076  it != m_Suballocations.end();
7077  ++it)
7078  {
7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7080  it->hAllocation->CanBecomeLost() &&
7081  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7082  {
7083  it = FreeSuballocation(it);
7084  ++lostAllocationCount;
7085  }
7086  }
7087  return lostAllocationCount;
7088 }
7089 
7090 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7091 {
7092  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7093  it != m_Suballocations.end();
7094  ++it)
7095  {
7096  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7097  {
7098  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7099  {
7100  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7101  return VK_ERROR_VALIDATION_FAILED_EXT;
7102  }
7103  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7104  {
7105  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7106  return VK_ERROR_VALIDATION_FAILED_EXT;
7107  }
7108  }
7109  }
7110 
7111  return VK_SUCCESS;
7112 }
7113 
// Commits the allocation described by 'request' (previously produced by
// CreateAllocationRequest + MakeRequestedAllocationsLost): converts the chosen
// free suballocation into a used one, and inserts new free suballocations for
// any leftover space before it (paddingBegin, caused by alignment/margin) and
// after it (paddingEnd). upperAddress is unsupported here and must be false.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // The consumed free range is gone; each inserted padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
7179 
7180 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7181 {
7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7183  suballocItem != m_Suballocations.end();
7184  ++suballocItem)
7185  {
7186  VmaSuballocation& suballoc = *suballocItem;
7187  if(suballoc.hAllocation == allocation)
7188  {
7189  FreeSuballocation(suballocItem);
7190  VMA_HEAVY_ASSERT(Validate());
7191  return;
7192  }
7193  }
7194  VMA_ASSERT(0 && "Not found!");
7195 }
7196 
7197 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7198 {
7199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7200  suballocItem != m_Suballocations.end();
7201  ++suballocItem)
7202  {
7203  VmaSuballocation& suballoc = *suballocItem;
7204  if(suballoc.offset == offset)
7205  {
7206  FreeSuballocation(suballocItem);
7207  return;
7208  }
7209  }
7210  VMA_ASSERT(0 && "Not found!");
7211 }
7212 
7213 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7214 {
7215  VkDeviceSize lastSize = 0;
7216  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7217  {
7218  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7219 
7220  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7221  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7222  VMA_VALIDATE(it->size >= lastSize);
7223  lastSize = it->size;
7224  }
7225  return true;
7226 }
7227 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
// bufferImageGranularity. On success fills *pOffset with the final aligned
// offset and returns true.
//
// When canMakeOtherLost is true, the candidate range may span several
// consecutive suballocations; *itemsToMakeLostCount, *pSumFreeSize and
// *pSumItemSize then report how many lost-enabled allocations would have to be
// made lost and the free/used byte totals of the spanned range. When false,
// only the single free suballocation at suballocItem is considered and
// *pSumFreeSize receives its size.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting item must itself be losable and already expired.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    // Every used item spanned must be losable and expired.
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the allocation must fit entirely inside this one free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7501 
// Merges the free suballocation following 'item' into 'item' (both must be
// free): grows item->size, decrements m_FreeCount and erases the next element.
// m_SumFreeSize is unchanged because the merged bytes were already counted free.
void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item != m_Suballocations.end());
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);

    VmaSuballocationList::iterator nextItem = item;
    ++nextItem;
    VMA_ASSERT(nextItem != m_Suballocations.end());
    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);

    item->size += nextItem->size;
    --m_FreeCount;
    m_Suballocations.erase(nextItem);
}
7516 
// Converts the given used suballocation into a free one, updates totals,
// merges it with an adjacent free neighbor on either side, and (re)registers
// the resulting free range in m_FreeSuballocationsBySize. Returns the iterator
// of the final merged free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors being merged away must be unregistered from the size-sorted
    // list BEFORE the merge changes any size, since the list is searched by size.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7568 
// Inserts the given free suballocation into m_FreeSuballocationsBySize,
// keeping it sorted by size ascending. Suballocations smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are intentionally not tracked.
void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        if(m_FreeSuballocationsBySize.empty())
        {
            m_FreeSuballocationsBySize.push_back(item);
        }
        else
        {
            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
        }
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
7592 
7593 
// Removes the given free suballocation from m_FreeSuballocationsBySize.
// Binary-searches for the first entry of equal size, then scans forward
// through the run of equal-sized entries to find the exact iterator, since
// the list is sorted only by size. Asserts if the item was never registered.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Still inside the run of equal sizes - otherwise the item is missing.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
7626 
7628 // class VmaBlockMetadata_Linear
7629 
// Constructs empty linear (ring-buffer / stack) metadata; real initialization
// happens in Init(). The two suballocation vectors alternate roles as "1st"
// and "2nd" depending on m_1stVectorIndex; both use the allocator's CPU
// allocation callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7642 
// Nothing to release explicitly: members clean up via their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7646 
// Initializes metadata for a block of the given size; the whole block starts
// free (both suballocation vectors remain empty).
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7652 
// Exhaustively checks the internal invariants of this linear block:
// vector-emptiness vs. m_2ndVectorMode consistency, null-item counters,
// strictly increasing offsets within each traversal, per-allocation
// offset/size agreement with the VmaAllocation handles, and finally that
// m_SumFreeSize equals block size minus the sum of used suballocation sizes.
// Each VMA_VALIDATE returns false from this function on the first violation;
// returns true if every check passes.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty if and only if the mode says SECOND_VECTOR_EMPTY.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot hold items in 2nd while 1st is empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the vectors they describe.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running lower bound for the next valid offset; each item must start at
    // or after this, and advances past the item plus the debug margin.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring-buffer mode: items in 2nd occupy the low-address end of the block,
    // before the items in 1st, so validate them first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // FREE type and null handle must agree.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The allocation handle must agree with the metadata record.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading m_1stNullItemsBeginCount items of 1st must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): this condition is always true because the loop starts
        // at i == m_1stNullItemsBeginCount; kept as-is to avoid changing code
        // in a documentation pass.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // Double-stack mode: items in 2nd occupy the high-address end, above the
    // items in 1st; the vector is stored top-down so iterate it in reverse to
    // keep visiting memory in increasing address order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
7779 
7780 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7781 {
7782  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7783  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7784 }
7785 
7786 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7787 {
7788  const VkDeviceSize size = GetSize();
7789 
7790  /*
7791  We don't consider gaps inside allocation vectors with freed allocations because
7792  they are not suitable for reuse in linear allocator. We consider only space that
7793  is available for new allocations.
7794  */
7795  if(IsEmpty())
7796  {
7797  return size;
7798  }
7799 
7800  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7801 
7802  switch(m_2ndVectorMode)
7803  {
7804  case SECOND_VECTOR_EMPTY:
7805  /*
7806  Available space is after end of 1st, as well as before beginning of 1st (which
7807  whould make it a ring buffer).
7808  */
7809  {
7810  const size_t suballocations1stCount = suballocations1st.size();
7811  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7812  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7813  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7814  return VMA_MAX(
7815  firstSuballoc.offset,
7816  size - (lastSuballoc.offset + lastSuballoc.size));
7817  }
7818  break;
7819 
7820  case SECOND_VECTOR_RING_BUFFER:
7821  /*
7822  Available space is only between end of 2nd and beginning of 1st.
7823  */
7824  {
7825  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7826  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7827  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7828  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7829  }
7830  break;
7831 
7832  case SECOND_VECTOR_DOUBLE_STACK:
7833  /*
7834  Available space is only between end of 1st and top of 2nd.
7835  */
7836  {
7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7838  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7839  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7840  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7841  }
7842  break;
7843 
7844  default:
7845  VMA_ASSERT(0);
7846  return 0;
7847  }
7848 }
7849 
7850 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7851 {
7852  const VkDeviceSize size = GetSize();
7853  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7854  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7855  const size_t suballoc1stCount = suballocations1st.size();
7856  const size_t suballoc2ndCount = suballocations2nd.size();
7857 
7858  outInfo.blockCount = 1;
7859  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7860  outInfo.unusedRangeCount = 0;
7861  outInfo.usedBytes = 0;
7862  outInfo.allocationSizeMin = UINT64_MAX;
7863  outInfo.allocationSizeMax = 0;
7864  outInfo.unusedRangeSizeMin = UINT64_MAX;
7865  outInfo.unusedRangeSizeMax = 0;
7866 
7867  VkDeviceSize lastOffset = 0;
7868 
7869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7870  {
7871  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7872  size_t nextAlloc2ndIndex = 0;
7873  while(lastOffset < freeSpace2ndTo1stEnd)
7874  {
7875  // Find next non-null allocation or move nextAllocIndex to the end.
7876  while(nextAlloc2ndIndex < suballoc2ndCount &&
7877  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7878  {
7879  ++nextAlloc2ndIndex;
7880  }
7881 
7882  // Found non-null allocation.
7883  if(nextAlloc2ndIndex < suballoc2ndCount)
7884  {
7885  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7886 
7887  // 1. Process free space before this allocation.
7888  if(lastOffset < suballoc.offset)
7889  {
7890  // There is free space from lastOffset to suballoc.offset.
7891  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7892  ++outInfo.unusedRangeCount;
7893  outInfo.unusedBytes += unusedRangeSize;
7894  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7895  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7896  }
7897 
7898  // 2. Process this allocation.
7899  // There is allocation with suballoc.offset, suballoc.size.
7900  outInfo.usedBytes += suballoc.size;
7901  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7902  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7903 
7904  // 3. Prepare for next iteration.
7905  lastOffset = suballoc.offset + suballoc.size;
7906  ++nextAlloc2ndIndex;
7907  }
7908  // We are at the end.
7909  else
7910  {
7911  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7912  if(lastOffset < freeSpace2ndTo1stEnd)
7913  {
7914  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7915  ++outInfo.unusedRangeCount;
7916  outInfo.unusedBytes += unusedRangeSize;
7917  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7918  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7919  }
7920 
7921  // End of loop.
7922  lastOffset = freeSpace2ndTo1stEnd;
7923  }
7924  }
7925  }
7926 
7927  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7928  const VkDeviceSize freeSpace1stTo2ndEnd =
7929  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7930  while(lastOffset < freeSpace1stTo2ndEnd)
7931  {
7932  // Find next non-null allocation or move nextAllocIndex to the end.
7933  while(nextAlloc1stIndex < suballoc1stCount &&
7934  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7935  {
7936  ++nextAlloc1stIndex;
7937  }
7938 
7939  // Found non-null allocation.
7940  if(nextAlloc1stIndex < suballoc1stCount)
7941  {
7942  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7943 
7944  // 1. Process free space before this allocation.
7945  if(lastOffset < suballoc.offset)
7946  {
7947  // There is free space from lastOffset to suballoc.offset.
7948  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7949  ++outInfo.unusedRangeCount;
7950  outInfo.unusedBytes += unusedRangeSize;
7951  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7952  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7953  }
7954 
7955  // 2. Process this allocation.
7956  // There is allocation with suballoc.offset, suballoc.size.
7957  outInfo.usedBytes += suballoc.size;
7958  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7959  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7960 
7961  // 3. Prepare for next iteration.
7962  lastOffset = suballoc.offset + suballoc.size;
7963  ++nextAlloc1stIndex;
7964  }
7965  // We are at the end.
7966  else
7967  {
7968  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7969  if(lastOffset < freeSpace1stTo2ndEnd)
7970  {
7971  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7972  ++outInfo.unusedRangeCount;
7973  outInfo.unusedBytes += unusedRangeSize;
7974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7975  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7976  }
7977 
7978  // End of loop.
7979  lastOffset = freeSpace1stTo2ndEnd;
7980  }
7981  }
7982 
7983  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7984  {
7985  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7986  while(lastOffset < size)
7987  {
7988  // Find next non-null allocation or move nextAllocIndex to the end.
7989  while(nextAlloc2ndIndex != SIZE_MAX &&
7990  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7991  {
7992  --nextAlloc2ndIndex;
7993  }
7994 
7995  // Found non-null allocation.
7996  if(nextAlloc2ndIndex != SIZE_MAX)
7997  {
7998  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7999 
8000  // 1. Process free space before this allocation.
8001  if(lastOffset < suballoc.offset)
8002  {
8003  // There is free space from lastOffset to suballoc.offset.
8004  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8005  ++outInfo.unusedRangeCount;
8006  outInfo.unusedBytes += unusedRangeSize;
8007  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8008  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8009  }
8010 
8011  // 2. Process this allocation.
8012  // There is allocation with suballoc.offset, suballoc.size.
8013  outInfo.usedBytes += suballoc.size;
8014  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8015  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8016 
8017  // 3. Prepare for next iteration.
8018  lastOffset = suballoc.offset + suballoc.size;
8019  --nextAlloc2ndIndex;
8020  }
8021  // We are at the end.
8022  else
8023  {
8024  // There is free space from lastOffset to size.
8025  if(lastOffset < size)
8026  {
8027  const VkDeviceSize unusedRangeSize = size - lastOffset;
8028  ++outInfo.unusedRangeCount;
8029  outInfo.unusedBytes += unusedRangeSize;
8030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8031  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8032  }
8033 
8034  // End of loop.
8035  lastOffset = size;
8036  }
8037  }
8038  }
8039 
8040  outInfo.unusedBytes = size - outInfo.usedBytes;
8041 }
8042 
8043 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8044 {
8045  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8046  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8047  const VkDeviceSize size = GetSize();
8048  const size_t suballoc1stCount = suballocations1st.size();
8049  const size_t suballoc2ndCount = suballocations2nd.size();
8050 
8051  inoutStats.size += size;
8052 
8053  VkDeviceSize lastOffset = 0;
8054 
8055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8056  {
8057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8058  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
8059  while(lastOffset < freeSpace2ndTo1stEnd)
8060  {
8061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8062  while(nextAlloc2ndIndex < suballoc2ndCount &&
8063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8064  {
8065  ++nextAlloc2ndIndex;
8066  }
8067 
8068  // Found non-null allocation.
8069  if(nextAlloc2ndIndex < suballoc2ndCount)
8070  {
8071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8072 
8073  // 1. Process free space before this allocation.
8074  if(lastOffset < suballoc.offset)
8075  {
8076  // There is free space from lastOffset to suballoc.offset.
8077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8078  inoutStats.unusedSize += unusedRangeSize;
8079  ++inoutStats.unusedRangeCount;
8080  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8081  }
8082 
8083  // 2. Process this allocation.
8084  // There is allocation with suballoc.offset, suballoc.size.
8085  ++inoutStats.allocationCount;
8086 
8087  // 3. Prepare for next iteration.
8088  lastOffset = suballoc.offset + suballoc.size;
8089  ++nextAlloc2ndIndex;
8090  }
8091  // We are at the end.
8092  else
8093  {
8094  if(lastOffset < freeSpace2ndTo1stEnd)
8095  {
8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8097  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8098  inoutStats.unusedSize += unusedRangeSize;
8099  ++inoutStats.unusedRangeCount;
8100  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8101  }
8102 
8103  // End of loop.
8104  lastOffset = freeSpace2ndTo1stEnd;
8105  }
8106  }
8107  }
8108 
8109  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8110  const VkDeviceSize freeSpace1stTo2ndEnd =
8111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8112  while(lastOffset < freeSpace1stTo2ndEnd)
8113  {
8114  // Find next non-null allocation or move nextAllocIndex to the end.
8115  while(nextAlloc1stIndex < suballoc1stCount &&
8116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8117  {
8118  ++nextAlloc1stIndex;
8119  }
8120 
8121  // Found non-null allocation.
8122  if(nextAlloc1stIndex < suballoc1stCount)
8123  {
8124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8125 
8126  // 1. Process free space before this allocation.
8127  if(lastOffset < suballoc.offset)
8128  {
8129  // There is free space from lastOffset to suballoc.offset.
8130  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8131  inoutStats.unusedSize += unusedRangeSize;
8132  ++inoutStats.unusedRangeCount;
8133  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8134  }
8135 
8136  // 2. Process this allocation.
8137  // There is allocation with suballoc.offset, suballoc.size.
8138  ++inoutStats.allocationCount;
8139 
8140  // 3. Prepare for next iteration.
8141  lastOffset = suballoc.offset + suballoc.size;
8142  ++nextAlloc1stIndex;
8143  }
8144  // We are at the end.
8145  else
8146  {
8147  if(lastOffset < freeSpace1stTo2ndEnd)
8148  {
8149  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8150  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8151  inoutStats.unusedSize += unusedRangeSize;
8152  ++inoutStats.unusedRangeCount;
8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8154  }
8155 
8156  // End of loop.
8157  lastOffset = freeSpace1stTo2ndEnd;
8158  }
8159  }
8160 
8161  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8162  {
8163  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8164  while(lastOffset < size)
8165  {
8166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8167  while(nextAlloc2ndIndex != SIZE_MAX &&
8168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8169  {
8170  --nextAlloc2ndIndex;
8171  }
8172 
8173  // Found non-null allocation.
8174  if(nextAlloc2ndIndex != SIZE_MAX)
8175  {
8176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8177 
8178  // 1. Process free space before this allocation.
8179  if(lastOffset < suballoc.offset)
8180  {
8181  // There is free space from lastOffset to suballoc.offset.
8182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8183  inoutStats.unusedSize += unusedRangeSize;
8184  ++inoutStats.unusedRangeCount;
8185  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8186  }
8187 
8188  // 2. Process this allocation.
8189  // There is allocation with suballoc.offset, suballoc.size.
8190  ++inoutStats.allocationCount;
8191 
8192  // 3. Prepare for next iteration.
8193  lastOffset = suballoc.offset + suballoc.size;
8194  --nextAlloc2ndIndex;
8195  }
8196  // We are at the end.
8197  else
8198  {
8199  if(lastOffset < size)
8200  {
8201  // There is free space from lastOffset to size.
8202  const VkDeviceSize unusedRangeSize = size - lastOffset;
8203  inoutStats.unusedSize += unusedRangeSize;
8204  ++inoutStats.unusedRangeCount;
8205  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8206  }
8207 
8208  // End of loop.
8209  lastOffset = size;
8210  }
8211  }
8212  }
8213 }
8214 
8215 #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON map of this block. Two passes over the same
// traversal order (ring-buffer part of 2nd vector, then 1st vector, then
// double-stack part of 2nd vector in reverse): the first pass only counts
// allocations/unused ranges and sums used bytes, because
// PrintDetailedMap_Begin needs those totals up front; the second pass emits
// the actual entries via PrintDetailedMap_UnusedRange / _Allocation.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // Ring-buffer region of 2nd ends where the first live 1st item begins.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Walk the 1st vector; freeSpace1stTo2ndEnd is also reused by the SECOND
    // PASS below.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): sibling loops compare against freeSpace1stTo2ndEnd
            // here; this condition is equivalent under the enclosing loop
            // guard (lastOffset < freeSpace1stTo2ndEnd <= size).
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // 2nd vector is stored top-down; iterate in reverse for address order.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
8530 #endif // #if VMA_STATS_STRING_ENABLED
8531 
8532 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8533  uint32_t currentFrameIndex,
8534  uint32_t frameInUseCount,
8535  VkDeviceSize bufferImageGranularity,
8536  VkDeviceSize allocSize,
8537  VkDeviceSize allocAlignment,
8538  bool upperAddress,
8539  VmaSuballocationType allocType,
8540  bool canMakeOtherLost,
8541  uint32_t strategy,
8542  VmaAllocationRequest* pAllocationRequest)
8543 {
8544  VMA_ASSERT(allocSize > 0);
8545  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8546  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8547  VMA_HEAVY_ASSERT(Validate());
8548 
8549  const VkDeviceSize size = GetSize();
8550  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8551  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8552 
8553  if(upperAddress)
8554  {
8555  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8556  {
8557  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8558  return false;
8559  }
8560 
8561  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8562  if(allocSize > size)
8563  {
8564  return false;
8565  }
8566  VkDeviceSize resultBaseOffset = size - allocSize;
8567  if(!suballocations2nd.empty())
8568  {
8569  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8570  resultBaseOffset = lastSuballoc.offset - allocSize;
8571  if(allocSize > lastSuballoc.offset)
8572  {
8573  return false;
8574  }
8575  }
8576 
8577  // Start from offset equal to end of free space.
8578  VkDeviceSize resultOffset = resultBaseOffset;
8579 
8580  // Apply VMA_DEBUG_MARGIN at the end.
8581  if(VMA_DEBUG_MARGIN > 0)
8582  {
8583  if(resultOffset < VMA_DEBUG_MARGIN)
8584  {
8585  return false;
8586  }
8587  resultOffset -= VMA_DEBUG_MARGIN;
8588  }
8589 
8590  // Apply alignment.
8591  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8592 
8593  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8594  // Make bigger alignment if necessary.
8595  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8596  {
8597  bool bufferImageGranularityConflict = false;
8598  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8599  {
8600  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8601  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8602  {
8603  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8604  {
8605  bufferImageGranularityConflict = true;
8606  break;
8607  }
8608  }
8609  else
8610  // Already on previous page.
8611  break;
8612  }
8613  if(bufferImageGranularityConflict)
8614  {
8615  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8616  }
8617  }
8618 
8619  // There is enough free space.
8620  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8621  suballocations1st.back().offset + suballocations1st.back().size :
8622  0;
8623  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8624  {
8625  // Check previous suballocations for BufferImageGranularity conflicts.
8626  // If conflict exists, allocation cannot be made here.
8627  if(bufferImageGranularity > 1)
8628  {
8629  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8630  {
8631  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8632  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8633  {
8634  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8635  {
8636  return false;
8637  }
8638  }
8639  else
8640  {
8641  // Already on next page.
8642  break;
8643  }
8644  }
8645  }
8646 
8647  // All tests passed: Success.
8648  pAllocationRequest->offset = resultOffset;
8649  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8650  pAllocationRequest->sumItemSize = 0;
8651  // pAllocationRequest->item unused.
8652  pAllocationRequest->itemsToMakeLostCount = 0;
8653  return true;
8654  }
8655  }
8656  else // !upperAddress
8657  {
8658  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8659  {
8660  // Try to allocate at the end of 1st vector.
8661 
8662  VkDeviceSize resultBaseOffset = 0;
8663  if(!suballocations1st.empty())
8664  {
8665  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8666  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8667  }
8668 
8669  // Start from offset equal to beginning of free space.
8670  VkDeviceSize resultOffset = resultBaseOffset;
8671 
8672  // Apply VMA_DEBUG_MARGIN at the beginning.
8673  if(VMA_DEBUG_MARGIN > 0)
8674  {
8675  resultOffset += VMA_DEBUG_MARGIN;
8676  }
8677 
8678  // Apply alignment.
8679  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8680 
8681  // Check previous suballocations for BufferImageGranularity conflicts.
8682  // Make bigger alignment if necessary.
8683  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8684  {
8685  bool bufferImageGranularityConflict = false;
8686  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8687  {
8688  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8689  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8690  {
8691  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8692  {
8693  bufferImageGranularityConflict = true;
8694  break;
8695  }
8696  }
8697  else
8698  // Already on previous page.
8699  break;
8700  }
8701  if(bufferImageGranularityConflict)
8702  {
8703  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8704  }
8705  }
8706 
8707  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8708  suballocations2nd.back().offset : size;
8709 
8710  // There is enough free space at the end after alignment.
8711  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8712  {
8713  // Check next suballocations for BufferImageGranularity conflicts.
8714  // If conflict exists, allocation cannot be made here.
8715  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8716  {
8717  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8718  {
8719  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8720  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8721  {
8722  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8723  {
8724  return false;
8725  }
8726  }
8727  else
8728  {
8729  // Already on previous page.
8730  break;
8731  }
8732  }
8733  }
8734 
8735  // All tests passed: Success.
8736  pAllocationRequest->offset = resultOffset;
8737  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8738  pAllocationRequest->sumItemSize = 0;
8739  // pAllocationRequest->item unused.
8740  pAllocationRequest->itemsToMakeLostCount = 0;
8741  return true;
8742  }
8743  }
8744 
8745  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8746  // beginning of 1st vector as the end of free space.
8747  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8748  {
8749  VMA_ASSERT(!suballocations1st.empty());
8750 
8751  VkDeviceSize resultBaseOffset = 0;
8752  if(!suballocations2nd.empty())
8753  {
8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8755  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8756  }
8757 
8758  // Start from offset equal to beginning of free space.
8759  VkDeviceSize resultOffset = resultBaseOffset;
8760 
8761  // Apply VMA_DEBUG_MARGIN at the beginning.
8762  if(VMA_DEBUG_MARGIN > 0)
8763  {
8764  resultOffset += VMA_DEBUG_MARGIN;
8765  }
8766 
8767  // Apply alignment.
8768  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8769 
8770  // Check previous suballocations for BufferImageGranularity conflicts.
8771  // Make bigger alignment if necessary.
8772  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8773  {
8774  bool bufferImageGranularityConflict = false;
8775  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8776  {
8777  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8778  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8779  {
8780  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8781  {
8782  bufferImageGranularityConflict = true;
8783  break;
8784  }
8785  }
8786  else
8787  // Already on previous page.
8788  break;
8789  }
8790  if(bufferImageGranularityConflict)
8791  {
8792  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8793  }
8794  }
8795 
8796  pAllocationRequest->itemsToMakeLostCount = 0;
8797  pAllocationRequest->sumItemSize = 0;
8798  size_t index1st = m_1stNullItemsBeginCount;
8799 
8800  if(canMakeOtherLost)
8801  {
8802  while(index1st < suballocations1st.size() &&
8803  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8804  {
8805  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8806  const VmaSuballocation& suballoc = suballocations1st[index1st];
8807  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8808  {
8809  // No problem.
8810  }
8811  else
8812  {
8813  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8814  if(suballoc.hAllocation->CanBecomeLost() &&
8815  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8816  {
8817  ++pAllocationRequest->itemsToMakeLostCount;
8818  pAllocationRequest->sumItemSize += suballoc.size;
8819  }
8820  else
8821  {
8822  return false;
8823  }
8824  }
8825  ++index1st;
8826  }
8827 
8828  // Check next suballocations for BufferImageGranularity conflicts.
8829  // If conflict exists, we must mark more allocations lost or fail.
8830  if(bufferImageGranularity > 1)
8831  {
8832  while(index1st < suballocations1st.size())
8833  {
8834  const VmaSuballocation& suballoc = suballocations1st[index1st];
8835  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8836  {
8837  if(suballoc.hAllocation != VK_NULL_HANDLE)
8838  {
8839  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8840  if(suballoc.hAllocation->CanBecomeLost() &&
8841  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8842  {
8843  ++pAllocationRequest->itemsToMakeLostCount;
8844  pAllocationRequest->sumItemSize += suballoc.size;
8845  }
8846  else
8847  {
8848  return false;
8849  }
8850  }
8851  }
8852  else
8853  {
8854  // Already on next page.
8855  break;
8856  }
8857  ++index1st;
8858  }
8859  }
8860  }
8861 
8862  // There is enough free space at the end after alignment.
8863  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8864  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8865  {
8866  // Check next suballocations for BufferImageGranularity conflicts.
8867  // If conflict exists, allocation cannot be made here.
8868  if(bufferImageGranularity > 1)
8869  {
8870  for(size_t nextSuballocIndex = index1st;
8871  nextSuballocIndex < suballocations1st.size();
8872  nextSuballocIndex++)
8873  {
8874  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8875  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8876  {
8877  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8878  {
8879  return false;
8880  }
8881  }
8882  else
8883  {
8884  // Already on next page.
8885  break;
8886  }
8887  }
8888  }
8889 
8890  // All tests passed: Success.
8891  pAllocationRequest->offset = resultOffset;
8892  pAllocationRequest->sumFreeSize =
8893  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8894  - resultBaseOffset
8895  - pAllocationRequest->sumItemSize;
8896  // pAllocationRequest->item unused.
8897  return true;
8898  }
8899  }
8900  }
8901 
8902  return false;
8903 }
8904 
// Makes lost the allocations that a prior CreateAllocationRequest() counted in
// pAllocationRequest->itemsToMakeLostCount, so the requested allocation can
// take their place. Returns false if any of them could not be made lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost only happens on the ring-buffer path, never in
    // double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    // Walk the 1st vector starting at the first non-null item, converting live
    // allocations to free (null) items until the requested count is reached.
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Item becomes a "null" item in the middle of the 1st vector;
                // account for the reclaimed space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8949 
8950 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8951 {
8952  uint32_t lostAllocationCount = 0;
8953 
8954  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8955  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8956  {
8957  VmaSuballocation& suballoc = suballocations1st[i];
8958  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8959  suballoc.hAllocation->CanBecomeLost() &&
8960  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8961  {
8962  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8963  suballoc.hAllocation = VK_NULL_HANDLE;
8964  ++m_1stNullItemsMiddleCount;
8965  m_SumFreeSize += suballoc.size;
8966  ++lostAllocationCount;
8967  }
8968  }
8969 
8970  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8971  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8972  {
8973  VmaSuballocation& suballoc = suballocations2nd[i];
8974  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8975  suballoc.hAllocation->CanBecomeLost() &&
8976  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8977  {
8978  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8979  suballoc.hAllocation = VK_NULL_HANDLE;
8980  ++m_2ndNullItemsCount;
8981  ++lostAllocationCount;
8982  }
8983  }
8984 
8985  if(lostAllocationCount)
8986  {
8987  CleanupAfterFree();
8988  }
8989 
8990  return lostAllocationCount;
8991 }
8992 
8993 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8994 {
8995  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8996  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8997  {
8998  const VmaSuballocation& suballoc = suballocations1st[i];
8999  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9000  {
9001  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9002  {
9003  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9004  return VK_ERROR_VALIDATION_FAILED_EXT;
9005  }
9006  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9007  {
9008  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9009  return VK_ERROR_VALIDATION_FAILED_EXT;
9010  }
9011  }
9012  }
9013 
9014  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9015  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9016  {
9017  const VmaSuballocation& suballoc = suballocations2nd[i];
9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9019  {
9020  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9021  {
9022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9023  return VK_ERROR_VALIDATION_FAILED_EXT;
9024  }
9025  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9026  {
9027  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9028  return VK_ERROR_VALIDATION_FAILED_EXT;
9029  }
9030  }
9031  }
9032 
9033  return VK_SUCCESS;
9034 }
9035 
// Registers the allocation described by `request` in the metadata.
// upperAddress puts it on the upper side (2nd vector as double stack);
// otherwise it is appended to the 1st vector, or wraps around into the 2nd
// vector acting as the second part of a ring buffer.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // The 2nd vector must either start acting as a ring buffer now,
                // or already be in ring-buffer mode.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
9105 
// Frees the given allocation by looking it up by its offset within the block.
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
9110 
// Frees the suballocation that starts at `offset`. Tries the cheap cases
// first (first item of 1st vector, last item of the active vector), then
// falls back to binary search in the middle of each vector. Asserts if no
// suballocation with this offset exists.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Binary search: the 1st vector is sorted by ascending offset.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a null item; the actual vector element is removed later
            // by CleanupAfterFree() / compaction.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is sorted ascending in ring-buffer mode, descending in
        // double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
9199 
9200 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9201 {
9202  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9203  const size_t suballocCount = AccessSuballocations1st().size();
9204  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9205 }
9206 
// Restores the linear metadata invariants after a free: trims null items from
// both vectors' edges, optionally compacts the 1st vector, and handles the
// transitions "2nd vector became empty" and "1st vector became empty"
// (the latter swaps the roles of the two vectors in ring-buffer mode).
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block free: reset to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: move every live item toward index 0, preserving order,
            // then shrink the vector to the live count.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // The new 1st vector may itself start with null items.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9303 
9304 
9306 // class VmaBlockMetadata_Buddy
9307 
// Buddy-allocator metadata. m_FreeCount starts at 1 because the block begins
// as a single free node (the root, created later in Init()).
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // No free nodes registered yet at any level.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
9317 
// Recursively destroys the whole node tree starting at the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
9322 
// Initializes metadata for a block of the given size. The usable portion is
// the largest power of 2 not exceeding `size`; any remainder is unusable by
// the buddy algorithm.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: add levels (each halving the node size) while
    // nodes stay at least MIN_NODE_SIZE, capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The whole usable space starts as a single free node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
9347 
// Consistency check of the buddy metadata: validates the node tree, the
// aggregate counters, and the doubly-linked free lists at every level.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last list element must be the registered back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
9390 
9391 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9392 {
9393  for(uint32_t level = 0; level < m_LevelCount; ++level)
9394  {
9395  if(m_FreeList[level].front != VMA_NULL)
9396  {
9397  return LevelToNodeSize(level);
9398  }
9399  }
9400  return 0;
9401 }
9402 
// Fills `outInfo` with per-block statistics by traversing the node tree, then
// counts the unusable tail (size not covered by the power-of-2 usable space)
// as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    // Reset accumulators before the tree walk.
    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
9426 
// Accumulates this block's statistics into pool-level statistics.
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    inoutStats.size += GetSize();
    // The unusable tail counts as unused space too.
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}
9443 
9444 #if VMA_STATS_STRING_ENABLED
9445 
// Writes a detailed JSON map of this block: summary statistics, then every
// node of the tree, then the unusable tail (if any) as an unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
9470 
9471 #endif // #if VMA_STATS_STRING_ENABLED
9472 
// Searches the free lists for a node that can host an allocation of the given
// size and alignment. The buddy allocator does not support upper-address
// allocations or lost allocations, so currentFrameIndex, frameInUseCount,
// canMakeOtherLost, and strategy are unused here. On success fills
// *pAllocationRequest (the chosen level is stashed in customData) and
// returns true.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from targetLevel down to 0, i.e. from the smallest node size that
    // fits the request toward larger nodes (larger nodes get split in Alloc()).
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember which level the node was found at for Alloc().
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
9523 
9524 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9525  uint32_t currentFrameIndex,
9526  uint32_t frameInUseCount,
9527  VmaAllocationRequest* pAllocationRequest)
9528 {
9529  /*
9530  Lost allocations are not supported in buddy allocator at the moment.
9531  Support might be added in the future.
9532  */
9533  return pAllocationRequest->itemsToMakeLostCount == 0;
9534 }
9535 
// Returns the number of allocations made lost — always 0 here.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
9544 
// Commits the allocation chosen by CreateAllocationRequest(): finds the free
// node at the level stored in request.customData, splits it repeatedly down
// to the target level, and converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Pushing right then left makes the left child the list front, so the
        // next iteration descends into it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
9619 
9620 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9621 {
9622  if(node->type == Node::TYPE_SPLIT)
9623  {
9624  DeleteNode(node->split.leftChild->buddy);
9625  DeleteNode(node->split.leftChild);
9626  }
9627 
9628  vma_delete(GetAllocationCallbacks(), node);
9629 }
9630 
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    // Recursively checks structural invariants of the buddy tree and
    // accumulates free/allocation counters into ctx so the caller can compare
    // them against m_FreeCount / m_AllocationCount / m_SumFreeSize.
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Exactly the root has no buddy, and buddy links must be symmetric.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Padding between the allocation's size and the node size counts as free.
        // NOTE(review): GetSize() is invoked before the VK_NULL_HANDLE check
        // below — consider validating the handle first.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            // Left child shares the parent's offset; right child starts half
            // a node later.
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        // Unknown node type: corrupted tree.
        return false;
    }

    return true;
}
9674 
9675 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9676 {
9677  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9678  uint32_t level = 0;
9679  VkDeviceSize currLevelNodeSize = m_UsableSize;
9680  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9681  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9682  {
9683  ++level;
9684  currLevelNodeSize = nextLevelNodeSize;
9685  nextLevelNodeSize = currLevelNodeSize >> 1;
9686  }
9687  return level;
9688 }
9689 
9690 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9691 {
9692  // Find node and level.
9693  Node* node = m_Root;
9694  VkDeviceSize nodeOffset = 0;
9695  uint32_t level = 0;
9696  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9697  while(node->type == Node::TYPE_SPLIT)
9698  {
9699  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9700  if(offset < nodeOffset + nextLevelSize)
9701  {
9702  node = node->split.leftChild;
9703  }
9704  else
9705  {
9706  node = node->split.leftChild->buddy;
9707  nodeOffset += nextLevelSize;
9708  }
9709  ++level;
9710  levelNodeSize = nextLevelSize;
9711  }
9712 
9713  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9714  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9715 
9716  ++m_FreeCount;
9717  --m_AllocationCount;
9718  m_SumFreeSize += alloc->GetSize();
9719 
9720  node->type = Node::TYPE_FREE;
9721 
9722  // Join free nodes if possible.
9723  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9724  {
9725  RemoveFromFreeList(level, node->buddy);
9726  Node* const parent = node->parent;
9727 
9728  vma_delete(GetAllocationCallbacks(), node->buddy);
9729  vma_delete(GetAllocationCallbacks(), node);
9730  parent->type = Node::TYPE_FREE;
9731 
9732  node = parent;
9733  --level;
9734  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9735  --m_FreeCount;
9736  }
9737 
9738  AddToFreeListFront(level, node);
9739 }
9740 
9741 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9742 {
9743  switch(node->type)
9744  {
9745  case Node::TYPE_FREE:
9746  ++outInfo.unusedRangeCount;
9747  outInfo.unusedBytes += levelNodeSize;
9748  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9749  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
9750  break;
9751  case Node::TYPE_ALLOCATION:
9752  {
9753  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9754  ++outInfo.allocationCount;
9755  outInfo.usedBytes += allocSize;
9756  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9757  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
9758 
9759  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9760  if(unusedRangeSize > 0)
9761  {
9762  ++outInfo.unusedRangeCount;
9763  outInfo.unusedBytes += unusedRangeSize;
9764  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9765  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
9766  }
9767  }
9768  break;
9769  case Node::TYPE_SPLIT:
9770  {
9771  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9772  const Node* const leftChild = node->split.leftChild;
9773  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9774  const Node* const rightChild = leftChild->buddy;
9775  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9776  }
9777  break;
9778  default:
9779  VMA_ASSERT(0);
9780  }
9781 }
9782 
9783 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9784 {
9785  VMA_ASSERT(node->type == Node::TYPE_FREE);
9786 
9787  // List is empty.
9788  Node* const frontNode = m_FreeList[level].front;
9789  if(frontNode == VMA_NULL)
9790  {
9791  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9792  node->free.prev = node->free.next = VMA_NULL;
9793  m_FreeList[level].front = m_FreeList[level].back = node;
9794  }
9795  else
9796  {
9797  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9798  node->free.prev = VMA_NULL;
9799  node->free.next = frontNode;
9800  frontNode->free.prev = node;
9801  m_FreeList[level].front = node;
9802  }
9803 }
9804 
9805 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9806 {
9807  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9808 
9809  // It is at the front.
9810  if(node->free.prev == VMA_NULL)
9811  {
9812  VMA_ASSERT(m_FreeList[level].front == node);
9813  m_FreeList[level].front = node->free.next;
9814  }
9815  else
9816  {
9817  Node* const prevFreeNode = node->free.prev;
9818  VMA_ASSERT(prevFreeNode->free.next == node);
9819  prevFreeNode->free.next = node->free.next;
9820  }
9821 
9822  // It is at the back.
9823  if(node->free.next == VMA_NULL)
9824  {
9825  VMA_ASSERT(m_FreeList[level].back == node);
9826  m_FreeList[level].back = node->free.prev;
9827  }
9828  else
9829  {
9830  Node* const nextFreeNode = node->free.next;
9831  VMA_ASSERT(nextFreeNode->free.prev == node);
9832  nextFreeNode->free.prev = node->free.prev;
9833  }
9834 }
9835 
9836 #if VMA_STATS_STRING_ENABLED
9837 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9838 {
9839  switch(node->type)
9840  {
9841  case Node::TYPE_FREE:
9842  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9843  break;
9844  case Node::TYPE_ALLOCATION:
9845  {
9846  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9848  if(allocSize < levelNodeSize)
9849  {
9850  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9851  }
9852  }
9853  break;
9854  case Node::TYPE_SPLIT:
9855  {
9856  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9857  const Node* const leftChild = node->split.leftChild;
9858  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9859  const Node* const rightChild = leftChild->buddy;
9860  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9861  }
9862  break;
9863  default:
9864  VMA_ASSERT(0);
9865  }
9866 }
9867 #endif // #if VMA_STATS_STRING_ENABLED
9868 
9869 
9871 // class VmaDeviceMemoryBlock
9872 
// Puts the block into a safe "empty" state; real initialization happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
9882 
9883 void VmaDeviceMemoryBlock::Init(
9884  VmaAllocator hAllocator,
9885  uint32_t newMemoryTypeIndex,
9886  VkDeviceMemory newMemory,
9887  VkDeviceSize newSize,
9888  uint32_t id,
9889  uint32_t algorithm)
9890 {
9891  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9892 
9893  m_MemoryTypeIndex = newMemoryTypeIndex;
9894  m_Id = id;
9895  m_hMemory = newMemory;
9896 
9897  switch(algorithm)
9898  {
9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9901  break;
9903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
9904  break;
9905  default:
9906  VMA_ASSERT(0);
9907  // Fall-through.
9908  case 0:
9909  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9910  }
9911  m_pMetadata->Init(newSize);
9912 }
9913 
9914 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9915 {
9916  // This is the most important assert in the entire library.
9917  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9918  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9919 
9920  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9921  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9922  m_hMemory = VK_NULL_HANDLE;
9923 
9924  vma_delete(allocator, m_pMetadata);
9925  m_pMetadata = VMA_NULL;
9926 }
9927 
9928 bool VmaDeviceMemoryBlock::Validate() const
9929 {
9930  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9931  (m_pMetadata->GetSize() != 0));
9932 
9933  return m_pMetadata->Validate();
9934 }
9935 
9936 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9937 {
9938  void* pData = nullptr;
9939  VkResult res = Map(hAllocator, 1, &pData);
9940  if(res != VK_SUCCESS)
9941  {
9942  return res;
9943  }
9944 
9945  res = m_pMetadata->CheckCorruption(pData);
9946 
9947  Unmap(hAllocator, 1);
9948 
9949  return res;
9950 }
9951 
9952 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9953 {
9954  if(count == 0)
9955  {
9956  return VK_SUCCESS;
9957  }
9958 
9959  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9960  if(m_MapCount != 0)
9961  {
9962  m_MapCount += count;
9963  VMA_ASSERT(m_pMappedData != VMA_NULL);
9964  if(ppData != VMA_NULL)
9965  {
9966  *ppData = m_pMappedData;
9967  }
9968  return VK_SUCCESS;
9969  }
9970  else
9971  {
9972  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9973  hAllocator->m_hDevice,
9974  m_hMemory,
9975  0, // offset
9976  VK_WHOLE_SIZE,
9977  0, // flags
9978  &m_pMappedData);
9979  if(result == VK_SUCCESS)
9980  {
9981  if(ppData != VMA_NULL)
9982  {
9983  *ppData = m_pMappedData;
9984  }
9985  m_MapCount = count;
9986  }
9987  return result;
9988  }
9989 }
9990 
9991 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9992 {
9993  if(count == 0)
9994  {
9995  return;
9996  }
9997 
9998  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9999  if(m_MapCount >= count)
10000  {
10001  m_MapCount -= count;
10002  if(m_MapCount == 0)
10003  {
10004  m_pMappedData = VMA_NULL;
10005  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
10006  }
10007  }
10008  else
10009  {
10010  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
10011  }
10012 }
10013 
// Writes the corruption-detection magic pattern into the debug margins just
// before and just after the allocation at [allocOffset, allocOffset+allocSize).
// Requires corruption detection to be compiled in (see asserts).
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    // Temporarily map the block to get a CPU pointer.
    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Stamp the margin preceding and the margin following the allocation.
    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
10033 
// Verifies the magic patterns written by WriteMagicValueAroundAllocation are
// still intact; asserts (does not return an error) when corruption is found.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    // Temporarily map the block to get a CPU pointer.
    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Check the margin before the allocation, then the one after it.
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
10059 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    // The allocation must be a sub-allocation of exactly this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
10075 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    // The allocation must be a sub-allocation of exactly this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
10091 
// Zeroes all counters; minima start at UINT64_MAX so the first real sample
// always replaces them (folded with VMA_MIN).
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
10098 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counts and byte totals accumulate; min/max fields are merged with
// VMA_MIN/VMA_MAX respectively.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
10112 
10113 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10114 {
10115  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10116  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10117  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10118  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10119 }
10120 
// A custom pool is a thin wrapper over one VmaBlockVector configured from
// VmaPoolCreateInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred default size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
10139 
VmaPool_T::~VmaPool_T()
{
    // Intentionally empty: m_BlockVector's own destructor destroys the blocks.
}
10143 
10144 #if VMA_STATS_STRING_ENABLED
10145 
10146 #endif // #if VMA_STATS_STRING_ENABLED
10147 
// Stores the configuration of this block vector; no Vulkan memory is
// allocated here (see CreateMinBlocks / Allocate).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
10175 
10176 VmaBlockVector::~VmaBlockVector()
10177 {
10178  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10179 
10180  for(size_t i = m_Blocks.size(); i--; )
10181  {
10182  m_Blocks[i]->Destroy(m_hAllocator);
10183  vma_delete(m_hAllocator, m_Blocks[i]);
10184  }
10185 }
10186 
10187 VkResult VmaBlockVector::CreateMinBlocks()
10188 {
10189  for(size_t i = 0; i < m_MinBlockCount; ++i)
10190  {
10191  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10192  if(res != VK_SUCCESS)
10193  {
10194  return res;
10195  }
10196  }
10197  return VK_SUCCESS;
10198 }
10199 
10200 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10201 {
10202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10203 
10204  const size_t blockCount = m_Blocks.size();
10205 
10206  pStats->size = 0;
10207  pStats->unusedSize = 0;
10208  pStats->allocationCount = 0;
10209  pStats->unusedRangeCount = 0;
10210  pStats->unusedRangeSizeMax = 0;
10211  pStats->blockCount = blockCount;
10212 
10213  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10214  {
10215  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10216  VMA_ASSERT(pBlock);
10217  VMA_HEAVY_ASSERT(pBlock->Validate());
10218  pBlock->m_pMetadata->AddPoolStats(*pStats);
10219  }
10220 }
10221 
10222 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10223 {
10224  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10225  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10226  (VMA_DEBUG_MARGIN > 0) &&
10227  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10228 }
10229 
// Maximum number of retries in VmaBlockVector::Allocate when making other
// allocations lost keeps being invalidated by concurrent threads.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10231 
10232 VkResult VmaBlockVector::Allocate(
10233  VmaPool hCurrentPool,
10234  uint32_t currentFrameIndex,
10235  VkDeviceSize size,
10236  VkDeviceSize alignment,
10237  const VmaAllocationCreateInfo& createInfo,
10238  VmaSuballocationType suballocType,
10239  VmaAllocation* pAllocation)
10240 {
10241  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10242  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10243  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10244  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10245  const bool canCreateNewBlock =
10246  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10247  (m_Blocks.size() < m_MaxBlockCount);
10248  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10249 
10250  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
10251  // Which in turn is available only when maxBlockCount = 1.
10252  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10253  {
10254  canMakeOtherLost = false;
10255  }
10256 
10257  // Upper address can only be used with linear allocator and within single memory block.
10258  if(isUpperAddress &&
10259  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10260  {
10261  return VK_ERROR_FEATURE_NOT_PRESENT;
10262  }
10263 
10264  // Validate strategy.
10265  switch(strategy)
10266  {
10267  case 0:
10269  break;
10273  break;
10274  default:
10275  return VK_ERROR_FEATURE_NOT_PRESENT;
10276  }
10277 
10278  // Early reject: requested allocation size is larger that maximum block size for this block vector.
10279  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10280  {
10281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10282  }
10283 
10284  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10285 
10286  /*
10287  Under certain condition, this whole section can be skipped for optimization, so
10288  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10289  e.g. for custom pools with linear algorithm.
10290  */
10291  if(!canMakeOtherLost || canCreateNewBlock)
10292  {
10293  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10294  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10296 
10297  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10298  {
10299  // Use only last block.
10300  if(!m_Blocks.empty())
10301  {
10302  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10303  VMA_ASSERT(pCurrBlock);
10304  VkResult res = AllocateFromBlock(
10305  pCurrBlock,
10306  hCurrentPool,
10307  currentFrameIndex,
10308  size,
10309  alignment,
10310  allocFlagsCopy,
10311  createInfo.pUserData,
10312  suballocType,
10313  strategy,
10314  pAllocation);
10315  if(res == VK_SUCCESS)
10316  {
10317  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10318  return VK_SUCCESS;
10319  }
10320  }
10321  }
10322  else
10323  {
10325  {
10326  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10327  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10328  {
10329  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10330  VMA_ASSERT(pCurrBlock);
10331  VkResult res = AllocateFromBlock(
10332  pCurrBlock,
10333  hCurrentPool,
10334  currentFrameIndex,
10335  size,
10336  alignment,
10337  allocFlagsCopy,
10338  createInfo.pUserData,
10339  suballocType,
10340  strategy,
10341  pAllocation);
10342  if(res == VK_SUCCESS)
10343  {
10344  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10345  return VK_SUCCESS;
10346  }
10347  }
10348  }
10349  else // WORST_FIT, FIRST_FIT
10350  {
10351  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10352  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10353  {
10354  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10355  VMA_ASSERT(pCurrBlock);
10356  VkResult res = AllocateFromBlock(
10357  pCurrBlock,
10358  hCurrentPool,
10359  currentFrameIndex,
10360  size,
10361  alignment,
10362  allocFlagsCopy,
10363  createInfo.pUserData,
10364  suballocType,
10365  strategy,
10366  pAllocation);
10367  if(res == VK_SUCCESS)
10368  {
10369  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10370  return VK_SUCCESS;
10371  }
10372  }
10373  }
10374  }
10375 
10376  // 2. Try to create new block.
10377  if(canCreateNewBlock)
10378  {
10379  // Calculate optimal size for new block.
10380  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10381  uint32_t newBlockSizeShift = 0;
10382  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10383 
10384  if(!m_ExplicitBlockSize)
10385  {
10386  // Allocate 1/8, 1/4, 1/2 as first blocks.
10387  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10388  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10389  {
10390  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10391  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10392  {
10393  newBlockSize = smallerNewBlockSize;
10394  ++newBlockSizeShift;
10395  }
10396  else
10397  {
10398  break;
10399  }
10400  }
10401  }
10402 
10403  size_t newBlockIndex = 0;
10404  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10405  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10406  if(!m_ExplicitBlockSize)
10407  {
10408  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10409  {
10410  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10411  if(smallerNewBlockSize >= size)
10412  {
10413  newBlockSize = smallerNewBlockSize;
10414  ++newBlockSizeShift;
10415  res = CreateBlock(newBlockSize, &newBlockIndex);
10416  }
10417  else
10418  {
10419  break;
10420  }
10421  }
10422  }
10423 
10424  if(res == VK_SUCCESS)
10425  {
10426  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10427  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10428 
10429  res = AllocateFromBlock(
10430  pBlock,
10431  hCurrentPool,
10432  currentFrameIndex,
10433  size,
10434  alignment,
10435  allocFlagsCopy,
10436  createInfo.pUserData,
10437  suballocType,
10438  strategy,
10439  pAllocation);
10440  if(res == VK_SUCCESS)
10441  {
10442  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10443  return VK_SUCCESS;
10444  }
10445  else
10446  {
10447  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10449  }
10450  }
10451  }
10452  }
10453 
10454  // 3. Try to allocate from existing blocks with making other allocations lost.
10455  if(canMakeOtherLost)
10456  {
10457  uint32_t tryIndex = 0;
10458  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10459  {
10460  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10461  VmaAllocationRequest bestRequest = {};
10462  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10463 
10464  // 1. Search existing allocations.
10466  {
10467  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10468  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10469  {
10470  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10471  VMA_ASSERT(pCurrBlock);
10472  VmaAllocationRequest currRequest = {};
10473  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10474  currentFrameIndex,
10475  m_FrameInUseCount,
10476  m_BufferImageGranularity,
10477  size,
10478  alignment,
10479  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10480  suballocType,
10481  canMakeOtherLost,
10482  strategy,
10483  &currRequest))
10484  {
10485  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10486  if(pBestRequestBlock == VMA_NULL ||
10487  currRequestCost < bestRequestCost)
10488  {
10489  pBestRequestBlock = pCurrBlock;
10490  bestRequest = currRequest;
10491  bestRequestCost = currRequestCost;
10492 
10493  if(bestRequestCost == 0)
10494  {
10495  break;
10496  }
10497  }
10498  }
10499  }
10500  }
10501  else // WORST_FIT, FIRST_FIT
10502  {
10503  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10504  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10505  {
10506  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10507  VMA_ASSERT(pCurrBlock);
10508  VmaAllocationRequest currRequest = {};
10509  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10510  currentFrameIndex,
10511  m_FrameInUseCount,
10512  m_BufferImageGranularity,
10513  size,
10514  alignment,
10515  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10516  suballocType,
10517  canMakeOtherLost,
10518  strategy,
10519  &currRequest))
10520  {
10521  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10522  if(pBestRequestBlock == VMA_NULL ||
10523  currRequestCost < bestRequestCost ||
10525  {
10526  pBestRequestBlock = pCurrBlock;
10527  bestRequest = currRequest;
10528  bestRequestCost = currRequestCost;
10529 
10530  if(bestRequestCost == 0 ||
10532  {
10533  break;
10534  }
10535  }
10536  }
10537  }
10538  }
10539 
10540  if(pBestRequestBlock != VMA_NULL)
10541  {
10542  if(mapped)
10543  {
10544  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10545  if(res != VK_SUCCESS)
10546  {
10547  return res;
10548  }
10549  }
10550 
10551  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10552  currentFrameIndex,
10553  m_FrameInUseCount,
10554  &bestRequest))
10555  {
10556  // We no longer have an empty Allocation.
10557  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10558  {
10559  m_HasEmptyBlock = false;
10560  }
10561  // Allocate from this pBlock.
10562  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10563  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10564  (*pAllocation)->InitBlockAllocation(
10565  hCurrentPool,
10566  pBestRequestBlock,
10567  bestRequest.offset,
10568  alignment,
10569  size,
10570  suballocType,
10571  mapped,
10572  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10573  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10574  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
10575  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10576  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10577  {
10578  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10579  }
10580  if(IsCorruptionDetectionEnabled())
10581  {
10582  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10583  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10584  }
10585  return VK_SUCCESS;
10586  }
10587  // else: Some allocations must have been touched while we are here. Next try.
10588  }
10589  else
10590  {
10591  // Could not find place in any of the blocks - break outer loop.
10592  break;
10593  }
10594  }
10595  /* Maximum number of tries exceeded - a very unlike event when many other
10596  threads are simultaneously touching allocations making it impossible to make
10597  lost at the same time as we try to allocate. */
10598  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10599  {
10600  return VK_ERROR_TOO_MANY_OBJECTS;
10601  }
10602  }
10603 
10604  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10605 }
10606 
10607 void VmaBlockVector::Free(
10608  VmaAllocation hAllocation)
10609 {
10610  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10611 
10612  // Scope for lock.
10613  {
10614  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10615 
10616  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10617 
10618  if(IsCorruptionDetectionEnabled())
10619  {
10620  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10621  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10622  }
10623 
10624  if(hAllocation->IsPersistentMap())
10625  {
10626  pBlock->Unmap(m_hAllocator, 1);
10627  }
10628 
10629  pBlock->m_pMetadata->Free(hAllocation);
10630  VMA_HEAVY_ASSERT(pBlock->Validate());
10631 
10632  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
10633 
10634  // pBlock became empty after this deallocation.
10635  if(pBlock->m_pMetadata->IsEmpty())
10636  {
10637  // Already has empty Allocation. We don't want to have two, so delete this one.
10638  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10639  {
10640  pBlockToDelete = pBlock;
10641  Remove(pBlock);
10642  }
10643  // We now have first empty block.
10644  else
10645  {
10646  m_HasEmptyBlock = true;
10647  }
10648  }
10649  // pBlock didn't become empty, but we have another empty block - find and free that one.
10650  // (This is optional, heuristics.)
10651  else if(m_HasEmptyBlock)
10652  {
10653  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10654  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10655  {
10656  pBlockToDelete = pLastBlock;
10657  m_Blocks.pop_back();
10658  m_HasEmptyBlock = false;
10659  }
10660  }
10661 
10662  IncrementallySortBlocks();
10663  }
10664 
10665  // Destruction of a free Allocation. Deferred until this point, outside of mutex
10666  // lock, for performance reason.
10667  if(pBlockToDelete != VMA_NULL)
10668  {
10669  VMA_DEBUG_LOG(" Deleted empty allocation");
10670  pBlockToDelete->Destroy(m_hAllocator);
10671  vma_delete(m_hAllocator, pBlockToDelete);
10672  }
10673 }
10674 
10675 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10676 {
10677  VkDeviceSize result = 0;
10678  for(size_t i = m_Blocks.size(); i--; )
10679  {
10680  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10681  if(result >= m_PreferredBlockSize)
10682  {
10683  break;
10684  }
10685  }
10686  return result;
10687 }
10688 
10689 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10690 {
10691  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10692  {
10693  if(m_Blocks[blockIndex] == pBlock)
10694  {
10695  VmaVectorRemove(m_Blocks, blockIndex);
10696  return;
10697  }
10698  }
10699  VMA_ASSERT(0);
10700 }
10701 
10702 void VmaBlockVector::IncrementallySortBlocks()
10703 {
10704  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10705  {
10706  // Bubble sort only until first swap.
10707  for(size_t i = 1; i < m_Blocks.size(); ++i)
10708  {
10709  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10710  {
10711  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10712  return;
10713  }
10714  }
10715  }
10716 }
10717 
// Tries to suballocate `size` bytes with `alignment` from the given block,
// without making other allocations lost. On success fills *pAllocation and
// returns VK_SUCCESS; returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the block
// has no suitable free region.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Making other allocations lost is handled by the caller's slow path, not here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Take a persistent-map reference on the block before committing the
        // allocation, so a mapping failure can be reported without side effects.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill new memory with a debug pattern to help catch reads
        // of uninitialized data.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Write guard values around the allocation for later corruption checks.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
10792 
10793 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10794 {
10795  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10796  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10797  allocInfo.allocationSize = blockSize;
10798  VkDeviceMemory mem = VK_NULL_HANDLE;
10799  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10800  if(res < 0)
10801  {
10802  return res;
10803  }
10804 
10805  // New VkDeviceMemory successfully created.
10806 
10807  // Create new Allocation for it.
10808  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10809  pBlock->Init(
10810  m_hAllocator,
10811  m_MemoryTypeIndex,
10812  mem,
10813  allocInfo.allocationSize,
10814  m_NextBlockId++,
10815  m_Algorithm);
10816 
10817  m_Blocks.push_back(pBlock);
10818  if(pNewBlockIndex != VMA_NULL)
10819  {
10820  *pNewBlockIndex = m_Blocks.size() - 1;
10821  }
10822 
10823  return VK_SUCCESS;
10824 }
10825 
10826 #if VMA_STATS_STRING_ENABLED
10827 
// Serializes this block vector as a JSON object into `json`, under the mutex.
// Custom pools emit their full configuration; default pools emit only the
// preferred block size. Always followed by a "Blocks" object keyed by block id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are written only when they constrain the pool
        // (Min > 0, Max below "unlimited").
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm; only non-default algorithms are named.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
10890 
10891 #endif // #if VMA_STATS_STRING_ENABLED
10892 
10893 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10894  VmaAllocator hAllocator,
10895  uint32_t currentFrameIndex)
10896 {
10897  if(m_pDefragmentator == VMA_NULL)
10898  {
10899  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10900  hAllocator,
10901  this,
10902  currentFrameIndex);
10903  }
10904 
10905  return m_pDefragmentator;
10906 }
10907 
// Runs defragmentation on this block vector using the previously created
// defragmentator (no-op when none exists). Consumes the caller's moving
// budget via the maxBytesToMove / maxAllocationsToMove in-out parameters,
// accumulates statistics into *pDefragmentationStats (optional), and frees
// blocks that became empty, respecting m_MinBlockCount.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Reduce the remaining budget so the caller can spread it across
        // multiple block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backward so VmaVectorRemove doesn't shift
    // indices that are still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount; remember it is empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
10964 
10965 void VmaBlockVector::DestroyDefragmentator()
10966 {
10967  if(m_pDefragmentator != VMA_NULL)
10968  {
10969  vma_delete(m_hAllocator, m_pDefragmentator);
10970  m_pDefragmentator = VMA_NULL;
10971  }
10972 }
10973 
10974 void VmaBlockVector::MakePoolAllocationsLost(
10975  uint32_t currentFrameIndex,
10976  size_t* pLostAllocationCount)
10977 {
10978  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10979  size_t lostAllocationCount = 0;
10980  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10981  {
10982  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10983  VMA_ASSERT(pBlock);
10984  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10985  }
10986  if(pLostAllocationCount != VMA_NULL)
10987  {
10988  *pLostAllocationCount = lostAllocationCount;
10989  }
10990 }
10991 
10992 VkResult VmaBlockVector::CheckCorruption()
10993 {
10994  if(!IsCorruptionDetectionEnabled())
10995  {
10996  return VK_ERROR_FEATURE_NOT_PRESENT;
10997  }
10998 
10999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11000  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11001  {
11002  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11003  VMA_ASSERT(pBlock);
11004  VkResult res = pBlock->CheckCorruption(m_hAllocator);
11005  if(res != VK_SUCCESS)
11006  {
11007  return res;
11008  }
11009  }
11010  return VK_SUCCESS;
11011 }
11012 
11013 void VmaBlockVector::AddStats(VmaStats* pStats)
11014 {
11015  const uint32_t memTypeIndex = m_MemoryTypeIndex;
11016  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11017 
11018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11019 
11020  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11021  {
11022  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11023  VMA_ASSERT(pBlock);
11024  VMA_HEAVY_ASSERT(pBlock->Validate());
11025  VmaStatInfo allocationStatInfo;
11026  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11027  VmaAddStatInfo(pStats->total, allocationStatInfo);
11028  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11029  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11030  }
11031 }
11032 
11034 // VmaDefragmentator members definition
11035 
// Binds the defragmentator to one block vector and one frame index; the
// allocation and block-info containers use the allocator's callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm
    // (GetAlgorithm() == 0), not for linear or other custom pool algorithms.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
11050 
11051 VmaDefragmentator::~VmaDefragmentator()
11052 {
11053  for(size_t i = m_Blocks.size(); i--; )
11054  {
11055  vma_delete(m_hAllocator, m_Blocks[i]);
11056  }
11057 }
11058 
11059 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11060 {
11061  AllocationInfo allocInfo;
11062  allocInfo.m_hAllocation = hAlloc;
11063  allocInfo.m_pChanged = pChanged;
11064  m_Allocations.push_back(allocInfo);
11065 }
11066 
11067 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11068 {
11069  // It has already been mapped for defragmentation.
11070  if(m_pMappedDataForDefragmentation)
11071  {
11072  *ppMappedData = m_pMappedDataForDefragmentation;
11073  return VK_SUCCESS;
11074  }
11075 
11076  // It is originally mapped.
11077  if(m_pBlock->GetMappedData())
11078  {
11079  *ppMappedData = m_pBlock->GetMappedData();
11080  return VK_SUCCESS;
11081  }
11082 
11083  // Map on first usage.
11084  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11085  *ppMappedData = m_pMappedDataForDefragmentation;
11086  return res;
11087 }
11088 
11089 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11090 {
11091  if(m_pMappedDataForDefragmentation != VMA_NULL)
11092  {
11093  m_pBlock->Unmap(hAllocator, 1);
11094  }
11095 }
11096 
11097 VkResult VmaDefragmentator::DefragmentRound(
11098  VkDeviceSize maxBytesToMove,
11099  uint32_t maxAllocationsToMove)
11100 {
11101  if(m_Blocks.empty())
11102  {
11103  return VK_SUCCESS;
11104  }
11105 
11106  size_t srcBlockIndex = m_Blocks.size() - 1;
11107  size_t srcAllocIndex = SIZE_MAX;
11108  for(;;)
11109  {
11110  // 1. Find next allocation to move.
11111  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11112  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11113  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11114  {
11115  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11116  {
11117  // Finished: no more allocations to process.
11118  if(srcBlockIndex == 0)
11119  {
11120  return VK_SUCCESS;
11121  }
11122  else
11123  {
11124  --srcBlockIndex;
11125  srcAllocIndex = SIZE_MAX;
11126  }
11127  }
11128  else
11129  {
11130  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11131  }
11132  }
11133 
11134  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11135  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11136 
11137  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11138  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11139  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11140  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11141 
11142  // 2. Try to find new place for this allocation in preceding or current block.
11143  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11144  {
11145  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11146  VmaAllocationRequest dstAllocRequest;
11147  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11148  m_CurrentFrameIndex,
11149  m_pBlockVector->GetFrameInUseCount(),
11150  m_pBlockVector->GetBufferImageGranularity(),
11151  size,
11152  alignment,
11153  false, // upperAddress
11154  suballocType,
11155  false, // canMakeOtherLost
11157  &dstAllocRequest) &&
11158  MoveMakesSense(
11159  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11160  {
11161  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11162 
11163  // Reached limit on number of allocations or bytes to move.
11164  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11165  (m_BytesMoved + size > maxBytesToMove))
11166  {
11167  return VK_INCOMPLETE;
11168  }
11169 
11170  void* pDstMappedData = VMA_NULL;
11171  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11172  if(res != VK_SUCCESS)
11173  {
11174  return res;
11175  }
11176 
11177  void* pSrcMappedData = VMA_NULL;
11178  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11179  if(res != VK_SUCCESS)
11180  {
11181  return res;
11182  }
11183 
11184  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11185  memcpy(
11186  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11187  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11188  static_cast<size_t>(size));
11189 
11190  if(VMA_DEBUG_MARGIN > 0)
11191  {
11192  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11193  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11194  }
11195 
11196  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11197  dstAllocRequest,
11198  suballocType,
11199  size,
11200  false, // upperAddress
11201  allocInfo.m_hAllocation);
11202  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11203 
11204  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11205 
11206  if(allocInfo.m_pChanged != VMA_NULL)
11207  {
11208  *allocInfo.m_pChanged = VK_TRUE;
11209  }
11210 
11211  ++m_AllocationsMoved;
11212  m_BytesMoved += size;
11213 
11214  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11215 
11216  break;
11217  }
11218  }
11219 
11220  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11221 
11222  if(srcAllocIndex > 0)
11223  {
11224  --srcAllocIndex;
11225  }
11226  else
11227  {
11228  if(srcBlockIndex > 0)
11229  {
11230  --srcBlockIndex;
11231  srcAllocIndex = SIZE_MAX;
11232  }
11233  else
11234  {
11235  return VK_SUCCESS;
11236  }
11237  }
11238  }
11239 }
11240 
// Top-level defragmentation driver: builds per-block bookkeeping, distributes
// the registered allocations into their owning blocks, sorts blocks from most
// "destination" to most "source", then runs up to two DefragmentRound passes
// within the given byte/count budget. Blocks mapped for the copy are unmapped
// before returning.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value. Enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering criteria used by the sort below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
11308 
11309 bool VmaDefragmentator::MoveMakesSense(
11310  size_t dstBlockIndex, VkDeviceSize dstOffset,
11311  size_t srcBlockIndex, VkDeviceSize srcOffset)
11312 {
11313  if(dstBlockIndex < srcBlockIndex)
11314  {
11315  return true;
11316  }
11317  if(dstBlockIndex > srcBlockIndex)
11318  {
11319  return false;
11320  }
11321  if(dstOffset < srcOffset)
11322  {
11323  return true;
11324  }
11325  return false;
11326 }
11327 
11329 // VmaRecorder
11330 
11331 #if VMA_RECORDING_ENABLED
11332 
// Initializes members to an inert "not recording" state (no file, timer
// sentinels at INT64_MAX); real setup happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
11341 
11342 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11343 {
11344  m_UseMutex = useMutex;
11345  m_Flags = settings.flags;
11346 
11347  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11348  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11349 
11350  // Open file for writing.
11351  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11352  if(err != 0)
11353  {
11354  return VK_ERROR_INITIALIZATION_FAILED;
11355  }
11356 
11357  // Write header.
11358  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11359  fprintf(m_File, "%s\n", "1,3");
11360 
11361  return VK_SUCCESS;
11362 }
11363 
11364 VmaRecorder::~VmaRecorder()
11365 {
11366  if(m_File != VMA_NULL)
11367  {
11368  fclose(m_File);
11369  }
11370 }
11371 
11372 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11373 {
11374  CallParams callParams;
11375  GetBasicParams(callParams);
11376 
11377  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11378  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11379  Flush();
11380 }
11381 
11382 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11383 {
11384  CallParams callParams;
11385  GetBasicParams(callParams);
11386 
11387  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11388  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11389  Flush();
11390 }
11391 
// Records a vmaCreatePool call: the pool's full configuration plus its handle,
// as one CSV line. Argument order must match the format string exactly.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
11408 
11409 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11410 {
11411  CallParams callParams;
11412  GetBasicParams(callParams);
11413 
11414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11415  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11416  pool);
11417  Flush();
11418 }
11419 
// Records a vmaAllocateMemory call: memory requirements, creation parameters,
// and the resulting allocation handle, as one CSV line. Argument order must
// match the format string exactly.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // userDataStr renders pUserData either as a copied string or a pointer,
    // depending on the USER_DATA_COPY_STRING flag.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11444 
// Records a vmaAllocateMemoryForBuffer call, including the dedicated-allocation
// hints (written as 0/1), as one CSV line. Argument order must match the
// format string exactly.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11473 
// Records a vmaAllocateMemoryForImage call, including the dedicated-allocation
// hints (written as 0/1), as one CSV line. Argument order must match the
// format string exactly.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11502 
11503 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11504  VmaAllocation allocation)
11505 {
11506  CallParams callParams;
11507  GetBasicParams(callParams);
11508 
11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11510  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11511  allocation);
11512  Flush();
11513 }
11514 
// Records a vmaSetAllocationUserData call. pUserData is rendered as a string
// when the allocation stores user data as a copied string, otherwise as a
// pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
11531 
11532 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11533  VmaAllocation allocation)
11534 {
11535  CallParams callParams;
11536  GetBasicParams(callParams);
11537 
11538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11539  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11540  allocation);
11541  Flush();
11542 }
11543 
11544 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11545  VmaAllocation allocation)
11546 {
11547  CallParams callParams;
11548  GetBasicParams(callParams);
11549 
11550  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11551  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11552  allocation);
11553  Flush();
11554 }
11555 
11556 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11557  VmaAllocation allocation)
11558 {
11559  CallParams callParams;
11560  GetBasicParams(callParams);
11561 
11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11563  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11564  allocation);
11565  Flush();
11566 }
11567 
11568 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11569  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11570 {
11571  CallParams callParams;
11572  GetBasicParams(callParams);
11573 
11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11575  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11576  allocation,
11577  offset,
11578  size);
11579  Flush();
11580 }
11581 
11582 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11584 {
11585  CallParams callParams;
11586  GetBasicParams(callParams);
11587 
11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11589  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11590  allocation,
11591  offset,
11592  size);
11593  Flush();
11594 }
11595 
// Appends one CSV line recording a vmaCreateBuffer call: the buffer create
// parameters, the allocation create parameters, the resulting allocation
// handle, and its user data. Field order must match the replay tool's parser.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData as text or as a pointer, depending on the
    // USER_DATA_COPY_STRING bit in allocCreateInfo.flags.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11621 
// Appends one CSV line recording a vmaCreateImage call: the image create
// parameters, the allocation create parameters, the resulting allocation
// handle, and its user data. Field order must match the replay tool's parser.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Formats pUserData as text or as a pointer, depending on the
    // USER_DATA_COPY_STRING bit in allocCreateInfo.flags.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11656 
11657 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11658  VmaAllocation allocation)
11659 {
11660  CallParams callParams;
11661  GetBasicParams(callParams);
11662 
11663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11665  allocation);
11666  Flush();
11667 }
11668 
11669 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11670  VmaAllocation allocation)
11671 {
11672  CallParams callParams;
11673  GetBasicParams(callParams);
11674 
11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11676  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11677  allocation);
11678  Flush();
11679 }
11680 
11681 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11682  VmaAllocation allocation)
11683 {
11684  CallParams callParams;
11685  GetBasicParams(callParams);
11686 
11687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11688  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11689  allocation);
11690  Flush();
11691 }
11692 
11693 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11694  VmaAllocation allocation)
11695 {
11696  CallParams callParams;
11697  GetBasicParams(callParams);
11698 
11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11700  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11701  allocation);
11702  Flush();
11703 }
11704 
11705 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11706  VmaPool pool)
11707 {
11708  CallParams callParams;
11709  GetBasicParams(callParams);
11710 
11711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11712  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11713  pool);
11714  Flush();
11715 }
11716 
11717 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11718 {
11719  if(pUserData != VMA_NULL)
11720  {
11721  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11722  {
11723  m_Str = (const char*)pUserData;
11724  }
11725  else
11726  {
11727  sprintf_s(m_PtrStr, "%p", pUserData);
11728  m_Str = m_PtrStr;
11729  }
11730  }
11731  else
11732  {
11733  m_Str = "";
11734  }
11735 }
11736 
// Writes the "Config" section at the top of the recording file: physical device
// identity and limits, the memory heap/type layout, enabled extensions, and the
// compile-time VMA_* macro values. The replay tool uses this to validate that a
// recording is replayed in a compatible environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
11782 
11783 void VmaRecorder::GetBasicParams(CallParams& outParams)
11784 {
11785  outParams.threadId = GetCurrentThreadId();
11786 
11787  LARGE_INTEGER counter;
11788  QueryPerformanceCounter(&counter);
11789  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11790 }
11791 
11792 void VmaRecorder::Flush()
11793 {
11794  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11795  {
11796  fflush(m_File);
11797  }
11798 }
11799 
11800 #endif // #if VMA_RECORDING_ENABLED
11801 
11803 // VmaAllocator_T
11804 
11805 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11806  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11807  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11808  m_hDevice(pCreateInfo->device),
11809  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11810  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11811  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11812  m_PreferredLargeHeapBlockSize(0),
11813  m_PhysicalDevice(pCreateInfo->physicalDevice),
11814  m_CurrentFrameIndex(0),
11815  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11816  m_NextPoolId(0)
11818  ,m_pRecorder(VMA_NULL)
11819 #endif
11820 {
11821  if(VMA_DEBUG_DETECT_CORRUPTION)
11822  {
11823  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11824  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11825  }
11826 
11827  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11828 
11829 #if !(VMA_DEDICATED_ALLOCATION)
11831  {
11832  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11833  }
11834 #endif
11835 
11836  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
11837  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11838  memset(&m_MemProps, 0, sizeof(m_MemProps));
11839 
11840  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11841  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11842 
11843  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11844  {
11845  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11846  }
11847 
11848  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11849  {
11850  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11851  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11852  }
11853 
11854  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11855 
11856  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11857  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11858 
11859  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11860  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11861  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11862  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11863 
11864  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11865  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11866 
11867  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11868  {
11869  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11870  {
11871  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11872  if(limit != VK_WHOLE_SIZE)
11873  {
11874  m_HeapSizeLimit[heapIndex] = limit;
11875  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11876  {
11877  m_MemProps.memoryHeaps[heapIndex].size = limit;
11878  }
11879  }
11880  }
11881  }
11882 
11883  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11884  {
11885  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11886 
11887  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11888  this,
11889  memTypeIndex,
11890  preferredBlockSize,
11891  0,
11892  SIZE_MAX,
11893  GetBufferImageGranularity(),
11894  pCreateInfo->frameInUseCount,
11895  false, // isCustomPool
11896  false, // explicitBlockSize
11897  false); // linearAlgorithm
11898  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
11899  // becase minBlockCount is 0.
11900  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11901 
11902  }
11903 }
11904 
11905 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11906 {
11907  VkResult res = VK_SUCCESS;
11908 
11909  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11910  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11911  {
11912 #if VMA_RECORDING_ENABLED
11913  m_pRecorder = vma_new(this, VmaRecorder)();
11914  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11915  if(res != VK_SUCCESS)
11916  {
11917  return res;
11918  }
11919  m_pRecorder->WriteConfiguration(
11920  m_PhysicalDeviceProperties,
11921  m_MemProps,
11922  m_UseKhrDedicatedAllocation);
11923  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11924 #else
11925  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11926  return VK_ERROR_FEATURE_NOT_PRESENT;
11927 #endif
11928  }
11929 
11930  return res;
11931 }
11932 
11933 VmaAllocator_T::~VmaAllocator_T()
11934 {
11935 #if VMA_RECORDING_ENABLED
11936  if(m_pRecorder != VMA_NULL)
11937  {
11938  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11939  vma_delete(this, m_pRecorder);
11940  }
11941 #endif
11942 
11943  VMA_ASSERT(m_Pools.empty());
11944 
11945  for(size_t i = GetMemoryTypeCount(); i--; )
11946  {
11947  vma_delete(this, m_pDedicatedAllocations[i]);
11948  vma_delete(this, m_pBlockVectors[i]);
11949  }
11950 }
11951 
// Populates m_VulkanFunctions. Sources, in order of precedence:
// 1. Statically-linked Vulkan functions (when VMA_STATIC_VULKAN_FUNCTIONS == 1),
//    including the KHR dedicated-allocation entry points fetched via
//    vkGetDeviceProcAddr when that extension is in use.
// 2. Any non-null pointers the user supplied in pVulkanFunctions, which
//    override the static ones.
// Finishes by asserting that every required function pointer is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not exported statically; resolve them at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied function pointer, keeping the existing one when null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
12037 
12038 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12039 {
12040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12041  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12042  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12043  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12044 }
12045 
12046 VkResult VmaAllocator_T::AllocateMemoryOfType(
12047  VkDeviceSize size,
12048  VkDeviceSize alignment,
12049  bool dedicatedAllocation,
12050  VkBuffer dedicatedBuffer,
12051  VkImage dedicatedImage,
12052  const VmaAllocationCreateInfo& createInfo,
12053  uint32_t memTypeIndex,
12054  VmaSuballocationType suballocType,
12055  VmaAllocation* pAllocation)
12056 {
12057  VMA_ASSERT(pAllocation != VMA_NULL);
12058  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
12059 
12060  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12061 
12062  // If memory type is not HOST_VISIBLE, disable MAPPED.
12063  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12064  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12065  {
12066  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12067  }
12068 
12069  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12070  VMA_ASSERT(blockVector);
12071 
12072  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12073  bool preferDedicatedMemory =
12074  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12075  dedicatedAllocation ||
12076  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
12077  size > preferredBlockSize / 2;
12078 
12079  if(preferDedicatedMemory &&
12080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12081  finalCreateInfo.pool == VK_NULL_HANDLE)
12082  {
12084  }
12085 
12086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12087  {
12088  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12089  {
12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12091  }
12092  else
12093  {
12094  return AllocateDedicatedMemory(
12095  size,
12096  suballocType,
12097  memTypeIndex,
12098  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12099  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12100  finalCreateInfo.pUserData,
12101  dedicatedBuffer,
12102  dedicatedImage,
12103  pAllocation);
12104  }
12105  }
12106  else
12107  {
12108  VkResult res = blockVector->Allocate(
12109  VK_NULL_HANDLE, // hCurrentPool
12110  m_CurrentFrameIndex.load(),
12111  size,
12112  alignment,
12113  finalCreateInfo,
12114  suballocType,
12115  pAllocation);
12116  if(res == VK_SUCCESS)
12117  {
12118  return res;
12119  }
12120 
12121  // 5. Try dedicated memory.
12122  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12123  {
12124  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12125  }
12126  else
12127  {
12128  res = AllocateDedicatedMemory(
12129  size,
12130  suballocType,
12131  memTypeIndex,
12132  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12133  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12134  finalCreateInfo.pUserData,
12135  dedicatedBuffer,
12136  dedicatedImage,
12137  pAllocation);
12138  if(res == VK_SUCCESS)
12139  {
12140  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
12141  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12142  return VK_SUCCESS;
12143  }
12144  else
12145  {
12146  // Everything failed: Return error code.
12147  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12148  return res;
12149  }
12150  }
12151  }
12152 }
12153 
// Allocates a whole VkDeviceMemory block dedicated to a single resource.
// Steps: build VkMemoryAllocateInfo (chaining VkMemoryDedicatedAllocateInfoKHR
// when the extension is in use and a buffer or image handle is given), call
// vkAllocateMemory, optionally persistently map it, create the VmaAllocation_T
// object, optionally fill it with a debug pattern, and register it in the
// per-memory-type dedicated-allocations list.
// Returns VK_SUCCESS, or the failing Vulkan call's error code; on a mapping
// failure the freshly allocated memory is released before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info for at most one of buffer/image.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block when VMA_ALLOCATION_CREATE_MAPPED_BIT was requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Roll back the allocation so no memory leaks on the error path.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug feature: fill new memory with a recognizable pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
12236 
// Queries memory requirements for a buffer. When the KHR dedicated-allocation
// extension is enabled, uses vkGetBufferMemoryRequirements2KHR with a chained
// VkMemoryDedicatedRequirementsKHR to also report whether a dedicated
// allocation is required or preferred; otherwise falls back to the core query
// and reports both flags as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct into the output.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
12268 
// Queries memory requirements for an image. Mirrors GetBufferMemoryRequirements:
// uses vkGetImageMemoryRequirements2KHR plus VkMemoryDedicatedRequirementsKHR
// when the KHR dedicated-allocation extension is enabled, otherwise the core
// query with both dedicated-allocation flags reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct into the output.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
12300 
12301 VkResult VmaAllocator_T::AllocateMemory(
12302  const VkMemoryRequirements& vkMemReq,
12303  bool requiresDedicatedAllocation,
12304  bool prefersDedicatedAllocation,
12305  VkBuffer dedicatedBuffer,
12306  VkImage dedicatedImage,
12307  const VmaAllocationCreateInfo& createInfo,
12308  VmaSuballocationType suballocType,
12309  VmaAllocation* pAllocation)
12310 {
12311  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12312 
12313  if(vkMemReq.size == 0)
12314  {
12315  return VK_ERROR_VALIDATION_FAILED_EXT;
12316  }
12317  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12318  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12319  {
12320  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12322  }
12323  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12325  {
12326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12328  }
12329  if(requiresDedicatedAllocation)
12330  {
12331  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12332  {
12333  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12334  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12335  }
12336  if(createInfo.pool != VK_NULL_HANDLE)
12337  {
12338  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12340  }
12341  }
12342  if((createInfo.pool != VK_NULL_HANDLE) &&
12343  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12344  {
12345  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12347  }
12348 
12349  if(createInfo.pool != VK_NULL_HANDLE)
12350  {
12351  const VkDeviceSize alignmentForPool = VMA_MAX(
12352  vkMemReq.alignment,
12353  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12354  return createInfo.pool->m_BlockVector.Allocate(
12355  createInfo.pool,
12356  m_CurrentFrameIndex.load(),
12357  vkMemReq.size,
12358  alignmentForPool,
12359  createInfo,
12360  suballocType,
12361  pAllocation);
12362  }
12363  else
12364  {
12365  // Bit mask of memory Vulkan types acceptable for this allocation.
12366  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12367  uint32_t memTypeIndex = UINT32_MAX;
12368  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12369  if(res == VK_SUCCESS)
12370  {
12371  VkDeviceSize alignmentForMemType = VMA_MAX(
12372  vkMemReq.alignment,
12373  GetMemoryTypeMinAlignment(memTypeIndex));
12374 
12375  res = AllocateMemoryOfType(
12376  vkMemReq.size,
12377  alignmentForMemType,
12378  requiresDedicatedAllocation || prefersDedicatedAllocation,
12379  dedicatedBuffer,
12380  dedicatedImage,
12381  createInfo,
12382  memTypeIndex,
12383  suballocType,
12384  pAllocation);
12385  // Succeeded on first try.
12386  if(res == VK_SUCCESS)
12387  {
12388  return res;
12389  }
12390  // Allocation from this memory type failed. Try other compatible memory types.
12391  else
12392  {
12393  for(;;)
12394  {
12395  // Remove old memTypeIndex from list of possibilities.
12396  memoryTypeBits &= ~(1u << memTypeIndex);
12397  // Find alternative memTypeIndex.
12398  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12399  if(res == VK_SUCCESS)
12400  {
12401  alignmentForMemType = VMA_MAX(
12402  vkMemReq.alignment,
12403  GetMemoryTypeMinAlignment(memTypeIndex));
12404 
12405  res = AllocateMemoryOfType(
12406  vkMemReq.size,
12407  alignmentForMemType,
12408  requiresDedicatedAllocation || prefersDedicatedAllocation,
12409  dedicatedBuffer,
12410  dedicatedImage,
12411  createInfo,
12412  memTypeIndex,
12413  suballocType,
12414  pAllocation);
12415  // Allocation from this alternative memory type succeeded.
12416  if(res == VK_SUCCESS)
12417  {
12418  return res;
12419  }
12420  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12421  }
12422  // No other matching memory type index could be found.
12423  else
12424  {
12425  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12427  }
12428  }
12429  }
12430  }
12431  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12432  else
12433  return res;
12434  }
12435 }
12436 
// Frees the memory backing `allocation` (if any) and destroys the allocation
// object itself. Safe to call on an allocation that has become lost: in that
// case only the bookkeeping object is destroyed.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation() returns false when the allocation is already lost,
    // so there is no underlying memory left to return.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Debug aid: overwrite the memory with a "destroyed" pattern to
            // make use-after-free bugs easier to spot.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to the block vector it came from:
                // the custom pool's vector, or the default per-memory-type one.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocations own a whole VkDeviceMemory - free it directly.
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Clear user data (may invoke user callback logic) before destroying the handle.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
12477 
// Fills *pStats with aggregated statistics over all memory owned by this
// allocator: default per-memory-type pools, custom pools, and dedicated
// allocations, broken down per memory type and per memory heap.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools. The pools mutex guards the m_Pools list.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations. Each memory type's dedicated-allocation
    // list has its own mutex.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess: compute derived values (averages etc.) from the raw sums.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
12528 
// PCI vendor ID of Advanced Micro Devices, Inc. (4098 == 0x1002), as reported
// in VkPhysicalDeviceProperties::vendorID.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12530 
12531 VkResult VmaAllocator_T::Defragment(
12532  VmaAllocation* pAllocations,
12533  size_t allocationCount,
12534  VkBool32* pAllocationsChanged,
12535  const VmaDefragmentationInfo* pDefragmentationInfo,
12536  VmaDefragmentationStats* pDefragmentationStats)
12537 {
12538  if(pAllocationsChanged != VMA_NULL)
12539  {
12540  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
12541  }
12542  if(pDefragmentationStats != VMA_NULL)
12543  {
12544  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12545  }
12546 
12547  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12548 
12549  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12550 
12551  const size_t poolCount = m_Pools.size();
12552 
12553  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12554  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12555  {
12556  VmaAllocation hAlloc = pAllocations[allocIndex];
12557  VMA_ASSERT(hAlloc);
12558  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12559  // DedicatedAlloc cannot be defragmented.
12560  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12561  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12562  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12563  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12564  // Lost allocation cannot be defragmented.
12565  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12566  {
12567  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12568 
12569  const VmaPool hAllocPool = hAlloc->GetPool();
12570  // This allocation belongs to custom pool.
12571  if(hAllocPool != VK_NULL_HANDLE)
12572  {
12573  // Pools with linear or buddy algorithm are not defragmented.
12574  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12575  {
12576  pAllocBlockVector = &hAllocPool->m_BlockVector;
12577  }
12578  }
12579  // This allocation belongs to general pool.
12580  else
12581  {
12582  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12583  }
12584 
12585  if(pAllocBlockVector != VMA_NULL)
12586  {
12587  VmaDefragmentator* const pDefragmentator =
12588  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12589  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12590  &pAllocationsChanged[allocIndex] : VMA_NULL;
12591  pDefragmentator->AddAllocation(hAlloc, pChanged);
12592  }
12593  }
12594  }
12595 
12596  VkResult result = VK_SUCCESS;
12597 
12598  // ======== Main processing.
12599 
12600  VkDeviceSize maxBytesToMove = SIZE_MAX;
12601  uint32_t maxAllocationsToMove = UINT32_MAX;
12602  if(pDefragmentationInfo != VMA_NULL)
12603  {
12604  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12605  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12606  }
12607 
12608  // Process standard memory.
12609  for(uint32_t memTypeIndex = 0;
12610  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12611  ++memTypeIndex)
12612  {
12613  // Only HOST_VISIBLE memory types can be defragmented.
12614  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12615  {
12616  result = m_pBlockVectors[memTypeIndex]->Defragment(
12617  pDefragmentationStats,
12618  maxBytesToMove,
12619  maxAllocationsToMove);
12620  }
12621  }
12622 
12623  // Process custom pools.
12624  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12625  {
12626  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12627  pDefragmentationStats,
12628  maxBytesToMove,
12629  maxAllocationsToMove);
12630  }
12631 
12632  // ======== Destroy defragmentators.
12633 
12634  // Process custom pools.
12635  for(size_t poolIndex = poolCount; poolIndex--; )
12636  {
12637  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12638  }
12639 
12640  // Process standard memory.
12641  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12642  {
12643  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12644  {
12645  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12646  }
12647  }
12648 
12649  return result;
12650 }
12651 
// Fills *pAllocationInfo for the given allocation. For lost-capable
// allocations this also "touches" the allocation: its last-use frame index is
// advanced to the current frame via a lock-free compare-exchange loop.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size/user data only, no memory.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // localLastUseFrameIndex is reloaded and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Even non-lost-capable allocations track last-use frame for stats;
        // keep it up to date with the same CAS loop.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
12723 
// Marks the allocation as used in the current frame and reports whether it is
// still alive. Returns false only if the allocation has become lost.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free loop: either observe the allocation as lost, observe it
        // already touched this frame, or CAS its last-use index forward.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure localLastUseFrameIndex was reloaded; retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Keep last-use frame index current for statistics purposes only.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always alive.
        return true;
    }
}
12775 
12776 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12777 {
12778  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12779 
12780  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12781 
12782  if(newCreateInfo.maxBlockCount == 0)
12783  {
12784  newCreateInfo.maxBlockCount = SIZE_MAX;
12785  }
12786  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12787  {
12788  return VK_ERROR_INITIALIZATION_FAILED;
12789  }
12790 
12791  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12792 
12793  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12794 
12795  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12796  if(res != VK_SUCCESS)
12797  {
12798  vma_delete(this, *pPool);
12799  *pPool = VMA_NULL;
12800  return res;
12801  }
12802 
12803  // Add to m_Pools.
12804  {
12805  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12806  (*pPool)->SetId(m_NextPoolId++);
12807  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12808  }
12809 
12810  return VK_SUCCESS;
12811 }
12812 
12813 void VmaAllocator_T::DestroyPool(VmaPool pool)
12814 {
12815  // Remove from m_Pools.
12816  {
12817  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12818  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12819  VMA_ASSERT(success && "Pool not found in Allocator.");
12820  }
12821 
12822  vma_delete(this, pool);
12823 }
12824 
12825 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12826 {
12827  pool->m_BlockVector.GetPoolStats(pPoolStats);
12828 }
12829 
// Atomically publishes the new frame index; it is read by the
// lost-allocation machinery (TouchAllocation and friends).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
12834 
12835 void VmaAllocator_T::MakePoolAllocationsLost(
12836  VmaPool hPool,
12837  size_t* pLostAllocationCount)
12838 {
12839  hPool->m_BlockVector.MakePoolAllocationsLost(
12840  m_CurrentFrameIndex.load(),
12841  pLostAllocationCount);
12842 }
12843 
// Validates corruption-detection margins of all allocations in the pool.
// Delegates entirely to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
12848 
12849 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12850 {
12851  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12852 
12853  // Process default pools.
12854  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12855  {
12856  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12857  {
12858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12859  VMA_ASSERT(pBlockVector);
12860  VkResult localRes = pBlockVector->CheckCorruption();
12861  switch(localRes)
12862  {
12863  case VK_ERROR_FEATURE_NOT_PRESENT:
12864  break;
12865  case VK_SUCCESS:
12866  finalRes = VK_SUCCESS;
12867  break;
12868  default:
12869  return localRes;
12870  }
12871  }
12872  }
12873 
12874  // Process custom pools.
12875  {
12876  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12877  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12878  {
12879  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12880  {
12881  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12882  switch(localRes)
12883  {
12884  case VK_ERROR_FEATURE_NOT_PRESENT:
12885  break;
12886  case VK_SUCCESS:
12887  finalRes = VK_SUCCESS;
12888  break;
12889  default:
12890  return localRes;
12891  }
12892  }
12893  }
12894  }
12895 
12896  return finalRes;
12897 }
12898 
12899 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12900 {
12901  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12902  (*pAllocation)->InitLost();
12903 }
12904 
// Calls vkAllocateMemory, honoring the optional user-imposed per-heap size
// limit and invoking the user's pfnAllocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // m_HeapSizeLimit[heapIndex] == VK_WHOLE_SIZE means "no limit set".
    // NOTE(review): the limit is read here without holding
    // m_HeapSizeLimitMutex; presumably benign because VK_WHOLE_SIZE vs.
    // limited is decided once at allocator creation - confirm elsewhere in file.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the heap's remaining budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-imposed heap size limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback about the successful device allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
12938 
// Calls vkFreeMemory and maintains the user-imposed per-heap size budget.
// The user's pfnFree callback is invoked before the memory is freed so the
// handle is still valid inside the callback.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // If this heap has a size limit (VK_WHOLE_SIZE == no limit), return the
    // freed bytes to the remaining budget.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
12955 
// Maps the allocation's memory and returns a CPU pointer in *ppData.
// Mapping is reference-counted; each successful Map() must be balanced by
// Unmap().
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    // Lost-capable allocations cannot be mapped: their memory could disappear
    // while the pointer is in use.
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            // Map the whole block (ref-counted), then offset into it.
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                // Also bump the allocation's own map counter.
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
12984 
12985 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12986 {
12987  switch(hAllocation->GetType())
12988  {
12989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12990  {
12991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12992  hAllocation->BlockAllocUnmap();
12993  pBlock->Unmap(this, 1);
12994  }
12995  break;
12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12997  hAllocation->DedicatedAllocUnmap(this);
12998  break;
12999  default:
13000  VMA_ASSERT(0);
13001  }
13002 }
13003 
13004 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
13005 {
13006  VkResult res = VK_SUCCESS;
13007  switch(hAllocation->GetType())
13008  {
13009  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13010  res = GetVulkanFunctions().vkBindBufferMemory(
13011  m_hDevice,
13012  hBuffer,
13013  hAllocation->GetMemory(),
13014  0); //memoryOffset
13015  break;
13016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13017  {
13018  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13019  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
13020  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13021  break;
13022  }
13023  default:
13024  VMA_ASSERT(0);
13025  }
13026  return res;
13027 }
13028 
13029 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13030 {
13031  VkResult res = VK_SUCCESS;
13032  switch(hAllocation->GetType())
13033  {
13034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13035  res = GetVulkanFunctions().vkBindImageMemory(
13036  m_hDevice,
13037  hImage,
13038  hAllocation->GetMemory(),
13039  0); //memoryOffset
13040  break;
13041  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13042  {
13043  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13044  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13045  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13046  break;
13047  }
13048  default:
13049  VMA_ASSERT(0);
13050  }
13051  return res;
13052 }
13053 
// Flushes or invalidates a [offset, offset+size) range of the allocation on
// non-coherent memory types. `offset` is relative to the allocation; the
// resulting VkMappedMemoryRange is aligned to nonCoherentAtomSize as the
// Vulkan spec requires. A no-op for coherent memory or size == 0.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated: the allocation starts at offset 0 of its memory.
            // Align the start down and the end up, clamped to allocation size.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block: translate to block-relative offset
                // and clamp so the range never exceeds the block's end.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
13129 
// Frees a dedicated allocation: removes it from the per-memory-type
// dedicated-allocation registry, then releases its whole VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Unregister under the per-memory-type mutex.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
13159 
13160 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13161 {
13162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13163  !hAllocation->CanBecomeLost() &&
13164  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13165  {
13166  void* pData = VMA_NULL;
13167  VkResult res = Map(hAllocation, &pData);
13168  if(res == VK_SUCCESS)
13169  {
13170  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13171  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13172  Unmap(hAllocation);
13173  }
13174  else
13175  {
13176  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13177  }
13178  }
13179 }
13180 
13181 #if VMA_STATS_STRING_ENABLED
13182 
// Emits a detailed JSON map of all memory owned by the allocator into `json`:
// dedicated allocations, default per-memory-type pools, and custom pools.
// Sections are emitted lazily - an object is only opened once the first
// non-empty entry is found.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object on first non-empty type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default per-memory-type pools.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the "DefaultPools" object on first non-empty vector.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keyed by the pool's numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
13268 
13269 #endif // #if VMA_STATS_STRING_ENABLED
13270 
13272 // Public interface
13273 
// Public entry point: creates the allocator object using the user-provided
// CPU allocation callbacks, then runs its two-phase initialization.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    // Init() may fail (e.g. recording setup); its result is returned to the caller.
    return (*pAllocator)->Init(pCreateInfo);
}
13283 
// Public entry point: destroys the allocator. Passing VK_NULL_HANDLE is a no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks to the stack first: the allocator object itself
        // is about to be freed, so its member cannot be referenced during deletion.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
13294 
13296  VmaAllocator allocator,
13297  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13298 {
13299  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13300  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13301 }
13302 
13304  VmaAllocator allocator,
13305  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13306 {
13307  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13308  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13309 }
13310 
13312  VmaAllocator allocator,
13313  uint32_t memoryTypeIndex,
13314  VkMemoryPropertyFlags* pFlags)
13315 {
13316  VMA_ASSERT(allocator && pFlags);
13317  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13318  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13319 }
13320 
13322  VmaAllocator allocator,
13323  uint32_t frameIndex)
13324 {
13325  VMA_ASSERT(allocator);
13326  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13327 
13328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13329 
13330  allocator->SetCurrentFrameIndex(frameIndex);
13331 }
13332 
// Public entry point: thin wrapper that validates arguments and forwards to
// VmaAllocator_T::CalculateStats().
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
13341 
13342 #if VMA_STATS_STRING_ENABLED
13343 
13344 void vmaBuildStatsString(
13345  VmaAllocator allocator,
13346  char** ppStatsString,
13347  VkBool32 detailedMap)
13348 {
13349  VMA_ASSERT(allocator && ppStatsString);
13350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13351 
13352  VmaStringBuilder sb(allocator);
13353  {
13354  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13355  json.BeginObject();
13356 
13357  VmaStats stats;
13358  allocator->CalculateStats(&stats);
13359 
13360  json.WriteString("Total");
13361  VmaPrintStatInfo(json, stats.total);
13362 
13363  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13364  {
13365  json.BeginString("Heap ");
13366  json.ContinueString(heapIndex);
13367  json.EndString();
13368  json.BeginObject();
13369 
13370  json.WriteString("Size");
13371  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13372 
13373  json.WriteString("Flags");
13374  json.BeginArray(true);
13375  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13376  {
13377  json.WriteString("DEVICE_LOCAL");
13378  }
13379  json.EndArray();
13380 
13381  if(stats.memoryHeap[heapIndex].blockCount > 0)
13382  {
13383  json.WriteString("Stats");
13384  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13385  }
13386 
13387  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13388  {
13389  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13390  {
13391  json.BeginString("Type ");
13392  json.ContinueString(typeIndex);
13393  json.EndString();
13394 
13395  json.BeginObject();
13396 
13397  json.WriteString("Flags");
13398  json.BeginArray(true);
13399  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13400  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13401  {
13402  json.WriteString("DEVICE_LOCAL");
13403  }
13404  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13405  {
13406  json.WriteString("HOST_VISIBLE");
13407  }
13408  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13409  {
13410  json.WriteString("HOST_COHERENT");
13411  }
13412  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13413  {
13414  json.WriteString("HOST_CACHED");
13415  }
13416  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13417  {
13418  json.WriteString("LAZILY_ALLOCATED");
13419  }
13420  json.EndArray();
13421 
13422  if(stats.memoryType[typeIndex].blockCount > 0)
13423  {
13424  json.WriteString("Stats");
13425  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13426  }
13427 
13428  json.EndObject();
13429  }
13430  }
13431 
13432  json.EndObject();
13433  }
13434  if(detailedMap == VK_TRUE)
13435  {
13436  allocator->PrintDetailedMap(json);
13437  }
13438 
13439  json.EndObject();
13440  }
13441 
13442  const size_t len = sb.GetLength();
13443  char* const pChars = vma_new_array(allocator, char, len + 1);
13444  if(len > 0)
13445  {
13446  memcpy(pChars, sb.GetData(), len);
13447  }
13448  pChars[len] = '\0';
13449  *ppStatsString = pChars;
13450 }
13451 
13452 void vmaFreeStatsString(
13453  VmaAllocator allocator,
13454  char* pStatsString)
13455 {
13456  if(pStatsString != VMA_NULL)
13457  {
13458  VMA_ASSERT(allocator);
13459  size_t len = strlen(pStatsString);
13460  vma_delete_array(allocator, pStatsString, len + 1);
13461  }
13462 }
13463 
13464 #endif // #if VMA_STATS_STRING_ENABLED
13465 
13466 /*
13467 This function is not protected by any mutex because it just reads immutable data.
13468 */
13469 VkResult vmaFindMemoryTypeIndex(
13470  VmaAllocator allocator,
13471  uint32_t memoryTypeBits,
13472  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13473  uint32_t* pMemoryTypeIndex)
13474 {
13475  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13476  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13477  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13478 
13479  if(pAllocationCreateInfo->memoryTypeBits != 0)
13480  {
13481  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13482  }
13483 
13484  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13485  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13486 
13487  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13488  if(mapped)
13489  {
13490  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13491  }
13492 
13493  // Convert usage to requiredFlags and preferredFlags.
13494  switch(pAllocationCreateInfo->usage)
13495  {
13497  break;
13499  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13500  {
13501  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13502  }
13503  break;
13505  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13506  break;
13508  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13509  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13510  {
13511  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13512  }
13513  break;
13515  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13516  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13517  break;
13518  default:
13519  break;
13520  }
13521 
13522  *pMemoryTypeIndex = UINT32_MAX;
13523  uint32_t minCost = UINT32_MAX;
13524  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13525  memTypeIndex < allocator->GetMemoryTypeCount();
13526  ++memTypeIndex, memTypeBit <<= 1)
13527  {
13528  // This memory type is acceptable according to memoryTypeBits bitmask.
13529  if((memTypeBit & memoryTypeBits) != 0)
13530  {
13531  const VkMemoryPropertyFlags currFlags =
13532  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13533  // This memory type contains requiredFlags.
13534  if((requiredFlags & ~currFlags) == 0)
13535  {
13536  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13537  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13538  // Remember memory type with lowest cost.
13539  if(currCost < minCost)
13540  {
13541  *pMemoryTypeIndex = memTypeIndex;
13542  if(currCost == 0)
13543  {
13544  return VK_SUCCESS;
13545  }
13546  minCost = currCost;
13547  }
13548  }
13549  }
13550  }
13551  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13552 }
13553 
13555  VmaAllocator allocator,
13556  const VkBufferCreateInfo* pBufferCreateInfo,
13557  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13558  uint32_t* pMemoryTypeIndex)
13559 {
13560  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13561  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13562  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13563  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13564 
13565  const VkDevice hDev = allocator->m_hDevice;
13566  VkBuffer hBuffer = VK_NULL_HANDLE;
13567  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13568  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13569  if(res == VK_SUCCESS)
13570  {
13571  VkMemoryRequirements memReq = {};
13572  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13573  hDev, hBuffer, &memReq);
13574 
13575  res = vmaFindMemoryTypeIndex(
13576  allocator,
13577  memReq.memoryTypeBits,
13578  pAllocationCreateInfo,
13579  pMemoryTypeIndex);
13580 
13581  allocator->GetVulkanFunctions().vkDestroyBuffer(
13582  hDev, hBuffer, allocator->GetAllocationCallbacks());
13583  }
13584  return res;
13585 }
13586 
13588  VmaAllocator allocator,
13589  const VkImageCreateInfo* pImageCreateInfo,
13590  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13591  uint32_t* pMemoryTypeIndex)
13592 {
13593  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13594  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13597 
13598  const VkDevice hDev = allocator->m_hDevice;
13599  VkImage hImage = VK_NULL_HANDLE;
13600  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13601  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13602  if(res == VK_SUCCESS)
13603  {
13604  VkMemoryRequirements memReq = {};
13605  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13606  hDev, hImage, &memReq);
13607 
13608  res = vmaFindMemoryTypeIndex(
13609  allocator,
13610  memReq.memoryTypeBits,
13611  pAllocationCreateInfo,
13612  pMemoryTypeIndex);
13613 
13614  allocator->GetVulkanFunctions().vkDestroyImage(
13615  hDev, hImage, allocator->GetAllocationCallbacks());
13616  }
13617  return res;
13618 }
13619 
13620 VkResult vmaCreatePool(
13621  VmaAllocator allocator,
13622  const VmaPoolCreateInfo* pCreateInfo,
13623  VmaPool* pPool)
13624 {
13625  VMA_ASSERT(allocator && pCreateInfo && pPool);
13626 
13627  VMA_DEBUG_LOG("vmaCreatePool");
13628 
13629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13630 
13631  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13632 
13633 #if VMA_RECORDING_ENABLED
13634  if(allocator->GetRecorder() != VMA_NULL)
13635  {
13636  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13637  }
13638 #endif
13639 
13640  return res;
13641 }
13642 
13643 void vmaDestroyPool(
13644  VmaAllocator allocator,
13645  VmaPool pool)
13646 {
13647  VMA_ASSERT(allocator);
13648 
13649  if(pool == VK_NULL_HANDLE)
13650  {
13651  return;
13652  }
13653 
13654  VMA_DEBUG_LOG("vmaDestroyPool");
13655 
13656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13657 
13658 #if VMA_RECORDING_ENABLED
13659  if(allocator->GetRecorder() != VMA_NULL)
13660  {
13661  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13662  }
13663 #endif
13664 
13665  allocator->DestroyPool(pool);
13666 }
13667 
13668 void vmaGetPoolStats(
13669  VmaAllocator allocator,
13670  VmaPool pool,
13671  VmaPoolStats* pPoolStats)
13672 {
13673  VMA_ASSERT(allocator && pool && pPoolStats);
13674 
13675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13676 
13677  allocator->GetPoolStats(pool, pPoolStats);
13678 }
13679 
13681  VmaAllocator allocator,
13682  VmaPool pool,
13683  size_t* pLostAllocationCount)
13684 {
13685  VMA_ASSERT(allocator && pool);
13686 
13687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13688 
13689 #if VMA_RECORDING_ENABLED
13690  if(allocator->GetRecorder() != VMA_NULL)
13691  {
13692  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13693  }
13694 #endif
13695 
13696  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13697 }
13698 
13699 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13700 {
13701  VMA_ASSERT(allocator && pool);
13702 
13703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13704 
13705  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13706 
13707  return allocator->CheckPoolCorruption(pool);
13708 }
13709 
13710 VkResult vmaAllocateMemory(
13711  VmaAllocator allocator,
13712  const VkMemoryRequirements* pVkMemoryRequirements,
13713  const VmaAllocationCreateInfo* pCreateInfo,
13714  VmaAllocation* pAllocation,
13715  VmaAllocationInfo* pAllocationInfo)
13716 {
13717  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13718 
13719  VMA_DEBUG_LOG("vmaAllocateMemory");
13720 
13721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13722 
13723  VkResult result = allocator->AllocateMemory(
13724  *pVkMemoryRequirements,
13725  false, // requiresDedicatedAllocation
13726  false, // prefersDedicatedAllocation
13727  VK_NULL_HANDLE, // dedicatedBuffer
13728  VK_NULL_HANDLE, // dedicatedImage
13729  *pCreateInfo,
13730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13731  pAllocation);
13732 
13733 #if VMA_RECORDING_ENABLED
13734  if(allocator->GetRecorder() != VMA_NULL)
13735  {
13736  allocator->GetRecorder()->RecordAllocateMemory(
13737  allocator->GetCurrentFrameIndex(),
13738  *pVkMemoryRequirements,
13739  *pCreateInfo,
13740  *pAllocation);
13741  }
13742 #endif
13743 
13744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13745  {
13746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13747  }
13748 
13749  return result;
13750 }
13751 
13753  VmaAllocator allocator,
13754  VkBuffer buffer,
13755  const VmaAllocationCreateInfo* pCreateInfo,
13756  VmaAllocation* pAllocation,
13757  VmaAllocationInfo* pAllocationInfo)
13758 {
13759  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13760 
13761  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13762 
13763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13764 
13765  VkMemoryRequirements vkMemReq = {};
13766  bool requiresDedicatedAllocation = false;
13767  bool prefersDedicatedAllocation = false;
13768  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13769  requiresDedicatedAllocation,
13770  prefersDedicatedAllocation);
13771 
13772  VkResult result = allocator->AllocateMemory(
13773  vkMemReq,
13774  requiresDedicatedAllocation,
13775  prefersDedicatedAllocation,
13776  buffer, // dedicatedBuffer
13777  VK_NULL_HANDLE, // dedicatedImage
13778  *pCreateInfo,
13779  VMA_SUBALLOCATION_TYPE_BUFFER,
13780  pAllocation);
13781 
13782 #if VMA_RECORDING_ENABLED
13783  if(allocator->GetRecorder() != VMA_NULL)
13784  {
13785  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13786  allocator->GetCurrentFrameIndex(),
13787  vkMemReq,
13788  requiresDedicatedAllocation,
13789  prefersDedicatedAllocation,
13790  *pCreateInfo,
13791  *pAllocation);
13792  }
13793 #endif
13794 
13795  if(pAllocationInfo && result == VK_SUCCESS)
13796  {
13797  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13798  }
13799 
13800  return result;
13801 }
13802 
13803 VkResult vmaAllocateMemoryForImage(
13804  VmaAllocator allocator,
13805  VkImage image,
13806  const VmaAllocationCreateInfo* pCreateInfo,
13807  VmaAllocation* pAllocation,
13808  VmaAllocationInfo* pAllocationInfo)
13809 {
13810  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13811 
13812  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13813 
13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13815 
13816  VkMemoryRequirements vkMemReq = {};
13817  bool requiresDedicatedAllocation = false;
13818  bool prefersDedicatedAllocation = false;
13819  allocator->GetImageMemoryRequirements(image, vkMemReq,
13820  requiresDedicatedAllocation, prefersDedicatedAllocation);
13821 
13822  VkResult result = allocator->AllocateMemory(
13823  vkMemReq,
13824  requiresDedicatedAllocation,
13825  prefersDedicatedAllocation,
13826  VK_NULL_HANDLE, // dedicatedBuffer
13827  image, // dedicatedImage
13828  *pCreateInfo,
13829  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13830  pAllocation);
13831 
13832 #if VMA_RECORDING_ENABLED
13833  if(allocator->GetRecorder() != VMA_NULL)
13834  {
13835  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13836  allocator->GetCurrentFrameIndex(),
13837  vkMemReq,
13838  requiresDedicatedAllocation,
13839  prefersDedicatedAllocation,
13840  *pCreateInfo,
13841  *pAllocation);
13842  }
13843 #endif
13844 
13845  if(pAllocationInfo && result == VK_SUCCESS)
13846  {
13847  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13848  }
13849 
13850  return result;
13851 }
13852 
13853 void vmaFreeMemory(
13854  VmaAllocator allocator,
13855  VmaAllocation allocation)
13856 {
13857  VMA_ASSERT(allocator);
13858 
13859  if(allocation == VK_NULL_HANDLE)
13860  {
13861  return;
13862  }
13863 
13864  VMA_DEBUG_LOG("vmaFreeMemory");
13865 
13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordFreeMemory(
13872  allocator->GetCurrentFrameIndex(),
13873  allocation);
13874  }
13875 #endif
13876 
13877  allocator->FreeMemory(allocation);
13878 }
13879 
13881  VmaAllocator allocator,
13882  VmaAllocation allocation,
13883  VmaAllocationInfo* pAllocationInfo)
13884 {
13885  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13886 
13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13888 
13889 #if VMA_RECORDING_ENABLED
13890  if(allocator->GetRecorder() != VMA_NULL)
13891  {
13892  allocator->GetRecorder()->RecordGetAllocationInfo(
13893  allocator->GetCurrentFrameIndex(),
13894  allocation);
13895  }
13896 #endif
13897 
13898  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13899 }
13900 
13901 VkBool32 vmaTouchAllocation(
13902  VmaAllocator allocator,
13903  VmaAllocation allocation)
13904 {
13905  VMA_ASSERT(allocator && allocation);
13906 
13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13908 
13909 #if VMA_RECORDING_ENABLED
13910  if(allocator->GetRecorder() != VMA_NULL)
13911  {
13912  allocator->GetRecorder()->RecordTouchAllocation(
13913  allocator->GetCurrentFrameIndex(),
13914  allocation);
13915  }
13916 #endif
13917 
13918  return allocator->TouchAllocation(allocation);
13919 }
13920 
13922  VmaAllocator allocator,
13923  VmaAllocation allocation,
13924  void* pUserData)
13925 {
13926  VMA_ASSERT(allocator && allocation);
13927 
13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13929 
13930  allocation->SetUserData(allocator, pUserData);
13931 
13932 #if VMA_RECORDING_ENABLED
13933  if(allocator->GetRecorder() != VMA_NULL)
13934  {
13935  allocator->GetRecorder()->RecordSetAllocationUserData(
13936  allocator->GetCurrentFrameIndex(),
13937  allocation,
13938  pUserData);
13939  }
13940 #endif
13941 }
13942 
13944  VmaAllocator allocator,
13945  VmaAllocation* pAllocation)
13946 {
13947  VMA_ASSERT(allocator && pAllocation);
13948 
13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13950 
13951  allocator->CreateLostAllocation(pAllocation);
13952 
13953 #if VMA_RECORDING_ENABLED
13954  if(allocator->GetRecorder() != VMA_NULL)
13955  {
13956  allocator->GetRecorder()->RecordCreateLostAllocation(
13957  allocator->GetCurrentFrameIndex(),
13958  *pAllocation);
13959  }
13960 #endif
13961 }
13962 
13963 VkResult vmaMapMemory(
13964  VmaAllocator allocator,
13965  VmaAllocation allocation,
13966  void** ppData)
13967 {
13968  VMA_ASSERT(allocator && allocation && ppData);
13969 
13970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13971 
13972  VkResult res = allocator->Map(allocation, ppData);
13973 
13974 #if VMA_RECORDING_ENABLED
13975  if(allocator->GetRecorder() != VMA_NULL)
13976  {
13977  allocator->GetRecorder()->RecordMapMemory(
13978  allocator->GetCurrentFrameIndex(),
13979  allocation);
13980  }
13981 #endif
13982 
13983  return res;
13984 }
13985 
13986 void vmaUnmapMemory(
13987  VmaAllocator allocator,
13988  VmaAllocation allocation)
13989 {
13990  VMA_ASSERT(allocator && allocation);
13991 
13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13993 
13994 #if VMA_RECORDING_ENABLED
13995  if(allocator->GetRecorder() != VMA_NULL)
13996  {
13997  allocator->GetRecorder()->RecordUnmapMemory(
13998  allocator->GetCurrentFrameIndex(),
13999  allocation);
14000  }
14001 #endif
14002 
14003  allocator->Unmap(allocation);
14004 }
14005 
14006 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14007 {
14008  VMA_ASSERT(allocator && allocation);
14009 
14010  VMA_DEBUG_LOG("vmaFlushAllocation");
14011 
14012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14013 
14014  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
14015 
14016 #if VMA_RECORDING_ENABLED
14017  if(allocator->GetRecorder() != VMA_NULL)
14018  {
14019  allocator->GetRecorder()->RecordFlushAllocation(
14020  allocator->GetCurrentFrameIndex(),
14021  allocation, offset, size);
14022  }
14023 #endif
14024 }
14025 
14026 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14027 {
14028  VMA_ASSERT(allocator && allocation);
14029 
14030  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14031 
14032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14033 
14034  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14035 
14036 #if VMA_RECORDING_ENABLED
14037  if(allocator->GetRecorder() != VMA_NULL)
14038  {
14039  allocator->GetRecorder()->RecordInvalidateAllocation(
14040  allocator->GetCurrentFrameIndex(),
14041  allocation, offset, size);
14042  }
14043 #endif
14044 }
14045 
14046 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14047 {
14048  VMA_ASSERT(allocator);
14049 
14050  VMA_DEBUG_LOG("vmaCheckCorruption");
14051 
14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14053 
14054  return allocator->CheckCorruption(memoryTypeBits);
14055 }
14056 
14057 VkResult vmaDefragment(
14058  VmaAllocator allocator,
14059  VmaAllocation* pAllocations,
14060  size_t allocationCount,
14061  VkBool32* pAllocationsChanged,
14062  const VmaDefragmentationInfo *pDefragmentationInfo,
14063  VmaDefragmentationStats* pDefragmentationStats)
14064 {
14065  VMA_ASSERT(allocator && pAllocations);
14066 
14067  VMA_DEBUG_LOG("vmaDefragment");
14068 
14069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14070 
14071  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14072 }
14073 
14074 VkResult vmaBindBufferMemory(
14075  VmaAllocator allocator,
14076  VmaAllocation allocation,
14077  VkBuffer buffer)
14078 {
14079  VMA_ASSERT(allocator && allocation && buffer);
14080 
14081  VMA_DEBUG_LOG("vmaBindBufferMemory");
14082 
14083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14084 
14085  return allocator->BindBufferMemory(allocation, buffer);
14086 }
14087 
14088 VkResult vmaBindImageMemory(
14089  VmaAllocator allocator,
14090  VmaAllocation allocation,
14091  VkImage image)
14092 {
14093  VMA_ASSERT(allocator && allocation && image);
14094 
14095  VMA_DEBUG_LOG("vmaBindImageMemory");
14096 
14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14098 
14099  return allocator->BindImageMemory(allocation, image);
14100 }
14101 
14102 VkResult vmaCreateBuffer(
14103  VmaAllocator allocator,
14104  const VkBufferCreateInfo* pBufferCreateInfo,
14105  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14106  VkBuffer* pBuffer,
14107  VmaAllocation* pAllocation,
14108  VmaAllocationInfo* pAllocationInfo)
14109 {
14110  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14111 
14112  if(pBufferCreateInfo->size == 0)
14113  {
14114  return VK_ERROR_VALIDATION_FAILED_EXT;
14115  }
14116 
14117  VMA_DEBUG_LOG("vmaCreateBuffer");
14118 
14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14120 
14121  *pBuffer = VK_NULL_HANDLE;
14122  *pAllocation = VK_NULL_HANDLE;
14123 
14124  // 1. Create VkBuffer.
14125  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14126  allocator->m_hDevice,
14127  pBufferCreateInfo,
14128  allocator->GetAllocationCallbacks(),
14129  pBuffer);
14130  if(res >= 0)
14131  {
14132  // 2. vkGetBufferMemoryRequirements.
14133  VkMemoryRequirements vkMemReq = {};
14134  bool requiresDedicatedAllocation = false;
14135  bool prefersDedicatedAllocation = false;
14136  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14137  requiresDedicatedAllocation, prefersDedicatedAllocation);
14138 
14139  // Make sure alignment requirements for specific buffer usages reported
14140  // in Physical Device Properties are included in alignment reported by memory requirements.
14141  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14142  {
14143  VMA_ASSERT(vkMemReq.alignment %
14144  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14145  }
14146  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14147  {
14148  VMA_ASSERT(vkMemReq.alignment %
14149  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14150  }
14151  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14152  {
14153  VMA_ASSERT(vkMemReq.alignment %
14154  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14155  }
14156 
14157  // 3. Allocate memory using allocator.
14158  res = allocator->AllocateMemory(
14159  vkMemReq,
14160  requiresDedicatedAllocation,
14161  prefersDedicatedAllocation,
14162  *pBuffer, // dedicatedBuffer
14163  VK_NULL_HANDLE, // dedicatedImage
14164  *pAllocationCreateInfo,
14165  VMA_SUBALLOCATION_TYPE_BUFFER,
14166  pAllocation);
14167 
14168 #if VMA_RECORDING_ENABLED
14169  if(allocator->GetRecorder() != VMA_NULL)
14170  {
14171  allocator->GetRecorder()->RecordCreateBuffer(
14172  allocator->GetCurrentFrameIndex(),
14173  *pBufferCreateInfo,
14174  *pAllocationCreateInfo,
14175  *pAllocation);
14176  }
14177 #endif
14178 
14179  if(res >= 0)
14180  {
14181  // 3. Bind buffer with memory.
14182  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14183  if(res >= 0)
14184  {
14185  // All steps succeeded.
14186  #if VMA_STATS_STRING_ENABLED
14187  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14188  #endif
14189  if(pAllocationInfo != VMA_NULL)
14190  {
14191  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14192  }
14193 
14194  return VK_SUCCESS;
14195  }
14196  allocator->FreeMemory(*pAllocation);
14197  *pAllocation = VK_NULL_HANDLE;
14198  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14199  *pBuffer = VK_NULL_HANDLE;
14200  return res;
14201  }
14202  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14203  *pBuffer = VK_NULL_HANDLE;
14204  return res;
14205  }
14206  return res;
14207 }
14208 
14209 void vmaDestroyBuffer(
14210  VmaAllocator allocator,
14211  VkBuffer buffer,
14212  VmaAllocation allocation)
14213 {
14214  VMA_ASSERT(allocator);
14215 
14216  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14217  {
14218  return;
14219  }
14220 
14221  VMA_DEBUG_LOG("vmaDestroyBuffer");
14222 
14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14224 
14225 #if VMA_RECORDING_ENABLED
14226  if(allocator->GetRecorder() != VMA_NULL)
14227  {
14228  allocator->GetRecorder()->RecordDestroyBuffer(
14229  allocator->GetCurrentFrameIndex(),
14230  allocation);
14231  }
14232 #endif
14233 
14234  if(buffer != VK_NULL_HANDLE)
14235  {
14236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14237  }
14238 
14239  if(allocation != VK_NULL_HANDLE)
14240  {
14241  allocator->FreeMemory(allocation);
14242  }
14243 }
14244 
14245 VkResult vmaCreateImage(
14246  VmaAllocator allocator,
14247  const VkImageCreateInfo* pImageCreateInfo,
14248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14249  VkImage* pImage,
14250  VmaAllocation* pAllocation,
14251  VmaAllocationInfo* pAllocationInfo)
14252 {
14253  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14254 
14255  if(pImageCreateInfo->extent.width == 0 ||
14256  pImageCreateInfo->extent.height == 0 ||
14257  pImageCreateInfo->extent.depth == 0 ||
14258  pImageCreateInfo->mipLevels == 0 ||
14259  pImageCreateInfo->arrayLayers == 0)
14260  {
14261  return VK_ERROR_VALIDATION_FAILED_EXT;
14262  }
14263 
14264  VMA_DEBUG_LOG("vmaCreateImage");
14265 
14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14267 
14268  *pImage = VK_NULL_HANDLE;
14269  *pAllocation = VK_NULL_HANDLE;
14270 
14271  // 1. Create VkImage.
14272  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14273  allocator->m_hDevice,
14274  pImageCreateInfo,
14275  allocator->GetAllocationCallbacks(),
14276  pImage);
14277  if(res >= 0)
14278  {
14279  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14280  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14281  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14282 
14283  // 2. Allocate memory using allocator.
14284  VkMemoryRequirements vkMemReq = {};
14285  bool requiresDedicatedAllocation = false;
14286  bool prefersDedicatedAllocation = false;
14287  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14288  requiresDedicatedAllocation, prefersDedicatedAllocation);
14289 
14290  res = allocator->AllocateMemory(
14291  vkMemReq,
14292  requiresDedicatedAllocation,
14293  prefersDedicatedAllocation,
14294  VK_NULL_HANDLE, // dedicatedBuffer
14295  *pImage, // dedicatedImage
14296  *pAllocationCreateInfo,
14297  suballocType,
14298  pAllocation);
14299 
14300 #if VMA_RECORDING_ENABLED
14301  if(allocator->GetRecorder() != VMA_NULL)
14302  {
14303  allocator->GetRecorder()->RecordCreateImage(
14304  allocator->GetCurrentFrameIndex(),
14305  *pImageCreateInfo,
14306  *pAllocationCreateInfo,
14307  *pAllocation);
14308  }
14309 #endif
14310 
14311  if(res >= 0)
14312  {
14313  // 3. Bind image with memory.
14314  res = allocator->BindImageMemory(*pAllocation, *pImage);
14315  if(res >= 0)
14316  {
14317  // All steps succeeded.
14318  #if VMA_STATS_STRING_ENABLED
14319  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14320  #endif
14321  if(pAllocationInfo != VMA_NULL)
14322  {
14323  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14324  }
14325 
14326  return VK_SUCCESS;
14327  }
14328  allocator->FreeMemory(*pAllocation);
14329  *pAllocation = VK_NULL_HANDLE;
14330  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14331  *pImage = VK_NULL_HANDLE;
14332  return res;
14333  }
14334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14335  *pImage = VK_NULL_HANDLE;
14336  return res;
14337  }
14338  return res;
14339 }
14340 
14341 void vmaDestroyImage(
14342  VmaAllocator allocator,
14343  VkImage image,
14344  VmaAllocation allocation)
14345 {
14346  VMA_ASSERT(allocator);
14347 
14348  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14349  {
14350  return;
14351  }
14352 
14353  VMA_DEBUG_LOG("vmaDestroyImage");
14354 
14355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14356 
14357 #if VMA_RECORDING_ENABLED
14358  if(allocator->GetRecorder() != VMA_NULL)
14359  {
14360  allocator->GetRecorder()->RecordDestroyImage(
14361  allocator->GetCurrentFrameIndex(),
14362  allocation);
14363  }
14364 #endif
14365 
14366  if(image != VK_NULL_HANDLE)
14367  {
14368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14369  }
14370  if(allocation != VK_NULL_HANDLE)
14371  {
14372  allocator->FreeMemory(allocation);
14373  }
14374 }
14375 
14376 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1584
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1885
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1641
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1615
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2207
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1596
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1842
Definition: vk_mem_alloc.h:1945
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1588
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2307
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1638
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2552
Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool.
Definition: vk_mem_alloc.h:2096
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1485
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2188
Definition: vk_mem_alloc.h:1922
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1577
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1995
Definition: vk_mem_alloc.h:1869
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1650
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2124
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1703
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1635
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1873
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1775
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1593
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1774
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2556
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1667
VmaStatInfo total
Definition: vk_mem_alloc.h:1784
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2564
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1979
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2547
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1594
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1519
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1644
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2138
Definition: vk_mem_alloc.h:2132
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1710
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2317
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1589
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1613
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2016
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2158
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2194
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1575
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2141
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1820
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2542
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2560
Definition: vk_mem_alloc.h:1859
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2003
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1592
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1780
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1525
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1546
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1617
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1551
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2562
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1990
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2204
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1585
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1763
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2153
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1538
Definition: vk_mem_alloc.h:2128
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1929
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1776
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1542
Definition: vk_mem_alloc.h:1953
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2144
Definition: vk_mem_alloc.h:1868
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1591
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1985
Definition: vk_mem_alloc.h:1976
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1766
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1587
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2166
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1653
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2197
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1974
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2009
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1691
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1782
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1909
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1775
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1598
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1623
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1540
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1597
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2180
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1590
Definition: vk_mem_alloc.h:1940
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1631
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2331
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1647
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1775
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1772
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2185
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1949
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2312
Definition: vk_mem_alloc.h:1960
Definition: vk_mem_alloc.h:1972
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2558
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1583
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1770
Definition: vk_mem_alloc.h:1825
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2134
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1620
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1768
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1595
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1599
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1896
Definition: vk_mem_alloc.h:1967
Definition: vk_mem_alloc.h:1852
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2326
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1573
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1586
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2113
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2293
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1957
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2078
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1776
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1607
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1783
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2191
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1776
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2298