//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #ifdef _WIN32
        #define VMA_RECORDING_ENABLED 1
    #else
        #define VMA_RECORDING_ENABLED 0
    #endif
#endif

#ifndef NOMINMAX
    #define NOMINMAX // For windows.h
#endif

#include <vulkan/vulkan.h>

#if VMA_RECORDING_ENABLED
    #include <windows.h>
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;

typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;

    const VmaRecordSettings* pRecordSettings;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

void vmaDestroyAllocator(
    VmaAllocator allocator);

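/*
Illustrative usage sketch (not part of the library): creating and destroying an
allocator. Assumes `physicalDevice` and `device` are a valid VkPhysicalDevice
and VkDevice obtained elsewhere.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator);
*/
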
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

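/*
Illustrative sketch (not part of the library): dumping allocator statistics as
a JSON string. The string must be released with vmaFreeStatsString, not free().

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
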
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool pool;
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

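/*
Illustrative sketch (not part of the library): finding a memory type suitable
for a staging buffer. Assumes `allocator` is a valid VmaAllocator.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufferInfo, &allocInfo, &memTypeIndex);
*/
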
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);

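/*
Illustrative sketch (not part of the library): creating a custom pool for a
specific memory type and destroying it. `memTypeIndex` could come from
vmaFindMemoryTypeIndex, as in the previous sketch.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per block
    poolInfo.maxBlockCount = 4;               // at most 64 MiB total

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... allocate from the pool via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/
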
VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

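/*
Illustrative sketch (not part of the library): mapping an allocation, writing
to it, and unmapping. Map/unmap calls are reference-counted; the memory must be
host-visible (e.g. VMA_MEMORY_USAGE_CPU_ONLY). `srcVertices` and
`vertexDataSize` are assumed to describe the data to upload.

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcVertices, vertexDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
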
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);

VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

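/*
Illustrative sketch (not part of the library): the typical way to create a
buffer together with its memory in one call, and to destroy both together.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
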
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/


/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

2742 
2743 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2744 //#define VMA_USE_STL_CONTAINERS 1
2745 
2746 /* Set this macro to 1 to make the library including and using STL containers:
2747 std::pair, std::vector, std::list, std::unordered_map.
2748 
2749 Set it to 0 or undefined to make the library using its own implementation of
2750 the containers.
2751 */
2752 #if VMA_USE_STL_CONTAINERS
2753  #define VMA_USE_STL_VECTOR 1
2754  #define VMA_USE_STL_UNORDERED_MAP 1
2755  #define VMA_USE_STL_LIST 1
2756 #endif
2757 
2758 #if VMA_USE_STL_VECTOR
2759  #include <vector>
2760 #endif
2761 
2762 #if VMA_USE_STL_UNORDERED_MAP
2763  #include <unordered_map>
2764 #endif
2765 
2766 #if VMA_USE_STL_LIST
2767  #include <list>
2768 #endif
2769 
/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif


// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif


// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif


#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

3012 
3013 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
3014 // Use types like uint32_t, uint64_t as T.
3015 template <typename T>
3016 static inline T VmaAlignUp(T val, T align)
3017 {
3018  return (val + align - 1) / align * align;
3019 }
3020 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
3021 // Use types like uint32_t, uint64_t as T.
3022 template <typename T>
3023 static inline T VmaAlignDown(T val, T align)
3024 {
3025  return val / align * align;
3026 }
3027 
3028 // Division with mathematical rounding to nearest number.
3029 template <typename T>
3030 static inline T VmaRoundDiv(T x, T y)
3031 {
3032  return (x + (y / (T)2)) / y;
3033 }
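
/*
Worked examples for the helpers above (values follow directly from the formulas):

    VmaAlignUp<uint32_t>(11, 8)   == 16   // (11 + 7) / 8 * 8
    VmaAlignDown<uint32_t>(11, 8) == 8    // 11 / 8 * 8
    VmaRoundDiv<uint32_t>(7, 2)   == 4    // (7 + 1) / 2, rounds to nearest
    VmaRoundDiv<uint32_t>(5, 2)   == 3    // (5 + 1) / 2
*/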

/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
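
/*
Worked examples: the OR cascade smears the highest set bit into all lower bits,
so e.g. 0b10100 becomes 0b11111 before the final step.

    VmaNextPow2(20u) == 32   // smallest power of 2 >= 20
    VmaNextPow2(32u) == 32   // already a power of 2, unchanged
    VmaPrevPow2(20u) == 16   // largest power of 2 <= 20
*/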

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
    return true;
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

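/*
Usage sketch (illustrative): VmaMutexLock guards a scope only when useMutex is
true, which lets single-threaded configurations skip locking entirely.

    void Example(VMA_MUTEX& mutex, bool useMutex)
    {
        VmaMutexLock lock(mutex, useMutex); // locks here (if useMutex)
        // ... critical section ...
    }   // unlocks in the destructor
*/
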
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
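
/*
Usage sketch (illustrative): lower-bound search in a sorted array.

    const int values[] = { 1, 3, 3, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        values, values + 4, 3,
        [](int a, int b) { return a < b; });
    // it points at values[1], the first element not less than 3.
*/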

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
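
/*
Usage sketch (illustrative): vma_new/vma_delete pair placement-new with memory
from the user-provided VkAllocationCallbacks, so objects are constructed in
memory that honors the callbacks. `pAllocationCallbacks` is assumed to be a
const VkAllocationCallbacks* (it may be null, which falls back to the system
aligned allocator).

    struct Foo { int x; Foo() : x(42) { } };

    Foo* foo = vma_new(pAllocationCallbacks, Foo)();   // allocate + construct
    vma_delete(pAllocationCallbacks, foo);             // destruct + free
*/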

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            // Pass the VkAllocationCallbacks, not the allocator object itself.
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
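
/*
Usage sketch (illustrative): keeping a VmaVector sorted with the helpers above.
CmpLess is any strict-weak-ordering functor, e.g.:

    struct IntLess { bool operator()(int a, int b) const { return a < b; } };

    VmaVector< int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(VMA_NULL));
    VmaVectorInsertSorted<IntLess>(v, 7);
    VmaVectorInsertSorted<IntLess>(v, 3);   // v == {3, 7}
    VmaVectorRemoveSorted<IntLess>(v, 7);   // v == {3}
*/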

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded,
because the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

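/*
Usage sketch (illustrative): a pool allocator that hands out fixed-type items
from 32-item blocks; Free() returns an item to its block's free list.

    VmaPoolAllocator<uint64_t> pool(VMA_NULL, 32); // null callbacks -> system allocator
    uint64_t* a = pool.Alloc();
    uint64_t* b = pool.Alloc();
    pool.Free(a);
    pool.Free(b);
*/
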
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

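/*
Usage sketch (illustrative): VmaList mirrors a subset of std::list but draws
its nodes from VmaPoolAllocator via the given VmaStlAllocator.

    VmaStlAllocator<int> alloc(VMA_NULL);
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.push_back(2);
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
        it != list.end(); ++it)
    {
        // *it visits 1, then 2
    }
*/
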
////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
4391 
4393 
4394 class VmaDeviceMemoryBlock;
4395 
4396 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4397 
4398 struct VmaAllocation_T
4399 {
4400  VMA_CLASS_NO_COPY(VmaAllocation_T)
4401 private:
4402  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4403 
4404  enum FLAGS
4405  {
4406  FLAG_USER_DATA_STRING = 0x01,
4407  };
4408 
4409 public:
4410  enum ALLOCATION_TYPE
4411  {
4412  ALLOCATION_TYPE_NONE,
4413  ALLOCATION_TYPE_BLOCK,
4414  ALLOCATION_TYPE_DEDICATED,
4415  };
4416 
4417  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4418  m_Alignment(1),
4419  m_Size(0),
4420  m_pUserData(VMA_NULL),
4421  m_LastUseFrameIndex(currentFrameIndex),
4422  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4423  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4424  m_MapCount(0),
4425  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4426  {
4427 #if VMA_STATS_STRING_ENABLED
4428  m_CreationFrameIndex = currentFrameIndex;
4429  m_BufferImageUsage = 0;
4430 #endif
4431  }
4432 
4433  ~VmaAllocation_T()
4434  {
4435  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4436 
4437  // Check if owned string was freed.
4438  VMA_ASSERT(m_pUserData == VMA_NULL);
4439  }
4440 
4441  void InitBlockAllocation(
4442  VmaPool hPool,
4443  VmaDeviceMemoryBlock* block,
4444  VkDeviceSize offset,
4445  VkDeviceSize alignment,
4446  VkDeviceSize size,
4447  VmaSuballocationType suballocationType,
4448  bool mapped,
4449  bool canBecomeLost)
4450  {
4451  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4452  VMA_ASSERT(block != VMA_NULL);
4453  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4454  m_Alignment = alignment;
4455  m_Size = size;
4456  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4457  m_SuballocationType = (uint8_t)suballocationType;
4458  m_BlockAllocation.m_hPool = hPool;
4459  m_BlockAllocation.m_Block = block;
4460  m_BlockAllocation.m_Offset = offset;
4461  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4462  }
4463 
4464  void InitLost()
4465  {
4466  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4467  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4468  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4469  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4470  m_BlockAllocation.m_Block = VMA_NULL;
4471  m_BlockAllocation.m_Offset = 0;
4472  m_BlockAllocation.m_CanBecomeLost = true;
4473  }
4474 
4475  void ChangeBlockAllocation(
4476  VmaAllocator hAllocator,
4477  VmaDeviceMemoryBlock* block,
4478  VkDeviceSize offset);
4479 
4480  // pMappedData not null means the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4481  void InitDedicatedAllocation(
4482  uint32_t memoryTypeIndex,
4483  VkDeviceMemory hMemory,
4484  VmaSuballocationType suballocationType,
4485  void* pMappedData,
4486  VkDeviceSize size)
4487  {
4488  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4489  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4490  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4491  m_Alignment = 0;
4492  m_Size = size;
4493  m_SuballocationType = (uint8_t)suballocationType;
4494  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4495  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4496  m_DedicatedAllocation.m_hMemory = hMemory;
4497  m_DedicatedAllocation.m_pMappedData = pMappedData;
4498  }
4499 
4500  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
4501  VkDeviceSize GetAlignment() const { return m_Alignment; }
4502  VkDeviceSize GetSize() const { return m_Size; }
4503  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
4504  void* GetUserData() const { return m_pUserData; }
4505  void SetUserData(VmaAllocator hAllocator, void* pUserData);
4506  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
4507 
4508  VmaDeviceMemoryBlock* GetBlock() const
4509  {
4510  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4511  return m_BlockAllocation.m_Block;
4512  }
4513  VkDeviceSize GetOffset() const;
4514  VkDeviceMemory GetMemory() const;
4515  uint32_t GetMemoryTypeIndex() const;
4516  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
4517  void* GetMappedData() const;
4518  bool CanBecomeLost() const;
4519  VmaPool GetPool() const;
4520 
4521  uint32_t GetLastUseFrameIndex() const
4522  {
4523  return m_LastUseFrameIndex.load();
4524  }
4525  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
4526  {
4527  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
4528  }
4529  /*
4530  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
4531  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
4532  - Else, returns false.
4533 
4534  If hAllocation is already lost, assert - you should not call it then.
4535  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
4536  */
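 // Worked example (illustrative numbers): with frameInUseCount = 2 and
 // allocator.CurrentFrameIndex = 10, an allocation with LastUseFrameIndex = 7
 // can be made lost (7 + 2 < 10), while one with LastUseFrameIndex = 8
 // cannot (8 + 2 == 10, which is not < 10).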
4537  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4538 
4539  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4540  {
4541  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4542  outInfo.blockCount = 1;
4543  outInfo.allocationCount = 1;
4544  outInfo.unusedRangeCount = 0;
4545  outInfo.usedBytes = m_Size;
4546  outInfo.unusedBytes = 0;
4547  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4548  outInfo.unusedRangeSizeMin = UINT64_MAX;
4549  outInfo.unusedRangeSizeMax = 0;
4550  }
4551 
4552  void BlockAllocMap();
4553  void BlockAllocUnmap();
4554  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
4555  void DedicatedAllocUnmap(VmaAllocator hAllocator);
4556 
4557 #if VMA_STATS_STRING_ENABLED
4558  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
4559  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
4560 
4561  void InitBufferImageUsage(uint32_t bufferImageUsage)
4562  {
4563  VMA_ASSERT(m_BufferImageUsage == 0);
4564  m_BufferImageUsage = bufferImageUsage;
4565  }
4566 
4567  void PrintParameters(class VmaJsonWriter& json) const;
4568 #endif
4569 
4570 private:
4571  VkDeviceSize m_Alignment;
4572  VkDeviceSize m_Size;
4573  void* m_pUserData;
4574  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
4575  uint8_t m_Type; // ALLOCATION_TYPE
4576  uint8_t m_SuballocationType; // VmaSuballocationType
4577  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
4578  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
4579  uint8_t m_MapCount;
4580  uint8_t m_Flags; // enum FLAGS
4581 
4582  // Allocation out of VmaDeviceMemoryBlock.
4583  struct BlockAllocation
4584  {
4585  VmaPool m_hPool; // Null if belongs to general memory.
4586  VmaDeviceMemoryBlock* m_Block;
4587  VkDeviceSize m_Offset;
4588  bool m_CanBecomeLost;
4589  };
4590 
4591  // Allocation for an object that has its own private VkDeviceMemory.
4592  struct DedicatedAllocation
4593  {
4594  uint32_t m_MemoryTypeIndex;
4595  VkDeviceMemory m_hMemory;
4596  void* m_pMappedData; // Not null means memory is mapped.
4597  };
4598 
4599  union
4600  {
4601  // Allocation out of VmaDeviceMemoryBlock.
4602  BlockAllocation m_BlockAllocation;
4603  // Allocation for an object that has its own private VkDeviceMemory.
4604  DedicatedAllocation m_DedicatedAllocation;
4605  };
4606 
4607 #if VMA_STATS_STRING_ENABLED
4608  uint32_t m_CreationFrameIndex;
4609  uint32_t m_BufferImageUsage; // 0 if unknown.
4610 #endif
4611 
4612  void FreeUserDataString(VmaAllocator hAllocator);
4613 };
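// Hedged usage sketch (not part of the library): how the persistent-mapping
// bit MAP_COUNT_FLAG_PERSISTENT_MAP is typically set through the public API.
// `allocator` and the buffer parameters below are placeholder assumptions.
#if 0
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistent mapping

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// allocInfo.pMappedData remains valid for the allocation's lifetime,
// so no vmaMapMemory()/vmaUnmapMemory() pair is needed.
#endif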
4614 
4615 /*
4616 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4617 an allocated memory block, or free.
4618 */
4619 struct VmaSuballocation
4620 {
4621  VkDeviceSize offset;
4622  VkDeviceSize size;
4623  VmaAllocation hAllocation;
4624  VmaSuballocationType type;
4625 };
4626 
4627 // Comparator for offsets.
4628 struct VmaSuballocationOffsetLess
4629 {
4630  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4631  {
4632  return lhs.offset < rhs.offset;
4633  }
4634 };
4635 struct VmaSuballocationOffsetGreater
4636 {
4637  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4638  {
4639  return lhs.offset > rhs.offset;
4640  }
4641 };
4642 
4643 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
4644 
4645 // Cost of making one additional allocation lost, expressed in bytes.
4646 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4647 
4648 /*
4649 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4650 
4651 If canMakeOtherLost was false:
4652 - item points to a FREE suballocation.
4653 - itemsToMakeLostCount is 0.
4654 
4655 If canMakeOtherLost was true:
4656 - item points to first of sequence of suballocations, which are either FREE,
4657  or point to VmaAllocations that can become lost.
4658 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4659  the requested allocation to succeed.
4660 */
4661 struct VmaAllocationRequest
4662 {
4663  VkDeviceSize offset;
4664  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4665  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4666  VmaSuballocationList::iterator item;
4667  size_t itemsToMakeLostCount;
4668  void* customData;
4669 
4670  VkDeviceSize CalcCost() const
4671  {
4672  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4673  }
4674 };
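// Illustrative arithmetic for CalcCost() above: a request that overlaps
// lost-candidate items totaling sumItemSize = 262144 bytes with
// itemsToMakeLostCount = 2 costs 262144 + 2 * 1048576 = 2359296, so a
// request that sacrifices fewer allocations is preferred even when it
// overlaps more bytes.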
4675 
4676 /*
4677 Data structure used for bookkeeping of allocations and unused ranges of memory
4678 in a single VkDeviceMemory block.
4679 */
4680 class VmaBlockMetadata
4681 {
4682 public:
4683  VmaBlockMetadata(VmaAllocator hAllocator);
4684  virtual ~VmaBlockMetadata() { }
4685  virtual void Init(VkDeviceSize size) { m_Size = size; }
4686 
4687  // Validates all data structures inside this object. If not valid, returns false.
4688  virtual bool Validate() const = 0;
4689  VkDeviceSize GetSize() const { return m_Size; }
4690  virtual size_t GetAllocationCount() const = 0;
4691  virtual VkDeviceSize GetSumFreeSize() const = 0;
4692  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
4693  // Returns true if this block is empty - contains only a single free suballocation.
4694  virtual bool IsEmpty() const = 0;
4695 
4696  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
4697  // Shouldn't modify blockCount.
4698  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
4699 
4700 #if VMA_STATS_STRING_ENABLED
4701  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
4702 #endif
4703 
4704  // Tries to find a place for suballocation with given parameters inside this block.
4705  // If succeeded, fills pAllocationRequest and returns true.
4706  // If failed, returns false.
4707  virtual bool CreateAllocationRequest(
4708  uint32_t currentFrameIndex,
4709  uint32_t frameInUseCount,
4710  VkDeviceSize bufferImageGranularity,
4711  VkDeviceSize allocSize,
4712  VkDeviceSize allocAlignment,
4713  bool upperAddress,
4714  VmaSuballocationType allocType,
4715  bool canMakeOtherLost,
4716  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
4717  VmaAllocationRequest* pAllocationRequest) = 0;
4718 
4719  virtual bool MakeRequestedAllocationsLost(
4720  uint32_t currentFrameIndex,
4721  uint32_t frameInUseCount,
4722  VmaAllocationRequest* pAllocationRequest) = 0;
4723 
4724  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
4725 
4726  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
4727 
4728  // Makes actual allocation based on request. Request must already be checked and valid.
4729  virtual void Alloc(
4730  const VmaAllocationRequest& request,
4731  VmaSuballocationType type,
4732  VkDeviceSize allocSize,
4733  bool upperAddress,
4734  VmaAllocation hAllocation) = 0;
4735 
4736  // Frees suballocation assigned to given memory region.
4737  virtual void Free(const VmaAllocation allocation) = 0;
4738  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
4739 
4740 protected:
4741  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
4742 
4743 #if VMA_STATS_STRING_ENABLED
4744  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
4745  VkDeviceSize unusedBytes,
4746  size_t allocationCount,
4747  size_t unusedRangeCount) const;
4748  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
4749  VkDeviceSize offset,
4750  VmaAllocation hAllocation) const;
4751  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
4752  VkDeviceSize offset,
4753  VkDeviceSize size) const;
4754  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
4755 #endif
4756 
4757 private:
4758  VkDeviceSize m_Size;
4759  const VkAllocationCallbacks* m_pAllocationCallbacks;
4760 };
4761 
4762 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
4763  VMA_ASSERT(0 && "Validation failed: " #cond); \
4764  return false; \
4765  } } while(false)
4766 
4767 class VmaBlockMetadata_Generic : public VmaBlockMetadata
4768 {
4769  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
4770 public:
4771  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
4772  virtual ~VmaBlockMetadata_Generic();
4773  virtual void Init(VkDeviceSize size);
4774 
4775  virtual bool Validate() const;
4776  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4777  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4778  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4779  virtual bool IsEmpty() const;
4780 
4781  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4782  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4783 
4784 #if VMA_STATS_STRING_ENABLED
4785  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4786 #endif
4787 
4788  virtual bool CreateAllocationRequest(
4789  uint32_t currentFrameIndex,
4790  uint32_t frameInUseCount,
4791  VkDeviceSize bufferImageGranularity,
4792  VkDeviceSize allocSize,
4793  VkDeviceSize allocAlignment,
4794  bool upperAddress,
4795  VmaSuballocationType allocType,
4796  bool canMakeOtherLost,
4797  uint32_t strategy,
4798  VmaAllocationRequest* pAllocationRequest);
4799 
4800  virtual bool MakeRequestedAllocationsLost(
4801  uint32_t currentFrameIndex,
4802  uint32_t frameInUseCount,
4803  VmaAllocationRequest* pAllocationRequest);
4804 
4805  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4806 
4807  virtual VkResult CheckCorruption(const void* pBlockData);
4808 
4809  virtual void Alloc(
4810  const VmaAllocationRequest& request,
4811  VmaSuballocationType type,
4812  VkDeviceSize allocSize,
4813  bool upperAddress,
4814  VmaAllocation hAllocation);
4815 
4816  virtual void Free(const VmaAllocation allocation);
4817  virtual void FreeAtOffset(VkDeviceSize offset);
4818 
4819 private:
4820  uint32_t m_FreeCount;
4821  VkDeviceSize m_SumFreeSize;
4822  VmaSuballocationList m_Suballocations;
4823  // Suballocations that are free and have size greater than certain threshold.
4824  // Sorted by size, ascending.
4825  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4826 
4827  bool ValidateFreeSuballocationList() const;
4828 
4829  // Checks if a requested suballocation with given parameters can be placed in given suballocItem.
4830  // If yes, fills pOffset and returns true. If no, returns false.
4831  bool CheckAllocation(
4832  uint32_t currentFrameIndex,
4833  uint32_t frameInUseCount,
4834  VkDeviceSize bufferImageGranularity,
4835  VkDeviceSize allocSize,
4836  VkDeviceSize allocAlignment,
4837  VmaSuballocationType allocType,
4838  VmaSuballocationList::const_iterator suballocItem,
4839  bool canMakeOtherLost,
4840  VkDeviceSize* pOffset,
4841  size_t* itemsToMakeLostCount,
4842  VkDeviceSize* pSumFreeSize,
4843  VkDeviceSize* pSumItemSize) const;
4844  // Given a free suballocation, merges it with the following one, which must also be free.
4845  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4846  // Releases given suballocation, making it free.
4847  // Merges it with adjacent free suballocations if applicable.
4848  // Returns iterator to new free suballocation at this place.
4849  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4850  // Given a free suballocation, inserts it into the sorted list
4851  // m_FreeSuballocationsBySize if it is large enough to qualify.
4852  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4853  // Given a free suballocation, removes it from the sorted list
4854  // m_FreeSuballocationsBySize if it was large enough to be registered there.
4855  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4856 };
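// Minimal sketch of the best-fit lookup this metadata performs over
// m_FreeSuballocationsBySize (a plain sorted vector of sizes stands in for
// the real vector of list iterators; an assumption for illustration only):
#if 0
#include <algorithm>
#include <vector>

// Returns pointer to the smallest free size that can hold allocSize, or VMA_NULL.
VkDeviceSize* FindBestFit(std::vector<VkDeviceSize>& freeSizesAscending, VkDeviceSize allocSize)
{
    // First element not less than allocSize = smallest free range that fits.
    std::vector<VkDeviceSize>::iterator it = std::lower_bound(
        freeSizesAscending.begin(), freeSizesAscending.end(), allocSize);
    return (it != freeSizesAscending.end()) ? &*it : VMA_NULL;
}
#endif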
4857 
4858 /*
4859 Allocations and their references in internal data structure look like this:
4860 
4861 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4862 
4863  0 +-------+
4864  | |
4865  | |
4866  | |
4867  +-------+
4868  | Alloc | 1st[m_1stNullItemsBeginCount]
4869  +-------+
4870  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4871  +-------+
4872  | ... |
4873  +-------+
4874  | Alloc | 1st[1st.size() - 1]
4875  +-------+
4876  | |
4877  | |
4878  | |
4879 GetSize() +-------+
4880 
4881 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4882 
4883  0 +-------+
4884  | Alloc | 2nd[0]
4885  +-------+
4886  | Alloc | 2nd[1]
4887  +-------+
4888  | ... |
4889  +-------+
4890  | Alloc | 2nd[2nd.size() - 1]
4891  +-------+
4892  | |
4893  | |
4894  | |
4895  +-------+
4896  | Alloc | 1st[m_1stNullItemsBeginCount]
4897  +-------+
4898  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4899  +-------+
4900  | ... |
4901  +-------+
4902  | Alloc | 1st[1st.size() - 1]
4903  +-------+
4904  | |
4905 GetSize() +-------+
4906 
4907 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4908 
4909  0 +-------+
4910  | |
4911  | |
4912  | |
4913  +-------+
4914  | Alloc | 1st[m_1stNullItemsBeginCount]
4915  +-------+
4916  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4917  +-------+
4918  | ... |
4919  +-------+
4920  | Alloc | 1st[1st.size() - 1]
4921  +-------+
4922  | |
4923  | |
4924  | |
4925  +-------+
4926  | Alloc | 2nd[2nd.size() - 1]
4927  +-------+
4928  | ... |
4929  +-------+
4930  | Alloc | 2nd[1]
4931  +-------+
4932  | Alloc | 2nd[0]
4933 GetSize() +-------+
4934 
4935 */
4936 class VmaBlockMetadata_Linear : public VmaBlockMetadata
4937 {
4938  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
4939 public:
4940  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
4941  virtual ~VmaBlockMetadata_Linear();
4942  virtual void Init(VkDeviceSize size);
4943 
4944  virtual bool Validate() const;
4945  virtual size_t GetAllocationCount() const;
4946  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4947  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
4948  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
4949 
4950  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4951  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
4952 
4953 #if VMA_STATS_STRING_ENABLED
4954  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
4955 #endif
4956 
4957  virtual bool CreateAllocationRequest(
4958  uint32_t currentFrameIndex,
4959  uint32_t frameInUseCount,
4960  VkDeviceSize bufferImageGranularity,
4961  VkDeviceSize allocSize,
4962  VkDeviceSize allocAlignment,
4963  bool upperAddress,
4964  VmaSuballocationType allocType,
4965  bool canMakeOtherLost,
4966  uint32_t strategy,
4967  VmaAllocationRequest* pAllocationRequest);
4968 
4969  virtual bool MakeRequestedAllocationsLost(
4970  uint32_t currentFrameIndex,
4971  uint32_t frameInUseCount,
4972  VmaAllocationRequest* pAllocationRequest);
4973 
4974  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4975 
4976  virtual VkResult CheckCorruption(const void* pBlockData);
4977 
4978  virtual void Alloc(
4979  const VmaAllocationRequest& request,
4980  VmaSuballocationType type,
4981  VkDeviceSize allocSize,
4982  bool upperAddress,
4983  VmaAllocation hAllocation);
4984 
4985  virtual void Free(const VmaAllocation allocation);
4986  virtual void FreeAtOffset(VkDeviceSize offset);
4987 
4988 private:
4989  /*
4990  There are two suballocation vectors, used in ping-pong way.
4991  The one with index m_1stVectorIndex is called 1st.
4992  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
4993  2nd can be non-empty only when 1st is not empty.
4994  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
4995  */
4996  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
4997 
4998  enum SECOND_VECTOR_MODE
4999  {
5000  SECOND_VECTOR_EMPTY,
5001  /*
5002  Suballocations in 2nd vector are created later than the ones in 1st, but they
5003  all have smaller offsets.
5004  */
5005  SECOND_VECTOR_RING_BUFFER,
5006  /*
5007  Suballocations in 2nd vector are upper side of double stack.
5008  They all have offsets higher than those in 1st vector.
5009  Top of this stack means smaller offsets, but higher indices in this vector.
5010  */
5011  SECOND_VECTOR_DOUBLE_STACK,
5012  };
5013 
5014  VkDeviceSize m_SumFreeSize;
5015  SuballocationVectorType m_Suballocations0, m_Suballocations1;
5016  uint32_t m_1stVectorIndex;
5017  SECOND_VECTOR_MODE m_2ndVectorMode;
5018 
5019  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5020  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5021  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
5022  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
5023 
5024  // Number of items in 1st vector with hAllocation = null at the beginning.
5025  size_t m_1stNullItemsBeginCount;
5026  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
5027  size_t m_1stNullItemsMiddleCount;
5028  // Number of items in 2nd vector with hAllocation = null.
5029  size_t m_2ndNullItemsCount;
5030 
5031  bool ShouldCompact1st() const;
5032  void CleanupAfterFree();
5033 };
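// Hedged usage sketch: this linear metadata is selected through the public
// API by creating a custom pool with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
// `allocator` and `memTypeIndex` are assumed to exist; sizes are placeholders.
#if 0
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // single block acting as ring buffer / double stack
poolCreateInfo.maxBlockCount = 1;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocations created with VmaAllocationCreateInfo::pool = pool now use the
// 1st/2nd vector scheme pictured above.
#endif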
5034 
5035 /*
5036 - GetSize() is the original size of allocated memory block.
5037 - m_UsableSize is this size aligned down to a power of two.
5038  All allocations and calculations happen relative to m_UsableSize.
5039 - GetUnusableSize() is the difference between them.
5040  It is reported as a separate, unused range, not available for allocations.
5041 
5042 Node at level 0 has size = m_UsableSize.
5043 Each next level contains nodes half the size of those at the previous level.
5044 m_LevelCount is the maximum number of levels to use in the current object.
5045 */
5046 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
5047 {
5048  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
5049 public:
5050  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
5051  virtual ~VmaBlockMetadata_Buddy();
5052  virtual void Init(VkDeviceSize size);
5053 
5054  virtual bool Validate() const;
5055  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
5056  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
5057  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
5058  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
5059 
5060  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
5061  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
5062 
5063 #if VMA_STATS_STRING_ENABLED
5064  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
5065 #endif
5066 
5067  virtual bool CreateAllocationRequest(
5068  uint32_t currentFrameIndex,
5069  uint32_t frameInUseCount,
5070  VkDeviceSize bufferImageGranularity,
5071  VkDeviceSize allocSize,
5072  VkDeviceSize allocAlignment,
5073  bool upperAddress,
5074  VmaSuballocationType allocType,
5075  bool canMakeOtherLost,
5076  uint32_t strategy,
5077  VmaAllocationRequest* pAllocationRequest);
5078 
5079  virtual bool MakeRequestedAllocationsLost(
5080  uint32_t currentFrameIndex,
5081  uint32_t frameInUseCount,
5082  VmaAllocationRequest* pAllocationRequest);
5083 
5084  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
5085 
5086  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
5087 
5088  virtual void Alloc(
5089  const VmaAllocationRequest& request,
5090  VmaSuballocationType type,
5091  VkDeviceSize allocSize,
5092  bool upperAddress,
5093  VmaAllocation hAllocation);
5094 
5095  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
5096  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
5097 
5098 private:
5099  static const VkDeviceSize MIN_NODE_SIZE = 32;
5100  static const size_t MAX_LEVELS = 30;
5101 
5102  struct ValidationContext
5103  {
5104  size_t calculatedAllocationCount;
5105  size_t calculatedFreeCount;
5106  VkDeviceSize calculatedSumFreeSize;
5107 
5108  ValidationContext() :
5109  calculatedAllocationCount(0),
5110  calculatedFreeCount(0),
5111  calculatedSumFreeSize(0) { }
5112  };
5113 
5114  struct Node
5115  {
5116  VkDeviceSize offset;
5117  enum TYPE
5118  {
5119  TYPE_FREE,
5120  TYPE_ALLOCATION,
5121  TYPE_SPLIT,
5122  TYPE_COUNT
5123  } type;
5124  Node* parent;
5125  Node* buddy;
5126 
5127  union
5128  {
5129  struct
5130  {
5131  Node* prev;
5132  Node* next;
5133  } free;
5134  struct
5135  {
5136  VmaAllocation alloc;
5137  } allocation;
5138  struct
5139  {
5140  Node* leftChild;
5141  } split;
5142  };
5143  };
5144 
5145  // Size of the memory block aligned down to a power of two.
5146  VkDeviceSize m_UsableSize;
5147  uint32_t m_LevelCount;
5148 
5149  Node* m_Root;
5150  struct {
5151  Node* front;
5152  Node* back;
5153  } m_FreeList[MAX_LEVELS];
5154  // Number of nodes in the tree with type == TYPE_ALLOCATION.
5155  size_t m_AllocationCount;
5156  // Number of nodes in the tree with type == TYPE_FREE.
5157  size_t m_FreeCount;
5158  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
5159  VkDeviceSize m_SumFreeSize;
5160 
5161  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
5162  void DeleteNode(Node* node);
5163  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
5164  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
5165  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
5166  // Alloc passed just for validation. Can be null.
5167  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
5168  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
5169  // Adds node to the front of FreeList at given level.
5170  // node->type must be FREE.
5171  // node->free.prev, next can be undefined.
5172  void AddToFreeListFront(uint32_t level, Node* node);
5173  // Removes node from FreeList at given level.
5174  // node->type must be FREE.
5175  // node->free.prev, next stay untouched.
5176  void RemoveFromFreeList(uint32_t level, Node* node);
5177 
5178 #if VMA_STATS_STRING_ENABLED
5179  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
5180 #endif
5181 };
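// Illustrative level math for the buddy metadata, following LevelToNodeSize()
// above (numbers are examples): with m_UsableSize = 256 MiB, level 0 holds one
// 256 MiB node, level 1 holds 128 MiB nodes, level 2 holds 64 MiB nodes, etc.
// A 48 MiB request is rounded up to a 64 MiB node at level 2; the 16 MiB
// difference is internal fragmentation, which m_SumFreeSize includes.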
5182 
5183 /*
5184 Represents a single block of device memory (`VkDeviceMemory`) with all the
5185 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5186 
5187 Thread-safety: This class must be externally synchronized.
5188 */
5189 class VmaDeviceMemoryBlock
5190 {
5191  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
5192 public:
5193  VmaBlockMetadata* m_pMetadata;
5194 
5195  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
5196 
5197  ~VmaDeviceMemoryBlock()
5198  {
5199  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5200  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5201  }
5202 
5203  // Always call after construction.
5204  void Init(
5205  VmaAllocator hAllocator,
5206  uint32_t newMemoryTypeIndex,
5207  VkDeviceMemory newMemory,
5208  VkDeviceSize newSize,
5209  uint32_t id,
5210  uint32_t algorithm);
5211  // Always call before destruction.
5212  void Destroy(VmaAllocator allocator);
5213 
5214  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
5215  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5216  uint32_t GetId() const { return m_Id; }
5217  void* GetMappedData() const { return m_pMappedData; }
5218 
5219  // Validates all data structures inside this object. If not valid, returns false.
5220  bool Validate() const;
5221 
5222  VkResult CheckCorruption(VmaAllocator hAllocator);
5223 
5224  // ppData can be null.
5225  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
5226  void Unmap(VmaAllocator hAllocator, uint32_t count);
5227 
5228  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5229  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
5230 
5231  VkResult BindBufferMemory(
5232  const VmaAllocator hAllocator,
5233  const VmaAllocation hAllocation,
5234  VkBuffer hBuffer);
5235  VkResult BindImageMemory(
5236  const VmaAllocator hAllocator,
5237  const VmaAllocation hAllocation,
5238  VkImage hImage);
5239 
5240 private:
5241  uint32_t m_MemoryTypeIndex;
5242  uint32_t m_Id;
5243  VkDeviceMemory m_hMemory;
5244 
5245  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
5246  // Also protects m_MapCount, m_pMappedData.
5247  VMA_MUTEX m_Mutex;
5248  uint32_t m_MapCount;
5249  void* m_pMappedData;
5250 };
5251 
5252 struct VmaPointerLess
5253 {
5254  bool operator()(const void* lhs, const void* rhs) const
5255  {
5256  return lhs < rhs;
5257  }
5258 };
5259 
5260 class VmaDefragmentator;
5261 
5262 /*
5263 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5264 Vulkan memory type.
5265 
5266 Synchronized internally with a mutex.
5267 */
5268 struct VmaBlockVector
5269 {
5270  VMA_CLASS_NO_COPY(VmaBlockVector)
5271 public:
5272  VmaBlockVector(
5273  VmaAllocator hAllocator,
5274  uint32_t memoryTypeIndex,
5275  VkDeviceSize preferredBlockSize,
5276  size_t minBlockCount,
5277  size_t maxBlockCount,
5278  VkDeviceSize bufferImageGranularity,
5279  uint32_t frameInUseCount,
5280  bool isCustomPool,
5281  bool explicitBlockSize,
5282  uint32_t algorithm);
5283  ~VmaBlockVector();
5284 
5285  VkResult CreateMinBlocks();
5286 
5287  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
5288  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
5289  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
5290  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
5291  uint32_t GetAlgorithm() const { return m_Algorithm; }
5292 
5293  void GetPoolStats(VmaPoolStats* pStats);
5294 
5295  bool IsEmpty() const { return m_Blocks.empty(); }
5296  bool IsCorruptionDetectionEnabled() const;
5297 
5298  VkResult Allocate(
5299  VmaPool hCurrentPool,
5300  uint32_t currentFrameIndex,
5301  VkDeviceSize size,
5302  VkDeviceSize alignment,
5303  const VmaAllocationCreateInfo& createInfo,
5304  VmaSuballocationType suballocType,
5305  VmaAllocation* pAllocation);
5306 
5307  void Free(
5308  VmaAllocation hAllocation);
5309 
5310  // Adds statistics of this BlockVector to pStats.
5311  void AddStats(VmaStats* pStats);
5312 
5313 #if VMA_STATS_STRING_ENABLED
5314  void PrintDetailedMap(class VmaJsonWriter& json);
5315 #endif
5316 
5317  void MakePoolAllocationsLost(
5318  uint32_t currentFrameIndex,
5319  size_t* pLostAllocationCount);
5320  VkResult CheckCorruption();
5321 
5322  VmaDefragmentator* EnsureDefragmentator(
5323  VmaAllocator hAllocator,
5324  uint32_t currentFrameIndex);
5325 
5326  VkResult Defragment(
5327  VmaDefragmentationStats* pDefragmentationStats,
5328  VkDeviceSize& maxBytesToMove,
5329  uint32_t& maxAllocationsToMove);
5330 
5331  void DestroyDefragmentator();
5332 
5333 private:
5334  friend class VmaDefragmentator;
5335 
5336  const VmaAllocator m_hAllocator;
5337  const uint32_t m_MemoryTypeIndex;
5338  const VkDeviceSize m_PreferredBlockSize;
5339  const size_t m_MinBlockCount;
5340  const size_t m_MaxBlockCount;
5341  const VkDeviceSize m_BufferImageGranularity;
5342  const uint32_t m_FrameInUseCount;
5343  const bool m_IsCustomPool;
5344  const bool m_ExplicitBlockSize;
5345  const uint32_t m_Algorithm;
5346  bool m_HasEmptyBlock;
5347  VMA_MUTEX m_Mutex;
5348  // Incrementally sorted by sumFreeSize, ascending.
5349  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
5350  /* There can be at most one block that is completely empty (m_HasEmptyBlock) - a
5351  hysteresis to avoid the pessimistic case of alternating creation and destruction
5352  of a VkDeviceMemory. */
5353  VmaDefragmentator* m_pDefragmentator;
5354  uint32_t m_NextBlockId;
5355 
5356  VkDeviceSize CalcMaxBlockSize() const;
5357 
5358  // Finds and removes given block from vector.
5359  void Remove(VmaDeviceMemoryBlock* pBlock);
5360 
5361  // Performs single step in sorting m_Blocks. They may not be fully sorted
5362  // after this call.
5363  void IncrementallySortBlocks();
5364 
5365  // To be used only without CAN_MAKE_OTHER_LOST flag.
5366  VkResult AllocateFromBlock(
5367  VmaDeviceMemoryBlock* pBlock,
5368  VmaPool hCurrentPool,
5369  uint32_t currentFrameIndex,
5370  VkDeviceSize size,
5371  VkDeviceSize alignment,
5372  VmaAllocationCreateFlags allocFlags,
5373  void* pUserData,
5374  VmaSuballocationType suballocType,
5375  uint32_t strategy,
5376  VmaAllocation* pAllocation);
5377 
5378  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
5379 };
5380 
5381 struct VmaPool_T
5382 {
5383  VMA_CLASS_NO_COPY(VmaPool_T)
5384 public:
5385  VmaBlockVector m_BlockVector;
5386 
5387  VmaPool_T(
5388  VmaAllocator hAllocator,
5389  const VmaPoolCreateInfo& createInfo,
5390  VkDeviceSize preferredBlockSize);
5391  ~VmaPool_T();
5392 
5393  uint32_t GetId() const { return m_Id; }
5394  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
5395 
5396 #if VMA_STATS_STRING_ENABLED
5397  //void PrintDetailedMap(class VmaStringBuilder& sb);
5398 #endif
5399 
5400 private:
5401  uint32_t m_Id;
5402 };
5403 
5404 class VmaDefragmentator
5405 {
5406  VMA_CLASS_NO_COPY(VmaDefragmentator)
5407 private:
5408  const VmaAllocator m_hAllocator;
5409  VmaBlockVector* const m_pBlockVector;
5410  uint32_t m_CurrentFrameIndex;
5411  VkDeviceSize m_BytesMoved;
5412  uint32_t m_AllocationsMoved;
5413 
5414  struct AllocationInfo
5415  {
5416  VmaAllocation m_hAllocation;
5417  VkBool32* m_pChanged;
5418 
5419  AllocationInfo() :
5420  m_hAllocation(VK_NULL_HANDLE),
5421  m_pChanged(VMA_NULL)
5422  {
5423  }
5424  };
5425 
5426  struct AllocationInfoSizeGreater
5427  {
5428  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
5429  {
5430  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
5431  }
5432  };
5433 
5434  // Used between AddAllocation and Defragment.
5435  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5436 
5437  struct BlockInfo
5438  {
5439  VmaDeviceMemoryBlock* m_pBlock;
5440  bool m_HasNonMovableAllocations;
5441  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
5442 
5443  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
5444  m_pBlock(VMA_NULL),
5445  m_HasNonMovableAllocations(true),
5446  m_Allocations(pAllocationCallbacks),
5447  m_pMappedDataForDefragmentation(VMA_NULL)
5448  {
5449  }
5450 
5451  void CalcHasNonMovableAllocations()
5452  {
5453  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
5454  const size_t defragmentAllocCount = m_Allocations.size();
5455  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
5456  }
5457 
5458  void SortAllocationsBySizeDescecnding()
5459  {
5460  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
5461  }
5462 
5463  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
5464  void Unmap(VmaAllocator hAllocator);
5465 
5466  private:
5467  // Not null if mapped for defragmentation only, not originally mapped.
5468  void* m_pMappedDataForDefragmentation;
5469  };
5470 
5471  struct BlockPointerLess
5472  {
5473  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
5474  {
5475  return pLhsBlockInfo->m_pBlock < pRhsBlock;
5476  }
5477  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5478  {
5479  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
5480  }
5481  };
5482 
5483  // 1. Blocks with some non-movable allocations go first.
5484  // 2. Blocks with smaller sumFreeSize go first.
5485  struct BlockInfoCompareMoveDestination
5486  {
5487  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
5488  {
5489  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
5490  {
5491  return true;
5492  }
5493  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
5494  {
5495  return false;
5496  }
5497  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
5498  {
5499  return true;
5500  }
5501  return false;
5502  }
5503  };
5504 
5505  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
5506  BlockInfoVector m_Blocks;
5507 
5508  VkResult DefragmentRound(
5509  VkDeviceSize maxBytesToMove,
5510  uint32_t maxAllocationsToMove);
5511 
5512  static bool MoveMakesSense(
5513  size_t dstBlockIndex, VkDeviceSize dstOffset,
5514  size_t srcBlockIndex, VkDeviceSize srcOffset);
5515 
5516 public:
5517  VmaDefragmentator(
5518  VmaAllocator hAllocator,
5519  VmaBlockVector* pBlockVector,
5520  uint32_t currentFrameIndex);
5521 
5522  ~VmaDefragmentator();
5523 
5524  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
5525  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
5526 
5527  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
5528 
5529  VkResult Defragment(
5530  VkDeviceSize maxBytesToMove,
5531  uint32_t maxAllocationsToMove);
5532 };
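// Hedged usage sketch of the public entry point that drives this class,
// vmaDefragment(). `allocator`, `allocations` and ALLOC_COUNT are assumed to
// be provided by the caller; the limits below are placeholders.
#if 0
VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = 64ull * 1024 * 1024;
defragInfo.maxAllocationsToMove = 128;

VkBool32 allocationsChanged[ALLOC_COUNT] = {};
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(
    allocator, allocations, ALLOC_COUNT, allocationsChanged, &defragInfo, &stats);
// Where allocationsChanged[i] == VK_TRUE, the allocation moved: recreate the
// buffer/image bound to it and re-record command buffers referencing it.
#endif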
5533 
5534 #if VMA_RECORDING_ENABLED
5535 
5536 class VmaRecorder
5537 {
5538 public:
5539  VmaRecorder();
5540  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
5541  void WriteConfiguration(
5542  const VkPhysicalDeviceProperties& devProps,
5543  const VkPhysicalDeviceMemoryProperties& memProps,
5544  bool dedicatedAllocationExtensionEnabled);
5545  ~VmaRecorder();
5546 
5547  void RecordCreateAllocator(uint32_t frameIndex);
5548  void RecordDestroyAllocator(uint32_t frameIndex);
5549  void RecordCreatePool(uint32_t frameIndex,
5550  const VmaPoolCreateInfo& createInfo,
5551  VmaPool pool);
5552  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
5553  void RecordAllocateMemory(uint32_t frameIndex,
5554  const VkMemoryRequirements& vkMemReq,
5555  const VmaAllocationCreateInfo& createInfo,
5556  VmaAllocation allocation);
5557  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
5558  const VkMemoryRequirements& vkMemReq,
5559  bool requiresDedicatedAllocation,
5560  bool prefersDedicatedAllocation,
5561  const VmaAllocationCreateInfo& createInfo,
5562  VmaAllocation allocation);
5563  void RecordAllocateMemoryForImage(uint32_t frameIndex,
5564  const VkMemoryRequirements& vkMemReq,
5565  bool requiresDedicatedAllocation,
5566  bool prefersDedicatedAllocation,
5567  const VmaAllocationCreateInfo& createInfo,
5568  VmaAllocation allocation);
5569  void RecordFreeMemory(uint32_t frameIndex,
5570  VmaAllocation allocation);
5571  void RecordSetAllocationUserData(uint32_t frameIndex,
5572  VmaAllocation allocation,
5573  const void* pUserData);
5574  void RecordCreateLostAllocation(uint32_t frameIndex,
5575  VmaAllocation allocation);
5576  void RecordMapMemory(uint32_t frameIndex,
5577  VmaAllocation allocation);
5578  void RecordUnmapMemory(uint32_t frameIndex,
5579  VmaAllocation allocation);
5580  void RecordFlushAllocation(uint32_t frameIndex,
5581  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5582  void RecordInvalidateAllocation(uint32_t frameIndex,
5583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
5584  void RecordCreateBuffer(uint32_t frameIndex,
5585  const VkBufferCreateInfo& bufCreateInfo,
5586  const VmaAllocationCreateInfo& allocCreateInfo,
5587  VmaAllocation allocation);
5588  void RecordCreateImage(uint32_t frameIndex,
5589  const VkImageCreateInfo& imageCreateInfo,
5590  const VmaAllocationCreateInfo& allocCreateInfo,
5591  VmaAllocation allocation);
5592  void RecordDestroyBuffer(uint32_t frameIndex,
5593  VmaAllocation allocation);
5594  void RecordDestroyImage(uint32_t frameIndex,
5595  VmaAllocation allocation);
5596  void RecordTouchAllocation(uint32_t frameIndex,
5597  VmaAllocation allocation);
5598  void RecordGetAllocationInfo(uint32_t frameIndex,
5599  VmaAllocation allocation);
5600  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
5601  VmaPool pool);
5602 
5603 private:
5604  struct CallParams
5605  {
5606  uint32_t threadId;
5607  double time;
5608  };
5609 
5610  class UserDataString
5611  {
5612  public:
5613  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
5614  const char* GetString() const { return m_Str; }
5615 
5616  private:
5617  char m_PtrStr[17];
5618  const char* m_Str;
5619  };
5620 
5621  bool m_UseMutex;
5622  VmaRecordFlags m_Flags;
5623  FILE* m_File;
5624  VMA_MUTEX m_FileMutex;
5625  int64_t m_Freq;
5626  int64_t m_StartCounter;
5627 
5628  void GetBasicParams(CallParams& outParams);
5629  void Flush();
5630 };
5631 
5632 #endif // #if VMA_RECORDING_ENABLED
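// Hedged configuration sketch: VmaRecorder is activated by passing
// VmaAllocatorCreateInfo::pRecordSettings at allocator creation (only
// effective when VMA_RECORDING_ENABLED). The file path is a placeholder.
#if 0
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // slower, but file is usable after a crash
recordSettings.pFilePath = "vma_replay.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
#endif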
5633 
5634 // Main allocator object.
5635 struct VmaAllocator_T
5636 {
5637  VMA_CLASS_NO_COPY(VmaAllocator_T)
5638 public:
5639  bool m_UseMutex;
5640  bool m_UseKhrDedicatedAllocation;
5641  VkDevice m_hDevice;
5642  bool m_AllocationCallbacksSpecified;
5643  VkAllocationCallbacks m_AllocationCallbacks;
5644  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5645 
5646  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
5647  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5648  VMA_MUTEX m_HeapSizeLimitMutex;
5649 
5650  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5651  VkPhysicalDeviceMemoryProperties m_MemProps;
5652 
5653  // Default pools.
5654  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5655 
5656  // Each vector is sorted by memory (handle value).
5657  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5658  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5659  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5660 
5661  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5662  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5663  ~VmaAllocator_T();
5664 
5665  const VkAllocationCallbacks* GetAllocationCallbacks() const
5666  {
5667  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5668  }
5669  const VmaVulkanFunctions& GetVulkanFunctions() const
5670  {
5671  return m_VulkanFunctions;
5672  }
5673 
5674  VkDeviceSize GetBufferImageGranularity() const
5675  {
5676  return VMA_MAX(
5677  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5678  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5679  }
5680 
5681  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5682  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5683 
5684  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5685  {
5686  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5687  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5688  }
5689  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5690  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5691  {
5692  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5693  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5694  }
5695  // Minimum alignment for all allocations in specific memory type.
5696  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5697  {
5698  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5699  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5700  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5701  }
5702 
5703  bool IsIntegratedGpu() const
5704  {
5705  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5706  }
5707 
5708 #if VMA_RECORDING_ENABLED
5709  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5710 #endif
5711 
5712  void GetBufferMemoryRequirements(
5713  VkBuffer hBuffer,
5714  VkMemoryRequirements& memReq,
5715  bool& requiresDedicatedAllocation,
5716  bool& prefersDedicatedAllocation) const;
5717  void GetImageMemoryRequirements(
5718  VkImage hImage,
5719  VkMemoryRequirements& memReq,
5720  bool& requiresDedicatedAllocation,
5721  bool& prefersDedicatedAllocation) const;
5722 
5723  // Main allocation function.
5724  VkResult AllocateMemory(
5725  const VkMemoryRequirements& vkMemReq,
5726  bool requiresDedicatedAllocation,
5727  bool prefersDedicatedAllocation,
5728  VkBuffer dedicatedBuffer,
5729  VkImage dedicatedImage,
5730  const VmaAllocationCreateInfo& createInfo,
5731  VmaSuballocationType suballocType,
5732  VmaAllocation* pAllocation);
5733 
5734  // Main deallocation function.
5735  void FreeMemory(const VmaAllocation allocation);
5736 
5737  void CalculateStats(VmaStats* pStats);
5738 
5739 #if VMA_STATS_STRING_ENABLED
5740  void PrintDetailedMap(class VmaJsonWriter& json);
5741 #endif
5742 
5743  VkResult Defragment(
5744  VmaAllocation* pAllocations,
5745  size_t allocationCount,
5746  VkBool32* pAllocationsChanged,
5747  const VmaDefragmentationInfo* pDefragmentationInfo,
5748  VmaDefragmentationStats* pDefragmentationStats);
5749 
5750  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5751  bool TouchAllocation(VmaAllocation hAllocation);
5752 
5753  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5754  void DestroyPool(VmaPool pool);
5755  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5756 
5757  void SetCurrentFrameIndex(uint32_t frameIndex);
5758  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5759 
5760  void MakePoolAllocationsLost(
5761  VmaPool hPool,
5762  size_t* pLostAllocationCount);
5763  VkResult CheckPoolCorruption(VmaPool hPool);
5764  VkResult CheckCorruption(uint32_t memoryTypeBits);
5765 
5766  void CreateLostAllocation(VmaAllocation* pAllocation);
5767 
5768  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5769  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5770 
5771  VkResult Map(VmaAllocation hAllocation, void** ppData);
5772  void Unmap(VmaAllocation hAllocation);
5773 
5774  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5775  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5776 
5777  void FlushOrInvalidateAllocation(
5778  VmaAllocation hAllocation,
5779  VkDeviceSize offset, VkDeviceSize size,
5780  VMA_CACHE_OPERATION op);
5781 
5782  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5783 
5784 private:
5785  VkDeviceSize m_PreferredLargeHeapBlockSize;
5786 
5787  VkPhysicalDevice m_PhysicalDevice;
5788  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5789 
5790  VMA_MUTEX m_PoolsMutex;
5791  // Protected by m_PoolsMutex. Sorted by pointer value.
5792  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5793  uint32_t m_NextPoolId;
5794 
5795  VmaVulkanFunctions m_VulkanFunctions;
5796 
5797 #if VMA_RECORDING_ENABLED
5798  VmaRecorder* m_pRecorder;
5799 #endif
5800 
5801  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5802 
5803  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5804 
5805  VkResult AllocateMemoryOfType(
5806  VkDeviceSize size,
5807  VkDeviceSize alignment,
5808  bool dedicatedAllocation,
5809  VkBuffer dedicatedBuffer,
5810  VkImage dedicatedImage,
5811  const VmaAllocationCreateInfo& createInfo,
5812  uint32_t memTypeIndex,
5813  VmaSuballocationType suballocType,
5814  VmaAllocation* pAllocation);
5815 
5816  // Allocates and registers new VkDeviceMemory specifically for single allocation.
5817  VkResult AllocateDedicatedMemory(
5818  VkDeviceSize size,
5819  VmaSuballocationType suballocType,
5820  uint32_t memTypeIndex,
5821  bool map,
5822  bool isUserDataString,
5823  void* pUserData,
5824  VkBuffer dedicatedBuffer,
5825  VkImage dedicatedImage,
5826  VmaAllocation* pAllocation);
5827 
5828  // Frees given allocation as Dedicated Memory - one created with its own VkDeviceMemory.
5829  void FreeDedicatedMemory(VmaAllocation allocation);
5830 };
5831 
5833 // Memory allocation #2 after VmaAllocator_T definition
5834 
5835 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5836 {
5837  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5838 }
5839 
5840 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5841 {
5842  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5843 }
5844 
5845 template<typename T>
5846 static T* VmaAllocate(VmaAllocator hAllocator)
5847 {
5848  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5849 }
5850 
5851 template<typename T>
5852 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5853 {
5854  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5855 }
5856 
5857 template<typename T>
5858 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5859 {
5860  if(ptr != VMA_NULL)
5861  {
5862  ptr->~T();
5863  VmaFree(hAllocator, ptr);
5864  }
5865 }
5866 
5867 template<typename T>
5868 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5869 {
5870  if(ptr != VMA_NULL)
5871  {
5872  for(size_t i = count; i--; )
5873  ptr[i].~T();
5874  VmaFree(hAllocator, ptr);
5875  }
5876 }
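// Illustrative pairing (assumes the vma_new/vma_new_array helper macros
// defined earlier in this file): objects allocated through the allocator's
// callbacks must be destroyed through the matching helper, e.g.
//   T* p = vma_new(hAllocator, T)(ctorArgs);
//   ...
//   vma_delete(hAllocator, p); // runs ~T(), then VmaFree()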
5877 
5879 // VmaStringBuilder
5880 
5881 #if VMA_STATS_STRING_ENABLED
5882 
5883 class VmaStringBuilder
5884 {
5885 public:
5886  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
5887  size_t GetLength() const { return m_Data.size(); }
5888  const char* GetData() const { return m_Data.data(); }
5889 
5890  void Add(char ch) { m_Data.push_back(ch); }
5891  void Add(const char* pStr);
5892  void AddNewLine() { Add('\n'); }
5893  void AddNumber(uint32_t num);
5894  void AddNumber(uint64_t num);
5895  void AddPointer(const void* ptr);
5896 
5897 private:
5898  VmaVector< char, VmaStlAllocator<char> > m_Data;
5899 };
5900 
5901 void VmaStringBuilder::Add(const char* pStr)
5902 {
5903  const size_t strLen = strlen(pStr);
5904  if(strLen > 0)
5905  {
5906  const size_t oldCount = m_Data.size();
5907  m_Data.resize(oldCount + strLen);
5908  memcpy(m_Data.data() + oldCount, pStr, strLen);
5909  }
5910 }
5911 
5912 void VmaStringBuilder::AddNumber(uint32_t num)
5913 {
5914  char buf[11];
5915  VmaUint32ToStr(buf, sizeof(buf), num);
5916  Add(buf);
5917 }
5918 
5919 void VmaStringBuilder::AddNumber(uint64_t num)
5920 {
5921  char buf[21];
5922  VmaUint64ToStr(buf, sizeof(buf), num);
5923  Add(buf);
5924 }
5925 
5926 void VmaStringBuilder::AddPointer(const void* ptr)
5927 {
5928  char buf[21];
5929  VmaPtrToStr(buf, sizeof(buf), ptr);
5930  Add(buf);
5931 }
5932 
5933 #endif // #if VMA_STATS_STRING_ENABLED
5934 
5936 // VmaJsonWriter
5937 
5938 #if VMA_STATS_STRING_ENABLED
5939 
5940 class VmaJsonWriter
5941 {
5942  VMA_CLASS_NO_COPY(VmaJsonWriter)
5943 public:
5944  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
5945  ~VmaJsonWriter();
5946 
5947  void BeginObject(bool singleLine = false);
5948  void EndObject();
5949 
5950  void BeginArray(bool singleLine = false);
5951  void EndArray();
5952 
5953  void WriteString(const char* pStr);
5954  void BeginString(const char* pStr = VMA_NULL);
5955  void ContinueString(const char* pStr);
5956  void ContinueString(uint32_t n);
5957  void ContinueString(uint64_t n);
5958  void ContinueString_Pointer(const void* ptr);
5959  void EndString(const char* pStr = VMA_NULL);
5960 
5961  void WriteNumber(uint32_t n);
5962  void WriteNumber(uint64_t n);
5963  void WriteBool(bool b);
5964  void WriteNull();
5965 
5966 private:
5967  static const char* const INDENT;
5968 
5969  enum COLLECTION_TYPE
5970  {
5971  COLLECTION_TYPE_OBJECT,
5972  COLLECTION_TYPE_ARRAY,
5973  };
5974  struct StackItem
5975  {
5976  COLLECTION_TYPE type;
5977  uint32_t valueCount;
5978  bool singleLineMode;
5979  };
5980 
5981  VmaStringBuilder& m_SB;
5982  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
5983  bool m_InsideString;
5984 
5985  void BeginValue(bool isString);
5986  void WriteIndent(bool oneLess = false);
5987 };
5988 
5989 const char* const VmaJsonWriter::INDENT = "  ";
5990 
5991 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
5992  m_SB(sb),
5993  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
5994  m_InsideString(false)
5995 {
5996 }
5997 
5998 VmaJsonWriter::~VmaJsonWriter()
5999 {
6000  VMA_ASSERT(!m_InsideString);
6001  VMA_ASSERT(m_Stack.empty());
6002 }
6003 
6004 void VmaJsonWriter::BeginObject(bool singleLine)
6005 {
6006  VMA_ASSERT(!m_InsideString);
6007 
6008  BeginValue(false);
6009  m_SB.Add('{');
6010 
6011  StackItem item;
6012  item.type = COLLECTION_TYPE_OBJECT;
6013  item.valueCount = 0;
6014  item.singleLineMode = singleLine;
6015  m_Stack.push_back(item);
6016 }
6017 
6018 void VmaJsonWriter::EndObject()
6019 {
6020  VMA_ASSERT(!m_InsideString);
6021 
6022  WriteIndent(true);
6023  m_SB.Add('}');
6024 
6025  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6026  m_Stack.pop_back();
6027 }
6028 
6029 void VmaJsonWriter::BeginArray(bool singleLine)
6030 {
6031  VMA_ASSERT(!m_InsideString);
6032 
6033  BeginValue(false);
6034  m_SB.Add('[');
6035 
6036  StackItem item;
6037  item.type = COLLECTION_TYPE_ARRAY;
6038  item.valueCount = 0;
6039  item.singleLineMode = singleLine;
6040  m_Stack.push_back(item);
6041 }
6042 
6043 void VmaJsonWriter::EndArray()
6044 {
6045  VMA_ASSERT(!m_InsideString);
6046 
6047  WriteIndent(true);
6048  m_SB.Add(']');
6049 
6050  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6051  m_Stack.pop_back();
6052 }
6053 
6054 void VmaJsonWriter::WriteString(const char* pStr)
6055 {
6056  BeginString(pStr);
6057  EndString();
6058 }
6059 
6060 void VmaJsonWriter::BeginString(const char* pStr)
6061 {
6062  VMA_ASSERT(!m_InsideString);
6063 
6064  BeginValue(true);
6065  m_SB.Add('"');
6066  m_InsideString = true;
6067  if(pStr != VMA_NULL && pStr[0] != '\0')
6068  {
6069  ContinueString(pStr);
6070  }
6071 }
6072 
6073 void VmaJsonWriter::ContinueString(const char* pStr)
6074 {
6075  VMA_ASSERT(m_InsideString);
6076 
6077  const size_t strLen = strlen(pStr);
6078  for(size_t i = 0; i < strLen; ++i)
6079  {
6080  char ch = pStr[i];
6081  if(ch == '\\')
6082  {
6083  m_SB.Add("\\\\");
6084  }
6085  else if(ch == '"')
6086  {
6087  m_SB.Add("\\\"");
6088  }
6089  else if(ch >= 32)
6090  {
6091  m_SB.Add(ch);
6092  }
6093  else switch(ch)
6094  {
6095  case '\b':
6096  m_SB.Add("\\b");
6097  break;
6098  case '\f':
6099  m_SB.Add("\\f");
6100  break;
6101  case '\n':
6102  m_SB.Add("\\n");
6103  break;
6104  case '\r':
6105  m_SB.Add("\\r");
6106  break;
6107  case '\t':
6108  m_SB.Add("\\t");
6109  break;
6110  default:
6111  VMA_ASSERT(0 && "Character not currently supported.");
6112  break;
6113  }
6114  }
6115 }
6116 
6117 void VmaJsonWriter::ContinueString(uint32_t n)
6118 {
6119  VMA_ASSERT(m_InsideString);
6120  m_SB.AddNumber(n);
6121 }
6122 
6123 void VmaJsonWriter::ContinueString(uint64_t n)
6124 {
6125  VMA_ASSERT(m_InsideString);
6126  m_SB.AddNumber(n);
6127 }
6128 
6129 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
6130 {
6131  VMA_ASSERT(m_InsideString);
6132  m_SB.AddPointer(ptr);
6133 }
6134 
6135 void VmaJsonWriter::EndString(const char* pStr)
6136 {
6137  VMA_ASSERT(m_InsideString);
6138  if(pStr != VMA_NULL && pStr[0] != '\0')
6139  {
6140  ContinueString(pStr);
6141  }
6142  m_SB.Add('"');
6143  m_InsideString = false;
6144 }
6145 
6146 void VmaJsonWriter::WriteNumber(uint32_t n)
6147 {
6148  VMA_ASSERT(!m_InsideString);
6149  BeginValue(false);
6150  m_SB.AddNumber(n);
6151 }
6152 
6153 void VmaJsonWriter::WriteNumber(uint64_t n)
6154 {
6155  VMA_ASSERT(!m_InsideString);
6156  BeginValue(false);
6157  m_SB.AddNumber(n);
6158 }
6159 
6160 void VmaJsonWriter::WriteBool(bool b)
6161 {
6162  VMA_ASSERT(!m_InsideString);
6163  BeginValue(false);
6164  m_SB.Add(b ? "true" : "false");
6165 }
6166 
6167 void VmaJsonWriter::WriteNull()
6168 {
6169  VMA_ASSERT(!m_InsideString);
6170  BeginValue(false);
6171  m_SB.Add("null");
6172 }
6173 
6174 void VmaJsonWriter::BeginValue(bool isString)
6175 {
6176  if(!m_Stack.empty())
6177  {
6178  StackItem& currItem = m_Stack.back();
6179  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6180  currItem.valueCount % 2 == 0)
6181  {
6182  VMA_ASSERT(isString);
6183  }
6184 
6185  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6186  currItem.valueCount % 2 != 0)
6187  {
6188  m_SB.Add(": ");
6189  }
6190  else if(currItem.valueCount > 0)
6191  {
6192  m_SB.Add(", ");
6193  WriteIndent();
6194  }
6195  else
6196  {
6197  WriteIndent();
6198  }
6199  ++currItem.valueCount;
6200  }
6201 }
6202 
6203 void VmaJsonWriter::WriteIndent(bool oneLess)
6204 {
6205  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6206  {
6207  m_SB.AddNewLine();
6208 
6209  size_t count = m_Stack.size();
6210  if(count > 0 && oneLess)
6211  {
6212  --count;
6213  }
6214  for(size_t i = 0; i < count; ++i)
6215  {
6216  m_SB.Add(INDENT);
6217  }
6218  }
6219 }
6220 
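// Editor's illustrative sketch (not part of the library): how VmaJsonWriter
// composes. Assumes VmaStringBuilder's constructor taking VmaAllocator, as
// defined earlier in this file; kept under #if 0 so it never compiles in.
// Emits: {"Name": "Pool", "IDs": [1, 2]}
#if 0
static void VmaJsonWriterUsageSketch(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Name");  // Inside an object, every even-indexed value must be a string key.
    json.WriteString("Pool");  // The matching value.
    json.WriteString("IDs");
    json.BeginArray(true);     // true = single-line mode: no newlines or indentation inside.
    json.WriteNumber(1u);
    json.WriteNumber(2u);
    json.EndArray();
    json.EndObject();
    // sb now holds the serialized JSON text.
}
#endif
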
6221 #endif // #if VMA_STATS_STRING_ENABLED
6222 
6223 //////////////////////////////////////////////////////////////////////////////
6224 
6225 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6226 {
6227  if(IsUserDataString())
6228  {
6229  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6230 
6231  FreeUserDataString(hAllocator);
6232 
6233  if(pUserData != VMA_NULL)
6234  {
6235  const char* const newStrSrc = (char*)pUserData;
6236  const size_t newStrLen = strlen(newStrSrc);
6237  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6238  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6239  m_pUserData = newStrDst;
6240  }
6241  }
6242  else
6243  {
6244  m_pUserData = pUserData;
6245  }
6246 }
6247 
6248 void VmaAllocation_T::ChangeBlockAllocation(
6249  VmaAllocator hAllocator,
6250  VmaDeviceMemoryBlock* block,
6251  VkDeviceSize offset)
6252 {
6253  VMA_ASSERT(block != VMA_NULL);
6254  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6255 
6256  // Move mapping reference counter from old block to new block.
6257  if(block != m_BlockAllocation.m_Block)
6258  {
6259  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6260  if(IsPersistentMap())
6261  ++mapRefCount;
6262  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6263  block->Map(hAllocator, mapRefCount, VMA_NULL);
6264  }
6265 
6266  m_BlockAllocation.m_Block = block;
6267  m_BlockAllocation.m_Offset = offset;
6268 }
6269 
6270 VkDeviceSize VmaAllocation_T::GetOffset() const
6271 {
6272  switch(m_Type)
6273  {
6274  case ALLOCATION_TYPE_BLOCK:
6275  return m_BlockAllocation.m_Offset;
6276  case ALLOCATION_TYPE_DEDICATED:
6277  return 0;
6278  default:
6279  VMA_ASSERT(0);
6280  return 0;
6281  }
6282 }
6283 
6284 VkDeviceMemory VmaAllocation_T::GetMemory() const
6285 {
6286  switch(m_Type)
6287  {
6288  case ALLOCATION_TYPE_BLOCK:
6289  return m_BlockAllocation.m_Block->GetDeviceMemory();
6290  case ALLOCATION_TYPE_DEDICATED:
6291  return m_DedicatedAllocation.m_hMemory;
6292  default:
6293  VMA_ASSERT(0);
6294  return VK_NULL_HANDLE;
6295  }
6296 }
6297 
6298 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6299 {
6300  switch(m_Type)
6301  {
6302  case ALLOCATION_TYPE_BLOCK:
6303  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6304  case ALLOCATION_TYPE_DEDICATED:
6305  return m_DedicatedAllocation.m_MemoryTypeIndex;
6306  default:
6307  VMA_ASSERT(0);
6308  return UINT32_MAX;
6309  }
6310 }
6311 
6312 void* VmaAllocation_T::GetMappedData() const
6313 {
6314  switch(m_Type)
6315  {
6316  case ALLOCATION_TYPE_BLOCK:
6317  if(m_MapCount != 0)
6318  {
6319  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6320  VMA_ASSERT(pBlockData != VMA_NULL);
6321  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6322  }
6323  else
6324  {
6325  return VMA_NULL;
6326  }
6327  break;
6328  case ALLOCATION_TYPE_DEDICATED:
6329  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6330  return m_DedicatedAllocation.m_pMappedData;
6331  default:
6332  VMA_ASSERT(0);
6333  return VMA_NULL;
6334  }
6335 }
6336 
6337 bool VmaAllocation_T::CanBecomeLost() const
6338 {
6339  switch(m_Type)
6340  {
6341  case ALLOCATION_TYPE_BLOCK:
6342  return m_BlockAllocation.m_CanBecomeLost;
6343  case ALLOCATION_TYPE_DEDICATED:
6344  return false;
6345  default:
6346  VMA_ASSERT(0);
6347  return false;
6348  }
6349 }
6350 
6351 VmaPool VmaAllocation_T::GetPool() const
6352 {
6353  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6354  return m_BlockAllocation.m_hPool;
6355 }
6356 
6357 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6358 {
6359  VMA_ASSERT(CanBecomeLost());
6360 
6361  /*
6362  Warning: This is a carefully designed algorithm.
6363  Do not modify unless you really know what you're doing :)
6364  */
6365  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
6366  for(;;)
6367  {
6368  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
6369  {
6370  VMA_ASSERT(0);
6371  return false;
6372  }
6373  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
6374  {
6375  return false;
6376  }
6377  else // Last use time earlier than current time.
6378  {
6379  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
6380  {
6381  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
6382  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
6383  return true;
6384  }
6385  }
6386  }
6387 }
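
// Editor's note: the loop above is the classic compare-exchange retry idiom.
// A self-contained sketch of the same pattern with std::atomic (illustrative
// only; names are hypothetical, not part of the library):
#if 0
#include <atomic>
#include <cstdint>

static const uint32_t SKETCH_FRAME_INDEX_LOST = UINT32_MAX;

// Returns true if this call transitioned lastUseFrameIndex to "lost".
static bool SketchTryMakeLost(
    std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount)
{
    uint32_t localLastUse = lastUseFrameIndex.load();
    for(;;)
    {
        if(localLastUse == SKETCH_FRAME_INDEX_LOST)
        {
            return false; // Another thread already marked it lost.
        }
        if(localLastUse + frameInUseCount >= currentFrameIndex)
        {
            return false; // Still possibly in use by queued GPU work.
        }
        // On failure, compare_exchange_weak reloads localLastUse, so both
        // conditions above are re-checked against the freshly observed value.
        if(lastUseFrameIndex.compare_exchange_weak(localLastUse, SKETCH_FRAME_INDEX_LOST))
        {
            return true;
        }
    }
}
#endif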
6388 
6389 #if VMA_STATS_STRING_ENABLED
6390 
6391 // Names correspond to values of enum VmaSuballocationType.
6392 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
6393  "FREE",
6394  "UNKNOWN",
6395  "BUFFER",
6396  "IMAGE_UNKNOWN",
6397  "IMAGE_LINEAR",
6398  "IMAGE_OPTIMAL",
6399 };
6400 
6401 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
6402 {
6403  json.WriteString("Type");
6404  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
6405 
6406  json.WriteString("Size");
6407  json.WriteNumber(m_Size);
6408 
6409  if(m_pUserData != VMA_NULL)
6410  {
6411  json.WriteString("UserData");
6412  if(IsUserDataString())
6413  {
6414  json.WriteString((const char*)m_pUserData);
6415  }
6416  else
6417  {
6418  json.BeginString();
6419  json.ContinueString_Pointer(m_pUserData);
6420  json.EndString();
6421  }
6422  }
6423 
6424  json.WriteString("CreationFrameIndex");
6425  json.WriteNumber(m_CreationFrameIndex);
6426 
6427  json.WriteString("LastUseFrameIndex");
6428  json.WriteNumber(GetLastUseFrameIndex());
6429 
6430  if(m_BufferImageUsage != 0)
6431  {
6432  json.WriteString("Usage");
6433  json.WriteNumber(m_BufferImageUsage);
6434  }
6435 }
6436 
6437 #endif
6438 
6439 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6440 {
6441  VMA_ASSERT(IsUserDataString());
6442  if(m_pUserData != VMA_NULL)
6443  {
6444  char* const oldStr = (char*)m_pUserData;
6445  const size_t oldStrLen = strlen(oldStr);
6446  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6447  m_pUserData = VMA_NULL;
6448  }
6449 }
6450 
6451 void VmaAllocation_T::BlockAllocMap()
6452 {
6453  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6454 
6455  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6456  {
6457  ++m_MapCount;
6458  }
6459  else
6460  {
6461  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6462  }
6463 }
6464 
6465 void VmaAllocation_T::BlockAllocUnmap()
6466 {
6467  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6468 
6469  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6470  {
6471  --m_MapCount;
6472  }
6473  else
6474  {
6475  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6476  }
6477 }
6478 
6479 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6480 {
6481  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6482 
6483  if(m_MapCount != 0)
6484  {
6485  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6486  {
6487  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6488  *ppData = m_DedicatedAllocation.m_pMappedData;
6489  ++m_MapCount;
6490  return VK_SUCCESS;
6491  }
6492  else
6493  {
6494  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6495  return VK_ERROR_MEMORY_MAP_FAILED;
6496  }
6497  }
6498  else
6499  {
6500  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6501  hAllocator->m_hDevice,
6502  m_DedicatedAllocation.m_hMemory,
6503  0, // offset
6504  VK_WHOLE_SIZE,
6505  0, // flags
6506  ppData);
6507  if(result == VK_SUCCESS)
6508  {
6509  m_DedicatedAllocation.m_pMappedData = *ppData;
6510  m_MapCount = 1;
6511  }
6512  return result;
6513  }
6514 }
6515 
6516 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6517 {
6518  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6519 
6520  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6521  {
6522  --m_MapCount;
6523  if(m_MapCount == 0)
6524  {
6525  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6526  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6527  hAllocator->m_hDevice,
6528  m_DedicatedAllocation.m_hMemory);
6529  }
6530  }
6531  else
6532  {
6533  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6534  }
6535 }
6536 
6537 #if VMA_STATS_STRING_ENABLED
6538 
6539 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6540 {
6541  json.BeginObject();
6542 
6543  json.WriteString("Blocks");
6544  json.WriteNumber(stat.blockCount);
6545 
6546  json.WriteString("Allocations");
6547  json.WriteNumber(stat.allocationCount);
6548 
6549  json.WriteString("UnusedRanges");
6550  json.WriteNumber(stat.unusedRangeCount);
6551 
6552  json.WriteString("UsedBytes");
6553  json.WriteNumber(stat.usedBytes);
6554 
6555  json.WriteString("UnusedBytes");
6556  json.WriteNumber(stat.unusedBytes);
6557 
6558  if(stat.allocationCount > 1)
6559  {
6560  json.WriteString("AllocationSize");
6561  json.BeginObject(true);
6562  json.WriteString("Min");
6563  json.WriteNumber(stat.allocationSizeMin);
6564  json.WriteString("Avg");
6565  json.WriteNumber(stat.allocationSizeAvg);
6566  json.WriteString("Max");
6567  json.WriteNumber(stat.allocationSizeMax);
6568  json.EndObject();
6569  }
6570 
6571  if(stat.unusedRangeCount > 1)
6572  {
6573  json.WriteString("UnusedRangeSize");
6574  json.BeginObject(true);
6575  json.WriteString("Min");
6576  json.WriteNumber(stat.unusedRangeSizeMin);
6577  json.WriteString("Avg");
6578  json.WriteNumber(stat.unusedRangeSizeAvg);
6579  json.WriteString("Max");
6580  json.WriteNumber(stat.unusedRangeSizeMax);
6581  json.EndObject();
6582  }
6583 
6584  json.EndObject();
6585 }
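
// Editor's note: an example of the object VmaPrintStatInfo emits, with
// illustrative values (the single-line sub-objects come from BeginObject(true)):
//   { "Blocks": 1, "Allocations": 3, "UnusedRanges": 2,
//     "UsedBytes": 768, "UnusedBytes": 256,
//     "AllocationSize": { "Min": 128, "Avg": 256, "Max": 512 },
//     "UnusedRangeSize": { "Min": 64, "Avg": 128, "Max": 192 } }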
6586 
6587 #endif // #if VMA_STATS_STRING_ENABLED
6588 
6589 struct VmaSuballocationItemSizeLess
6590 {
6591  bool operator()(
6592  const VmaSuballocationList::iterator lhs,
6593  const VmaSuballocationList::iterator rhs) const
6594  {
6595  return lhs->size < rhs->size;
6596  }
6597  bool operator()(
6598  const VmaSuballocationList::iterator lhs,
6599  VkDeviceSize rhsSize) const
6600  {
6601  return lhs->size < rhsSize;
6602  }
6603 };
6604 
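// Editor's note: the second operator() makes this a heterogeneous comparator,
// letting a sorted vector of list iterators be binary-searched directly
// against a VkDeviceSize key (see the VmaBinaryFindFirstNotLess uses below).
// A standalone equivalent using std::lower_bound (illustrative only; names
// are hypothetical, not part of the library):
#if 0
#include <algorithm>
#include <list>
#include <vector>

struct SketchItem { unsigned long long size; };
typedef std::list<SketchItem>::iterator SketchItemIter;

struct SketchItemSizeLess
{
    bool operator()(SketchItemIter lhs, SketchItemIter rhs) const
        { return lhs->size < rhs->size; }
    bool operator()(SketchItemIter lhs, unsigned long long rhsSize) const
        { return lhs->size < rhsSize; }
};

// Returns the first registered item whose size is not less than minSize.
static std::vector<SketchItemIter>::iterator SketchFindFirstNotLess(
    std::vector<SketchItemIter>& bySize, unsigned long long minSize)
{
    // std::lower_bound invokes comp(*it, minSize), which selects the
    // heterogeneous overload above.
    return std::lower_bound(bySize.begin(), bySize.end(), minSize, SketchItemSizeLess());
}
#endif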
6605 
6606 //////////////////////////////////////////////////////////////////////////////
6607 // class VmaBlockMetadata
6608 
6609 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
6610  m_Size(0),
6611  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
6612 {
6613 }
6614 
6615 #if VMA_STATS_STRING_ENABLED
6616 
6617 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
6618  VkDeviceSize unusedBytes,
6619  size_t allocationCount,
6620  size_t unusedRangeCount) const
6621 {
6622  json.BeginObject();
6623 
6624  json.WriteString("TotalBytes");
6625  json.WriteNumber(GetSize());
6626 
6627  json.WriteString("UnusedBytes");
6628  json.WriteNumber(unusedBytes);
6629 
6630  json.WriteString("Allocations");
6631  json.WriteNumber((uint64_t)allocationCount);
6632 
6633  json.WriteString("UnusedRanges");
6634  json.WriteNumber((uint64_t)unusedRangeCount);
6635 
6636  json.WriteString("Suballocations");
6637  json.BeginArray();
6638 }
6639 
6640 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6641  VkDeviceSize offset,
6642  VmaAllocation hAllocation) const
6643 {
6644  json.BeginObject(true);
6645 
6646  json.WriteString("Offset");
6647  json.WriteNumber(offset);
6648 
6649  hAllocation->PrintParameters(json);
6650 
6651  json.EndObject();
6652 }
6653 
6654 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6655  VkDeviceSize offset,
6656  VkDeviceSize size) const
6657 {
6658  json.BeginObject(true);
6659 
6660  json.WriteString("Offset");
6661  json.WriteNumber(offset);
6662 
6663  json.WriteString("Type");
6664  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
6665 
6666  json.WriteString("Size");
6667  json.WriteNumber(size);
6668 
6669  json.EndObject();
6670 }
6671 
6672 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
6673 {
6674  json.EndArray();
6675  json.EndObject();
6676 }
6677 
6678 #endif // #if VMA_STATS_STRING_ENABLED
6679 
6680 //////////////////////////////////////////////////////////////////////////////
6681 // class VmaBlockMetadata_Generic
6682 
6683 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
6684  VmaBlockMetadata(hAllocator),
6685  m_FreeCount(0),
6686  m_SumFreeSize(0),
6687  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
6688  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
6689 {
6690 }
6691 
6692 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
6693 {
6694 }
6695 
6696 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6697 {
6698  VmaBlockMetadata::Init(size);
6699 
6700  m_FreeCount = 1;
6701  m_SumFreeSize = size;
6702 
6703  VmaSuballocation suballoc = {};
6704  suballoc.offset = 0;
6705  suballoc.size = size;
6706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6707  suballoc.hAllocation = VK_NULL_HANDLE;
6708 
6709  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
6710  m_Suballocations.push_back(suballoc);
6711  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6712  --suballocItem;
6713  m_FreeSuballocationsBySize.push_back(suballocItem);
6714 }
6715 
6716 bool VmaBlockMetadata_Generic::Validate() const
6717 {
6718  VMA_VALIDATE(!m_Suballocations.empty());
6719 
6720  // Expected offset of new suballocation as calculated from previous ones.
6721  VkDeviceSize calculatedOffset = 0;
6722  // Expected number of free suballocations as calculated from traversing their list.
6723  uint32_t calculatedFreeCount = 0;
6724  // Expected sum size of free suballocations as calculated from traversing their list.
6725  VkDeviceSize calculatedSumFreeSize = 0;
6726  // Expected number of free suballocations that should be registered in
6727  // m_FreeSuballocationsBySize calculated from traversing their list.
6728  size_t freeSuballocationsToRegister = 0;
6729  // True if previous visited suballocation was free.
6730  bool prevFree = false;
6731 
6732  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6733  suballocItem != m_Suballocations.cend();
6734  ++suballocItem)
6735  {
6736  const VmaSuballocation& subAlloc = *suballocItem;
6737 
6738  // Actual offset of this suballocation doesn't match expected one.
6739  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
6740 
6741  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
6742  // Two adjacent free suballocations are invalid. They should be merged.
6743  VMA_VALIDATE(!prevFree || !currFree);
6744 
6745  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
6746 
6747  if(currFree)
6748  {
6749  calculatedSumFreeSize += subAlloc.size;
6750  ++calculatedFreeCount;
6751  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6752  {
6753  ++freeSuballocationsToRegister;
6754  }
6755 
6756  // Margin required between allocations - every free space must be at least that large.
6757  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
6758  }
6759  else
6760  {
6761  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
6762  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
6763 
6764  // Margin required between allocations - previous allocation must be free.
6765  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
6766  }
6767 
6768  calculatedOffset += subAlloc.size;
6769  prevFree = currFree;
6770  }
6771 
6772  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
6773  // match expected one.
6774  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
6775 
6776  VkDeviceSize lastSize = 0;
6777  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
6778  {
6779  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
6780 
6781  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
6782  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6783  // They must be sorted by size ascending.
6784  VMA_VALIDATE(suballocItem->size >= lastSize);
6785 
6786  lastSize = suballocItem->size;
6787  }
6788 
6789  // Check if totals match calculated values.
6790  VMA_VALIDATE(ValidateFreeSuballocationList());
6791  VMA_VALIDATE(calculatedOffset == GetSize());
6792  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
6793  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
6794 
6795  return true;
6796 }
6797 
6798 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6799 {
6800  if(!m_FreeSuballocationsBySize.empty())
6801  {
6802  return m_FreeSuballocationsBySize.back()->size;
6803  }
6804  else
6805  {
6806  return 0;
6807  }
6808 }
6809 
6810 bool VmaBlockMetadata_Generic::IsEmpty() const
6811 {
6812  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6813 }
6814 
6815 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6816 {
6817  outInfo.blockCount = 1;
6818 
6819  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6820  outInfo.allocationCount = rangeCount - m_FreeCount;
6821  outInfo.unusedRangeCount = m_FreeCount;
6822 
6823  outInfo.unusedBytes = m_SumFreeSize;
6824  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6825 
6826  outInfo.allocationSizeMin = UINT64_MAX;
6827  outInfo.allocationSizeMax = 0;
6828  outInfo.unusedRangeSizeMin = UINT64_MAX;
6829  outInfo.unusedRangeSizeMax = 0;
6830 
6831  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6832  suballocItem != m_Suballocations.cend();
6833  ++suballocItem)
6834  {
6835  const VmaSuballocation& suballoc = *suballocItem;
6836  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6837  {
6838  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6839  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6840  }
6841  else
6842  {
6843  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6844  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6845  }
6846  }
6847 }
6848 
6849 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6850 {
6851  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6852 
6853  inoutStats.size += GetSize();
6854  inoutStats.unusedSize += m_SumFreeSize;
6855  inoutStats.allocationCount += rangeCount - m_FreeCount;
6856  inoutStats.unusedRangeCount += m_FreeCount;
6857  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6858 }
6859 
6860 #if VMA_STATS_STRING_ENABLED
6861 
6862 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6863 {
6864  PrintDetailedMap_Begin(json,
6865  m_SumFreeSize, // unusedBytes
6866  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6867  m_FreeCount); // unusedRangeCount
6868 
6869  size_t i = 0;
6870  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6871  suballocItem != m_Suballocations.cend();
6872  ++suballocItem, ++i)
6873  {
6874  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6875  {
6876  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6877  }
6878  else
6879  {
6880  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6881  }
6882  }
6883 
6884  PrintDetailedMap_End(json);
6885 }
6886 
6887 #endif // #if VMA_STATS_STRING_ENABLED
6888 
6889 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6890  uint32_t currentFrameIndex,
6891  uint32_t frameInUseCount,
6892  VkDeviceSize bufferImageGranularity,
6893  VkDeviceSize allocSize,
6894  VkDeviceSize allocAlignment,
6895  bool upperAddress,
6896  VmaSuballocationType allocType,
6897  bool canMakeOtherLost,
6898  uint32_t strategy,
6899  VmaAllocationRequest* pAllocationRequest)
6900 {
6901  VMA_ASSERT(allocSize > 0);
6902  VMA_ASSERT(!upperAddress);
6903  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6904  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6905  VMA_HEAVY_ASSERT(Validate());
6906 
6907  // There is not enough total free space in this block to fulfill the request: Early return.
6908  if(canMakeOtherLost == false &&
6909  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6910  {
6911  return false;
6912  }
6913 
6914  // New algorithm, efficiently searching freeSuballocationsBySize.
6915  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6916  if(freeSuballocCount > 0)
6917  {
6918  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
6919  {
6920  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6921  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6922  m_FreeSuballocationsBySize.data(),
6923  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6924  allocSize + 2 * VMA_DEBUG_MARGIN,
6925  VmaSuballocationItemSizeLess());
6926  size_t index = it - m_FreeSuballocationsBySize.data();
6927  for(; index < freeSuballocCount; ++index)
6928  {
6929  if(CheckAllocation(
6930  currentFrameIndex,
6931  frameInUseCount,
6932  bufferImageGranularity,
6933  allocSize,
6934  allocAlignment,
6935  allocType,
6936  m_FreeSuballocationsBySize[index],
6937  false, // canMakeOtherLost
6938  &pAllocationRequest->offset,
6939  &pAllocationRequest->itemsToMakeLostCount,
6940  &pAllocationRequest->sumFreeSize,
6941  &pAllocationRequest->sumItemSize))
6942  {
6943  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6944  return true;
6945  }
6946  }
6947  }
6948  else // WORST_FIT, FIRST_FIT
6949  {
6950  // Search starting from biggest suballocations.
6951  for(size_t index = freeSuballocCount; index--; )
6952  {
6953  if(CheckAllocation(
6954  currentFrameIndex,
6955  frameInUseCount,
6956  bufferImageGranularity,
6957  allocSize,
6958  allocAlignment,
6959  allocType,
6960  m_FreeSuballocationsBySize[index],
6961  false, // canMakeOtherLost
6962  &pAllocationRequest->offset,
6963  &pAllocationRequest->itemsToMakeLostCount,
6964  &pAllocationRequest->sumFreeSize,
6965  &pAllocationRequest->sumItemSize))
6966  {
6967  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6968  return true;
6969  }
6970  }
6971  }
6972  }
6973 
6974  if(canMakeOtherLost)
6975  {
6976  // Brute-force algorithm. TODO: Come up with something better.
6977 
6978  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6979  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6980 
6981  VmaAllocationRequest tmpAllocRequest = {};
6982  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
6983  suballocIt != m_Suballocations.end();
6984  ++suballocIt)
6985  {
6986  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
6987  suballocIt->hAllocation->CanBecomeLost())
6988  {
6989  if(CheckAllocation(
6990  currentFrameIndex,
6991  frameInUseCount,
6992  bufferImageGranularity,
6993  allocSize,
6994  allocAlignment,
6995  allocType,
6996  suballocIt,
6997  canMakeOtherLost,
6998  &tmpAllocRequest.offset,
6999  &tmpAllocRequest.itemsToMakeLostCount,
7000  &tmpAllocRequest.sumFreeSize,
7001  &tmpAllocRequest.sumItemSize))
7002  {
7003  tmpAllocRequest.item = suballocIt;
7004 
7005  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
7006  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
7007  {
7008  *pAllocationRequest = tmpAllocRequest;
7009  }
7010  }
7011  }
7012  }
7013 
7014  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
7015  {
7016  return true;
7017  }
7018  }
7019 
7020  return false;
7021 }
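
// Editor's note: with VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT the
// size-sorted vector is binary-searched and then scanned upward, so the
// smallest free range that fits wins; the other strategies scan downward
// from the biggest range, so the first (largest) fit wins. The
// canMakeOtherLost fallback is a brute-force pass over every suballocation,
// as the TODO above acknowledges.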
7022 
7023 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
7024  uint32_t currentFrameIndex,
7025  uint32_t frameInUseCount,
7026  VmaAllocationRequest* pAllocationRequest)
7027 {
7028  while(pAllocationRequest->itemsToMakeLostCount > 0)
7029  {
7030  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
7031  {
7032  ++pAllocationRequest->item;
7033  }
7034  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7035  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
7036  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
7037  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7038  {
7039  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
7040  --pAllocationRequest->itemsToMakeLostCount;
7041  }
7042  else
7043  {
7044  return false;
7045  }
7046  }
7047 
7048  VMA_HEAVY_ASSERT(Validate());
7049  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
7050  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
7051 
7052  return true;
7053 }
7054 
7055 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7056 {
7057  uint32_t lostAllocationCount = 0;
7058  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7059  it != m_Suballocations.end();
7060  ++it)
7061  {
7062  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7063  it->hAllocation->CanBecomeLost() &&
7064  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7065  {
7066  it = FreeSuballocation(it);
7067  ++lostAllocationCount;
7068  }
7069  }
7070  return lostAllocationCount;
7071 }
7072 
7073 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7074 {
7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7076  it != m_Suballocations.end();
7077  ++it)
7078  {
7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7080  {
7081  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7082  {
7083  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7084  return VK_ERROR_VALIDATION_FAILED_EXT;
7085  }
7086  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7087  {
7088  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7089  return VK_ERROR_VALIDATION_FAILED_EXT;
7090  }
7091  }
7092  }
7093 
7094  return VK_SUCCESS;
7095 }
7096 
7097 void VmaBlockMetadata_Generic::Alloc(
7098  const VmaAllocationRequest& request,
7099  VmaSuballocationType type,
7100  VkDeviceSize allocSize,
7101  bool upperAddress,
7102  VmaAllocation hAllocation)
7103 {
7104  VMA_ASSERT(!upperAddress);
7105  VMA_ASSERT(request.item != m_Suballocations.end());
7106  VmaSuballocation& suballoc = *request.item;
7107  // Given suballocation is a free block.
7108  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7109  // Given offset is inside this suballocation.
7110  VMA_ASSERT(request.offset >= suballoc.offset);
7111  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7112  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7113  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7114 
7115  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7116  // it to become used.
7117  UnregisterFreeSuballocation(request.item);
7118 
7119  suballoc.offset = request.offset;
7120  suballoc.size = allocSize;
7121  suballoc.type = type;
7122  suballoc.hAllocation = hAllocation;
7123 
7124  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7125  if(paddingEnd)
7126  {
7127  VmaSuballocation paddingSuballoc = {};
7128  paddingSuballoc.offset = request.offset + allocSize;
7129  paddingSuballoc.size = paddingEnd;
7130  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7131  VmaSuballocationList::iterator next = request.item;
7132  ++next;
7133  const VmaSuballocationList::iterator paddingEndItem =
7134  m_Suballocations.insert(next, paddingSuballoc);
7135  RegisterFreeSuballocation(paddingEndItem);
7136  }
7137 
7138  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7139  if(paddingBegin)
7140  {
7141  VmaSuballocation paddingSuballoc = {};
7142  paddingSuballoc.offset = request.offset - paddingBegin;
7143  paddingSuballoc.size = paddingBegin;
7144  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7145  const VmaSuballocationList::iterator paddingBeginItem =
7146  m_Suballocations.insert(request.item, paddingSuballoc);
7147  RegisterFreeSuballocation(paddingBeginItem);
7148  }
7149 
7150  // Update totals.
7151  m_FreeCount = m_FreeCount - 1;
7152  if(paddingBegin > 0)
7153  {
7154  ++m_FreeCount;
7155  }
7156  if(paddingEnd > 0)
7157  {
7158  ++m_FreeCount;
7159  }
7160  m_SumFreeSize -= allocSize;
7161 }
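
// Editor's worked example (illustrative numbers): a free suballocation
// covering [100, 500) (offset 100, size 400) receiving request.offset = 128,
// allocSize = 256:
//   paddingBegin = 128 - 100 = 28        -> new free suballocation [100, 128)
//   paddingEnd   = 400 - 28 - 256 = 116  -> new free suballocation [384, 500)
// m_FreeCount loses the consumed range and gains one per nonzero padding, and
// m_SumFreeSize drops by exactly allocSize, since both paddings remain free.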
7162 
7163 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7164 {
7165  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7166  suballocItem != m_Suballocations.end();
7167  ++suballocItem)
7168  {
7169  VmaSuballocation& suballoc = *suballocItem;
7170  if(suballoc.hAllocation == allocation)
7171  {
7172  FreeSuballocation(suballocItem);
7173  VMA_HEAVY_ASSERT(Validate());
7174  return;
7175  }
7176  }
7177  VMA_ASSERT(0 && "Not found!");
7178 }
7179 
7180 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7181 {
7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7183  suballocItem != m_Suballocations.end();
7184  ++suballocItem)
7185  {
7186  VmaSuballocation& suballoc = *suballocItem;
7187  if(suballoc.offset == offset)
7188  {
7189  FreeSuballocation(suballocItem);
7190  return;
7191  }
7192  }
7193  VMA_ASSERT(0 && "Not found!");
7194 }
7195 
7196 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7197 {
7198  VkDeviceSize lastSize = 0;
7199  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7200  {
7201  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7202 
7203  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7204  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
7205  VMA_VALIDATE(it->size >= lastSize);
7206  lastSize = it->size;
7207  }
7208  return true;
7209 }
7210 
7211 bool VmaBlockMetadata_Generic::CheckAllocation(
7212  uint32_t currentFrameIndex,
7213  uint32_t frameInUseCount,
7214  VkDeviceSize bufferImageGranularity,
7215  VkDeviceSize allocSize,
7216  VkDeviceSize allocAlignment,
7217  VmaSuballocationType allocType,
7218  VmaSuballocationList::const_iterator suballocItem,
7219  bool canMakeOtherLost,
7220  VkDeviceSize* pOffset,
7221  size_t* itemsToMakeLostCount,
7222  VkDeviceSize* pSumFreeSize,
7223  VkDeviceSize* pSumItemSize) const
7224 {
7225  VMA_ASSERT(allocSize > 0);
7226  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
7227  VMA_ASSERT(suballocItem != m_Suballocations.cend());
7228  VMA_ASSERT(pOffset != VMA_NULL);
7229 
7230  *itemsToMakeLostCount = 0;
7231  *pSumFreeSize = 0;
7232  *pSumItemSize = 0;
7233 
7234  if(canMakeOtherLost)
7235  {
7236  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7237  {
7238  *pSumFreeSize = suballocItem->size;
7239  }
7240  else
7241  {
7242  if(suballocItem->hAllocation->CanBecomeLost() &&
7243  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7244  {
7245  ++*itemsToMakeLostCount;
7246  *pSumItemSize = suballocItem->size;
7247  }
7248  else
7249  {
7250  return false;
7251  }
7252  }
7253 
7254  // Remaining size is too small for this request: Early return.
7255  if(GetSize() - suballocItem->offset < allocSize)
7256  {
7257  return false;
7258  }
7259 
7260  // Start from offset equal to beginning of this suballocation.
7261  *pOffset = suballocItem->offset;
7262 
7263  // Apply VMA_DEBUG_MARGIN at the beginning.
7264  if(VMA_DEBUG_MARGIN > 0)
7265  {
7266  *pOffset += VMA_DEBUG_MARGIN;
7267  }
7268 
7269  // Apply alignment.
7270  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7271 
7272  // Check previous suballocations for BufferImageGranularity conflicts.
7273  // Make bigger alignment if necessary.
7274  if(bufferImageGranularity > 1)
7275  {
7276  bool bufferImageGranularityConflict = false;
7277  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7278  while(prevSuballocItem != m_Suballocations.cbegin())
7279  {
7280  --prevSuballocItem;
7281  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7282  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7283  {
7284  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7285  {
7286  bufferImageGranularityConflict = true;
7287  break;
7288  }
7289  }
7290  else
7291  // Already on previous page.
7292  break;
7293  }
7294  if(bufferImageGranularityConflict)
7295  {
7296  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7297  }
7298  }
7299 
7300  // Now that we have final *pOffset, check if we are past suballocItem.
7301  // If yes, return false - this function should be called for another suballocItem as starting point.
7302  if(*pOffset >= suballocItem->offset + suballocItem->size)
7303  {
7304  return false;
7305  }
7306 
7307  // Calculate padding at the beginning based on current offset.
7308  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
7309 
7310  // Calculate required margin at the end.
7311  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7312 
7313  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
7314  // Another early return check.
7315  if(suballocItem->offset + totalSize > GetSize())
7316  {
7317  return false;
7318  }
7319 
7320  // Advance lastSuballocItem until desired size is reached.
7321  // Update itemsToMakeLostCount.
7322  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
7323  if(totalSize > suballocItem->size)
7324  {
7325  VkDeviceSize remainingSize = totalSize - suballocItem->size;
7326  while(remainingSize > 0)
7327  {
7328  ++lastSuballocItem;
7329  if(lastSuballocItem == m_Suballocations.cend())
7330  {
7331  return false;
7332  }
7333  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7334  {
7335  *pSumFreeSize += lastSuballocItem->size;
7336  }
7337  else
7338  {
7339  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
7340  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
7341  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7342  {
7343  ++*itemsToMakeLostCount;
7344  *pSumItemSize += lastSuballocItem->size;
7345  }
7346  else
7347  {
7348  return false;
7349  }
7350  }
7351  remainingSize = (lastSuballocItem->size < remainingSize) ?
7352  remainingSize - lastSuballocItem->size : 0;
7353  }
7354  }
7355 
7356  // Check next suballocations for BufferImageGranularity conflicts.
7357  // If conflict exists, we must mark more allocations lost or fail.
7358  if(bufferImageGranularity > 1)
7359  {
7360  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
7361  ++nextSuballocItem;
7362  while(nextSuballocItem != m_Suballocations.cend())
7363  {
7364  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7365  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7366  {
7367  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7368  {
7369  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
7370  if(nextSuballoc.hAllocation->CanBecomeLost() &&
7371  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
7372  {
7373  ++*itemsToMakeLostCount;
7374  }
7375  else
7376  {
7377  return false;
7378  }
7379  }
7380  }
7381  else
7382  {
7383  // Already on next page.
7384  break;
7385  }
7386  ++nextSuballocItem;
7387  }
7388  }
7389  }
7390  else
7391  {
7392  const VmaSuballocation& suballoc = *suballocItem;
7393  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7394 
7395  *pSumFreeSize = suballoc.size;
7396 
7397  // Size of this suballocation is too small for this request: Early return.
7398  if(suballoc.size < allocSize)
7399  {
7400  return false;
7401  }
7402 
7403  // Start from offset equal to beginning of this suballocation.
7404  *pOffset = suballoc.offset;
7405 
7406  // Apply VMA_DEBUG_MARGIN at the beginning.
7407  if(VMA_DEBUG_MARGIN > 0)
7408  {
7409  *pOffset += VMA_DEBUG_MARGIN;
7410  }
7411 
7412  // Apply alignment.
7413  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
7414 
7415  // Check previous suballocations for BufferImageGranularity conflicts.
7416  // Make bigger alignment if necessary.
7417  if(bufferImageGranularity > 1)
7418  {
7419  bool bufferImageGranularityConflict = false;
7420  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
7421  while(prevSuballocItem != m_Suballocations.cbegin())
7422  {
7423  --prevSuballocItem;
7424  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
7425  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
7426  {
7427  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
7428  {
7429  bufferImageGranularityConflict = true;
7430  break;
7431  }
7432  }
7433  else
7434  // Already on previous page.
7435  break;
7436  }
7437  if(bufferImageGranularityConflict)
7438  {
7439  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
7440  }
7441  }
7442 
7443  // Calculate padding at the beginning based on current offset.
7444  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
7445 
7446  // Calculate required margin at the end.
7447  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
7448 
7449  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
7450  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
7451  {
7452  return false;
7453  }
7454 
7455  // Check next suballocations for BufferImageGranularity conflicts.
7456  // If conflict exists, allocation cannot be made here.
7457  if(bufferImageGranularity > 1)
7458  {
7459  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
7460  ++nextSuballocItem;
7461  while(nextSuballocItem != m_Suballocations.cend())
7462  {
7463  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
7464  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
7465  {
7466  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
7467  {
7468  return false;
7469  }
7470  }
7471  else
7472  {
7473  // Already on next page.
7474  break;
7475  }
7476  ++nextSuballocItem;
7477  }
7478  }
7479  }
7480 
7481  // All tests passed: Success. pOffset is already filled.
7482  return true;
7483 }
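
// Editor's worked example (illustrative numbers) for the simple path above:
// a free suballocation at offset 1000, size 300, with VMA_DEBUG_MARGIN = 16,
// allocAlignment = 64, allocSize = 200:
//   *pOffset = 1000 + 16 = 1016, aligned up to 64 -> 1024
//   paddingBegin = 1024 - 1000 = 24
//   24 + 200 + 16 = 240 <= 300, so the request fits at offset 1024.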
7484 
7485 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7486 {
7487  VMA_ASSERT(item != m_Suballocations.end());
7488  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7489 
7490  VmaSuballocationList::iterator nextItem = item;
7491  ++nextItem;
7492  VMA_ASSERT(nextItem != m_Suballocations.end());
7493  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7494 
7495  item->size += nextItem->size;
7496  --m_FreeCount;
7497  m_Suballocations.erase(nextItem);
7498 }
7499 
7500 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
7501 {
7502  // Change this suballocation to be marked as free.
7503  VmaSuballocation& suballoc = *suballocItem;
7504  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7505  suballoc.hAllocation = VK_NULL_HANDLE;
7506 
7507  // Update totals.
7508  ++m_FreeCount;
7509  m_SumFreeSize += suballoc.size;
7510 
7511  // Merge with previous and/or next suballocation if it's also free.
7512  bool mergeWithNext = false;
7513  bool mergeWithPrev = false;
7514 
7515  VmaSuballocationList::iterator nextItem = suballocItem;
7516  ++nextItem;
7517  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
7518  {
7519  mergeWithNext = true;
7520  }
7521 
7522  VmaSuballocationList::iterator prevItem = suballocItem;
7523  if(suballocItem != m_Suballocations.begin())
7524  {
7525  --prevItem;
7526  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
7527  {
7528  mergeWithPrev = true;
7529  }
7530  }
7531 
7532  if(mergeWithNext)
7533  {
7534  UnregisterFreeSuballocation(nextItem);
7535  MergeFreeWithNext(suballocItem);
7536  }
7537 
7538  if(mergeWithPrev)
7539  {
7540  UnregisterFreeSuballocation(prevItem);
7541  MergeFreeWithNext(prevItem);
7542  RegisterFreeSuballocation(prevItem);
7543  return prevItem;
7544  }
7545  else
7546  {
7547  RegisterFreeSuballocation(suballocItem);
7548  return suballocItem;
7549  }
7550 }
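
// Editor's note: Validate() guarantees no two adjacent free suballocations,
// so at most one merge in each direction can apply here. Unregistering before
// merging matters because MergeFreeWithNext() changes item->size, which is
// the sort key of m_FreeSuballocationsBySize.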
7551 
7552 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7553 {
7554  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7555  VMA_ASSERT(item->size > 0);
7556 
7557  // You may want to enable this validation at the beginning or at the end of
7558  // this function, depending on what you want to check.
7559  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7560 
7561  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7562  {
7563  if(m_FreeSuballocationsBySize.empty())
7564  {
7565  m_FreeSuballocationsBySize.push_back(item);
7566  }
7567  else
7568  {
7569  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7570  }
7571  }
7572 
7573  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7574 }
7575 
7576 
7577 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7578 {
7579  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7580  VMA_ASSERT(item->size > 0);
7581 
7582  // You may want to enable this validation at the beginning or at the end of
7583  // this function, depending on what you want to check.
7584  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7585 
7586  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7587  {
7588  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7589  m_FreeSuballocationsBySize.data(),
7590  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7591  item,
7592  VmaSuballocationItemSizeLess());
7593  for(size_t index = it - m_FreeSuballocationsBySize.data();
7594  index < m_FreeSuballocationsBySize.size();
7595  ++index)
7596  {
7597  if(m_FreeSuballocationsBySize[index] == item)
7598  {
7599  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7600  return;
7601  }
7602  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7603  }
7604  VMA_ASSERT(0 && "Not found.");
7605  }
7606 
7607  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7608 }
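
// Editor's note: the binary search lands on the first entry whose size equals
// item->size; the loop then walks the run of equal-sized entries to find the
// exact iterator. The assert inside the loop fires if the scan walks past
// that run, i.e. the item was never registered.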
7609 
7610 //////////////////////////////////////////////////////////////////////////////
7611 // class VmaBlockMetadata_Linear
7612 
7613 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
7614  VmaBlockMetadata(hAllocator),
7615  m_SumFreeSize(0),
7616  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7617  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
7618  m_1stVectorIndex(0),
7619  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
7620  m_1stNullItemsBeginCount(0),
7621  m_1stNullItemsMiddleCount(0),
7622  m_2ndNullItemsCount(0)
7623 {
7624 }
7625 
7626 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
7627 {
7628 }
7629 
7630 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
7631 {
7632  VmaBlockMetadata::Init(size);
7633  m_SumFreeSize = size;
7634 }
7635 
7636 bool VmaBlockMetadata_Linear::Validate() const
7637 {
7638  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7639  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7640 
7641  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
7642  VMA_VALIDATE(!suballocations1st.empty() ||
7643  suballocations2nd.empty() ||
7644  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
7645 
7646  if(!suballocations1st.empty())
7647  {
7648  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
7649  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
7650  // Null item at the end should be just pop_back().
7651  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
7652  }
7653  if(!suballocations2nd.empty())
7654  {
7655  // Null item at the end should be just pop_back().
7656  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
7657  }
7658 
7659  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
7660  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
7661 
7662  VkDeviceSize sumUsedSize = 0;
7663  const size_t suballoc1stCount = suballocations1st.size();
7664  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7665 
7666  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7667  {
7668  const size_t suballoc2ndCount = suballocations2nd.size();
7669  size_t nullItem2ndCount = 0;
7670  for(size_t i = 0; i < suballoc2ndCount; ++i)
7671  {
7672  const VmaSuballocation& suballoc = suballocations2nd[i];
7673  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7674 
7675  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7676  VMA_VALIDATE(suballoc.offset >= offset);
7677 
7678  if(!currFree)
7679  {
7680  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7681  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7682  sumUsedSize += suballoc.size;
7683  }
7684  else
7685  {
7686  ++nullItem2ndCount;
7687  }
7688 
7689  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7690  }
7691 
7692  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7693  }
7694 
7695  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7696  {
7697  const VmaSuballocation& suballoc = suballocations1st[i];
7698  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
7699  suballoc.hAllocation == VK_NULL_HANDLE);
7700  }
7701 
7702  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7703 
7704  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7705  {
7706  const VmaSuballocation& suballoc = suballocations1st[i];
7707  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7708 
7709  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7710  VMA_VALIDATE(suballoc.offset >= offset);
7711  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
7712 
7713  if(!currFree)
7714  {
7715  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7716  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7717  sumUsedSize += suballoc.size;
7718  }
7719  else
7720  {
7721  ++nullItem1stCount;
7722  }
7723 
7724  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7725  }
7726  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
7727 
7728  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7729  {
7730  const size_t suballoc2ndCount = suballocations2nd.size();
7731  size_t nullItem2ndCount = 0;
7732  for(size_t i = suballoc2ndCount; i--; )
7733  {
7734  const VmaSuballocation& suballoc = suballocations2nd[i];
7735  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7736 
7737  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
7738  VMA_VALIDATE(suballoc.offset >= offset);
7739 
7740  if(!currFree)
7741  {
7742  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
7743  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
7744  sumUsedSize += suballoc.size;
7745  }
7746  else
7747  {
7748  ++nullItem2ndCount;
7749  }
7750 
7751  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7752  }
7753 
7754  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
7755  }
7756 
7757  VMA_VALIDATE(offset <= GetSize());
7758  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
7759 
7760  return true;
7761 }
7762 
7763 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7764 {
7765  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7766  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7767 }
7768 
7769 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
7770 {
7771  const VkDeviceSize size = GetSize();
7772 
7773  /*
7774  We don't consider gaps inside allocation vectors with freed allocations because
7775  they are not suitable for reuse in a linear allocator. We consider only space that
7776  is available for new allocations.
7777  */
7778  if(IsEmpty())
7779  {
7780  return size;
7781  }
7782 
7783  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7784 
7785  switch(m_2ndVectorMode)
7786  {
7787  case SECOND_VECTOR_EMPTY:
7788  /*
7789  Available space is after end of 1st, as well as before beginning of 1st (which
7790  would make it a ring buffer).
7791  */
7792  {
7793  const size_t suballocations1stCount = suballocations1st.size();
7794  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
7795  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
7796  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
7797  return VMA_MAX(
7798  firstSuballoc.offset,
7799  size - (lastSuballoc.offset + lastSuballoc.size));
7800  }
7801  break;
7802 
7803  case SECOND_VECTOR_RING_BUFFER:
7804  /*
7805  Available space is only between end of 2nd and beginning of 1st.
7806  */
7807  {
7808  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7809  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
7810  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
7811  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
7812  }
7813  break;
7814 
7815  case SECOND_VECTOR_DOUBLE_STACK:
7816  /*
7817  Available space is only between end of 1st and top of 2nd.
7818  */
7819  {
7820  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7821  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
7822  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
7823  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
7824  }
7825  break;
7826 
7827  default:
7828  VMA_ASSERT(0);
7829  return 0;
7830  }
7831 }
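
// Editor's note: illustrative layouts of the three modes (lower offsets left):
//   SECOND_VECTOR_EMPTY:        [ free | 1st allocations | free ]
//   SECOND_VECTOR_RING_BUFFER:  [ 2nd allocations | free | 1st allocations ]
//   SECOND_VECTOR_DOUBLE_STACK: [ 1st (grows right) | free | 2nd (grows left) ]
// The function reports the largest contiguous span usable for new allocations,
// which is why gaps left by freed allocations inside the vectors are ignored.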
7832 
7833 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7834 {
7835  const VkDeviceSize size = GetSize();
7836  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7838  const size_t suballoc1stCount = suballocations1st.size();
7839  const size_t suballoc2ndCount = suballocations2nd.size();
7840 
7841  outInfo.blockCount = 1;
7842  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7843  outInfo.unusedRangeCount = 0;
7844  outInfo.usedBytes = 0;
7845  outInfo.allocationSizeMin = UINT64_MAX;
7846  outInfo.allocationSizeMax = 0;
7847  outInfo.unusedRangeSizeMin = UINT64_MAX;
7848  outInfo.unusedRangeSizeMax = 0;
7849 
7850  VkDeviceSize lastOffset = 0;
7851 
7852  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7853  {
7854  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7855  size_t nextAlloc2ndIndex = 0;
7856  while(lastOffset < freeSpace2ndTo1stEnd)
7857  {
7858  // Find next non-null allocation or move nextAllocIndex to the end.
7859  while(nextAlloc2ndIndex < suballoc2ndCount &&
7860  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7861  {
7862  ++nextAlloc2ndIndex;
7863  }
7864 
7865  // Found non-null allocation.
7866  if(nextAlloc2ndIndex < suballoc2ndCount)
7867  {
7868  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7869 
7870  // 1. Process free space before this allocation.
7871  if(lastOffset < suballoc.offset)
7872  {
7873  // There is free space from lastOffset to suballoc.offset.
7874  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7875  ++outInfo.unusedRangeCount;
7876  outInfo.unusedBytes += unusedRangeSize;
7877  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7878  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7879  }
7880 
7881  // 2. Process this allocation.
7882  // There is allocation with suballoc.offset, suballoc.size.
7883  outInfo.usedBytes += suballoc.size;
7884  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7885  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7886 
7887  // 3. Prepare for next iteration.
7888  lastOffset = suballoc.offset + suballoc.size;
7889  ++nextAlloc2ndIndex;
7890  }
7891  // We are at the end.
7892  else
7893  {
7894  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7895  if(lastOffset < freeSpace2ndTo1stEnd)
7896  {
7897  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7898  ++outInfo.unusedRangeCount;
7899  outInfo.unusedBytes += unusedRangeSize;
7900  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7901  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7902  }
7903 
7904  // End of loop.
7905  lastOffset = freeSpace2ndTo1stEnd;
7906  }
7907  }
7908  }
7909 
7910  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7911  const VkDeviceSize freeSpace1stTo2ndEnd =
7912  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7913  while(lastOffset < freeSpace1stTo2ndEnd)
7914  {
7915  // Find next non-null allocation or move nextAllocIndex to the end.
7916  while(nextAlloc1stIndex < suballoc1stCount &&
7917  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7918  {
7919  ++nextAlloc1stIndex;
7920  }
7921 
7922  // Found non-null allocation.
7923  if(nextAlloc1stIndex < suballoc1stCount)
7924  {
7925  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7926 
7927  // 1. Process free space before this allocation.
7928  if(lastOffset < suballoc.offset)
7929  {
7930  // There is free space from lastOffset to suballoc.offset.
7931  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7932  ++outInfo.unusedRangeCount;
7933  outInfo.unusedBytes += unusedRangeSize;
7934  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7936  }
7937 
7938  // 2. Process this allocation.
7939  // There is allocation with suballoc.offset, suballoc.size.
7940  outInfo.usedBytes += suballoc.size;
7941  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7942  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7943 
7944  // 3. Prepare for next iteration.
7945  lastOffset = suballoc.offset + suballoc.size;
7946  ++nextAlloc1stIndex;
7947  }
7948  // We are at the end.
7949  else
7950  {
7951  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7952  if(lastOffset < freeSpace1stTo2ndEnd)
7953  {
7954  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7955  ++outInfo.unusedRangeCount;
7956  outInfo.unusedBytes += unusedRangeSize;
7957  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7958  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7959  }
7960 
7961  // End of loop.
7962  lastOffset = freeSpace1stTo2ndEnd;
7963  }
7964  }
7965 
7966  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7967  {
7968  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7969  while(lastOffset < size)
7970  {
7971  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7972  while(nextAlloc2ndIndex != SIZE_MAX &&
7973  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7974  {
7975  --nextAlloc2ndIndex;
7976  }
7977 
7978  // Found non-null allocation.
7979  if(nextAlloc2ndIndex != SIZE_MAX)
7980  {
7981  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7982 
7983  // 1. Process free space before this allocation.
7984  if(lastOffset < suballoc.offset)
7985  {
7986  // There is free space from lastOffset to suballoc.offset.
7987  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7988  ++outInfo.unusedRangeCount;
7989  outInfo.unusedBytes += unusedRangeSize;
7990  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7991  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
7992  }
7993 
7994  // 2. Process this allocation.
7995  // There is an allocation at suballoc.offset with size suballoc.size.
7996  outInfo.usedBytes += suballoc.size;
7997  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7998  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
7999 
8000  // 3. Prepare for next iteration.
8001  lastOffset = suballoc.offset + suballoc.size;
8002  --nextAlloc2ndIndex;
8003  }
8004  // We are at the end.
8005  else
8006  {
8007  // There is free space from lastOffset to size.
8008  if(lastOffset < size)
8009  {
8010  const VkDeviceSize unusedRangeSize = size - lastOffset;
8011  ++outInfo.unusedRangeCount;
8012  outInfo.unusedBytes += unusedRangeSize;
8013  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8014  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
8015  }
8016 
8017  // End of loop.
8018  lastOffset = size;
8019  }
8020  }
8021  }
8022 
8023  outInfo.unusedBytes = size - outInfo.usedBytes;
8024 }
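// [Editor's note] The walk above (repeated in AddPoolStats and PrintDetailedMap below)
// always covers the block's address space [0, size) in up to three passes: the ring-buffer
// part of the 2nd vector at the bottom, then the 1st vector, then the double-stack part of
// the 2nd vector growing down from the end. The self-contained sketch below shows the core
// of that walk for a single sorted vector; RangeSketch/WalkRangesSketch are hypothetical
// names for illustration, not part of the VMA API.
#include <algorithm>
#include <cstdint>
#include <vector>

struct RangeSketch { uint64_t offset, size; };

struct RangeStatsSketch
{
    uint64_t usedBytes = 0, unusedBytes = 0, unusedRangeCount = 0;
    uint64_t unusedRangeSizeMin = UINT64_MAX, unusedRangeSizeMax = 0;
};

inline RangeStatsSketch WalkRangesSketch(const std::vector<RangeSketch>& sortedAllocs, uint64_t blockSize)
{
    RangeStatsSketch s;
    uint64_t lastOffset = 0;
    for(const RangeSketch& a : sortedAllocs)
    {
        if(lastOffset < a.offset) // 1. Free gap before this allocation.
        {
            const uint64_t gap = a.offset - lastOffset;
            ++s.unusedRangeCount;
            s.unusedBytes += gap;
            s.unusedRangeSizeMin = std::min(s.unusedRangeSizeMin, gap);
            s.unusedRangeSizeMax = std::max(s.unusedRangeSizeMax, gap);
        }
        s.usedBytes += a.size;          // 2. The allocation itself.
        lastOffset = a.offset + a.size; // 3. Prepare for next iteration.
    }
    if(lastOffset < blockSize) // Trailing free space up to the end of the block.
    {
        const uint64_t gap = blockSize - lastOffset;
        ++s.unusedRangeCount;
        s.unusedBytes += gap;
        s.unusedRangeSizeMin = std::min(s.unusedRangeSizeMin, gap);
        s.unusedRangeSizeMax = std::max(s.unusedRangeSizeMax, gap);
    }
    return s;
}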
8025 
8026 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8027 {
8028  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8029  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8030  const VkDeviceSize size = GetSize();
8031  const size_t suballoc1stCount = suballocations1st.size();
8032  const size_t suballoc2ndCount = suballocations2nd.size();
8033 
8034  inoutStats.size += size;
8035 
8036  VkDeviceSize lastOffset = 0;
8037 
8038  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8039  {
8040  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8041  size_t nextAlloc2ndIndex = 0; // Iteration over suballocations2nd starts at its beginning; m_1stNullItemsBeginCount indexes the 1st vector only.
8042  while(lastOffset < freeSpace2ndTo1stEnd)
8043  {
8044  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8045  while(nextAlloc2ndIndex < suballoc2ndCount &&
8046  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8047  {
8048  ++nextAlloc2ndIndex;
8049  }
8050 
8051  // Found non-null allocation.
8052  if(nextAlloc2ndIndex < suballoc2ndCount)
8053  {
8054  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8055 
8056  // 1. Process free space before this allocation.
8057  if(lastOffset < suballoc.offset)
8058  {
8059  // There is free space from lastOffset to suballoc.offset.
8060  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8061  inoutStats.unusedSize += unusedRangeSize;
8062  ++inoutStats.unusedRangeCount;
8063  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8064  }
8065 
8066  // 2. Process this allocation.
8067  // There is an allocation at suballoc.offset with size suballoc.size.
8068  ++inoutStats.allocationCount;
8069 
8070  // 3. Prepare for next iteration.
8071  lastOffset = suballoc.offset + suballoc.size;
8072  ++nextAlloc2ndIndex;
8073  }
8074  // We are at the end.
8075  else
8076  {
8077  if(lastOffset < freeSpace2ndTo1stEnd)
8078  {
8079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8080  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8081  inoutStats.unusedSize += unusedRangeSize;
8082  ++inoutStats.unusedRangeCount;
8083  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8084  }
8085 
8086  // End of loop.
8087  lastOffset = freeSpace2ndTo1stEnd;
8088  }
8089  }
8090  }
8091 
8092  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8093  const VkDeviceSize freeSpace1stTo2ndEnd =
8094  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8095  while(lastOffset < freeSpace1stTo2ndEnd)
8096  {
8097  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8098  while(nextAlloc1stIndex < suballoc1stCount &&
8099  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8100  {
8101  ++nextAlloc1stIndex;
8102  }
8103 
8104  // Found non-null allocation.
8105  if(nextAlloc1stIndex < suballoc1stCount)
8106  {
8107  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8108 
8109  // 1. Process free space before this allocation.
8110  if(lastOffset < suballoc.offset)
8111  {
8112  // There is free space from lastOffset to suballoc.offset.
8113  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8114  inoutStats.unusedSize += unusedRangeSize;
8115  ++inoutStats.unusedRangeCount;
8116  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8117  }
8118 
8119  // 2. Process this allocation.
8120  // There is an allocation at suballoc.offset with size suballoc.size.
8121  ++inoutStats.allocationCount;
8122 
8123  // 3. Prepare for next iteration.
8124  lastOffset = suballoc.offset + suballoc.size;
8125  ++nextAlloc1stIndex;
8126  }
8127  // We are at the end.
8128  else
8129  {
8130  if(lastOffset < freeSpace1stTo2ndEnd)
8131  {
8132  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8133  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8134  inoutStats.unusedSize += unusedRangeSize;
8135  ++inoutStats.unusedRangeCount;
8136  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8137  }
8138 
8139  // End of loop.
8140  lastOffset = freeSpace1stTo2ndEnd;
8141  }
8142  }
8143 
8144  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8145  {
8146  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8147  while(lastOffset < size)
8148  {
8149  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8150  while(nextAlloc2ndIndex != SIZE_MAX &&
8151  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8152  {
8153  --nextAlloc2ndIndex;
8154  }
8155 
8156  // Found non-null allocation.
8157  if(nextAlloc2ndIndex != SIZE_MAX)
8158  {
8159  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8160 
8161  // 1. Process free space before this allocation.
8162  if(lastOffset < suballoc.offset)
8163  {
8164  // There is free space from lastOffset to suballoc.offset.
8165  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8166  inoutStats.unusedSize += unusedRangeSize;
8167  ++inoutStats.unusedRangeCount;
8168  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8169  }
8170 
8171  // 2. Process this allocation.
8172  // There is an allocation at suballoc.offset with size suballoc.size.
8173  ++inoutStats.allocationCount;
8174 
8175  // 3. Prepare for next iteration.
8176  lastOffset = suballoc.offset + suballoc.size;
8177  --nextAlloc2ndIndex;
8178  }
8179  // We are at the end.
8180  else
8181  {
8182  if(lastOffset < size)
8183  {
8184  // There is free space from lastOffset to size.
8185  const VkDeviceSize unusedRangeSize = size - lastOffset;
8186  inoutStats.unusedSize += unusedRangeSize;
8187  ++inoutStats.unusedRangeCount;
8188  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8189  }
8190 
8191  // End of loop.
8192  lastOffset = size;
8193  }
8194  }
8195  }
8196 }
8197 
8198 #if VMA_STATS_STRING_ENABLED
8199 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8200 {
8201  const VkDeviceSize size = GetSize();
8202  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8203  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8204  const size_t suballoc1stCount = suballocations1st.size();
8205  const size_t suballoc2ndCount = suballocations2nd.size();
8206 
8207  // FIRST PASS
8208 
8209  size_t unusedRangeCount = 0;
8210  VkDeviceSize usedBytes = 0;
8211 
8212  VkDeviceSize lastOffset = 0;
8213 
8214  size_t alloc2ndCount = 0;
8215  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8216  {
8217  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8218  size_t nextAlloc2ndIndex = 0;
8219  while(lastOffset < freeSpace2ndTo1stEnd)
8220  {
8221  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8222  while(nextAlloc2ndIndex < suballoc2ndCount &&
8223  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8224  {
8225  ++nextAlloc2ndIndex;
8226  }
8227 
8228  // Found non-null allocation.
8229  if(nextAlloc2ndIndex < suballoc2ndCount)
8230  {
8231  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8232 
8233  // 1. Process free space before this allocation.
8234  if(lastOffset < suballoc.offset)
8235  {
8236  // There is free space from lastOffset to suballoc.offset.
8237  ++unusedRangeCount;
8238  }
8239 
8240  // 2. Process this allocation.
8241  // There is an allocation at suballoc.offset with size suballoc.size.
8242  ++alloc2ndCount;
8243  usedBytes += suballoc.size;
8244 
8245  // 3. Prepare for next iteration.
8246  lastOffset = suballoc.offset + suballoc.size;
8247  ++nextAlloc2ndIndex;
8248  }
8249  // We are at the end.
8250  else
8251  {
8252  if(lastOffset < freeSpace2ndTo1stEnd)
8253  {
8254  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8255  ++unusedRangeCount;
8256  }
8257 
8258  // End of loop.
8259  lastOffset = freeSpace2ndTo1stEnd;
8260  }
8261  }
8262  }
8263 
8264  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8265  size_t alloc1stCount = 0;
8266  const VkDeviceSize freeSpace1stTo2ndEnd =
8267  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8268  while(lastOffset < freeSpace1stTo2ndEnd)
8269  {
8270  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8271  while(nextAlloc1stIndex < suballoc1stCount &&
8272  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8273  {
8274  ++nextAlloc1stIndex;
8275  }
8276 
8277  // Found non-null allocation.
8278  if(nextAlloc1stIndex < suballoc1stCount)
8279  {
8280  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8281 
8282  // 1. Process free space before this allocation.
8283  if(lastOffset < suballoc.offset)
8284  {
8285  // There is free space from lastOffset to suballoc.offset.
8286  ++unusedRangeCount;
8287  }
8288 
8289  // 2. Process this allocation.
8290  // There is an allocation at suballoc.offset with size suballoc.size.
8291  ++alloc1stCount;
8292  usedBytes += suballoc.size;
8293 
8294  // 3. Prepare for next iteration.
8295  lastOffset = suballoc.offset + suballoc.size;
8296  ++nextAlloc1stIndex;
8297  }
8298  // We are at the end.
8299  else
8300  {
8301  if(lastOffset < freeSpace1stTo2ndEnd)
8302  {
8303  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8304  ++unusedRangeCount;
8305  }
8306 
8307  // End of loop.
8308  lastOffset = freeSpace1stTo2ndEnd;
8309  }
8310  }
8311 
8312  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8313  {
8314  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8315  while(lastOffset < size)
8316  {
8317  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8318  while(nextAlloc2ndIndex != SIZE_MAX &&
8319  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8320  {
8321  --nextAlloc2ndIndex;
8322  }
8323 
8324  // Found non-null allocation.
8325  if(nextAlloc2ndIndex != SIZE_MAX)
8326  {
8327  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8328 
8329  // 1. Process free space before this allocation.
8330  if(lastOffset < suballoc.offset)
8331  {
8332  // There is free space from lastOffset to suballoc.offset.
8333  ++unusedRangeCount;
8334  }
8335 
8336  // 2. Process this allocation.
8337  // There is an allocation at suballoc.offset with size suballoc.size.
8338  ++alloc2ndCount;
8339  usedBytes += suballoc.size;
8340 
8341  // 3. Prepare for next iteration.
8342  lastOffset = suballoc.offset + suballoc.size;
8343  --nextAlloc2ndIndex;
8344  }
8345  // We are at the end.
8346  else
8347  {
8348  if(lastOffset < size)
8349  {
8350  // There is free space from lastOffset to size.
8351  ++unusedRangeCount;
8352  }
8353 
8354  // End of loop.
8355  lastOffset = size;
8356  }
8357  }
8358  }
8359 
8360  const VkDeviceSize unusedBytes = size - usedBytes;
8361  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8362 
8363  // SECOND PASS
8364  lastOffset = 0;
8365 
8366  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8367  {
8368  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8369  size_t nextAlloc2ndIndex = 0;
8370  while(lastOffset < freeSpace2ndTo1stEnd)
8371  {
8372  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8373  while(nextAlloc2ndIndex < suballoc2ndCount &&
8374  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8375  {
8376  ++nextAlloc2ndIndex;
8377  }
8378 
8379  // Found non-null allocation.
8380  if(nextAlloc2ndIndex < suballoc2ndCount)
8381  {
8382  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8383 
8384  // 1. Process free space before this allocation.
8385  if(lastOffset < suballoc.offset)
8386  {
8387  // There is free space from lastOffset to suballoc.offset.
8388  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8389  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8390  }
8391 
8392  // 2. Process this allocation.
8393  // There is an allocation at suballoc.offset with size suballoc.size.
8394  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8395 
8396  // 3. Prepare for next iteration.
8397  lastOffset = suballoc.offset + suballoc.size;
8398  ++nextAlloc2ndIndex;
8399  }
8400  // We are at the end.
8401  else
8402  {
8403  if(lastOffset < freeSpace2ndTo1stEnd)
8404  {
8405  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8406  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8407  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8408  }
8409 
8410  // End of loop.
8411  lastOffset = freeSpace2ndTo1stEnd;
8412  }
8413  }
8414  }
8415 
8416  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8417  while(lastOffset < freeSpace1stTo2ndEnd)
8418  {
8419  // Find next non-null allocation or move nextAlloc1stIndex to the end.
8420  while(nextAlloc1stIndex < suballoc1stCount &&
8421  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8422  {
8423  ++nextAlloc1stIndex;
8424  }
8425 
8426  // Found non-null allocation.
8427  if(nextAlloc1stIndex < suballoc1stCount)
8428  {
8429  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8430 
8431  // 1. Process free space before this allocation.
8432  if(lastOffset < suballoc.offset)
8433  {
8434  // There is free space from lastOffset to suballoc.offset.
8435  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8436  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8437  }
8438 
8439  // 2. Process this allocation.
8440  // There is an allocation at suballoc.offset with size suballoc.size.
8441  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8442 
8443  // 3. Prepare for next iteration.
8444  lastOffset = suballoc.offset + suballoc.size;
8445  ++nextAlloc1stIndex;
8446  }
8447  // We are at the end.
8448  else
8449  {
8450  if(lastOffset < freeSpace1stTo2ndEnd)
8451  {
8452  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8453  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8454  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8455  }
8456 
8457  // End of loop.
8458  lastOffset = freeSpace1stTo2ndEnd;
8459  }
8460  }
8461 
8462  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8463  {
8464  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8465  while(lastOffset < size)
8466  {
8467  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8468  while(nextAlloc2ndIndex != SIZE_MAX &&
8469  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8470  {
8471  --nextAlloc2ndIndex;
8472  }
8473 
8474  // Found non-null allocation.
8475  if(nextAlloc2ndIndex != SIZE_MAX)
8476  {
8477  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8478 
8479  // 1. Process free space before this allocation.
8480  if(lastOffset < suballoc.offset)
8481  {
8482  // There is free space from lastOffset to suballoc.offset.
8483  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8484  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8485  }
8486 
8487  // 2. Process this allocation.
8488  // There is an allocation at suballoc.offset with size suballoc.size.
8489  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8490 
8491  // 3. Prepare for next iteration.
8492  lastOffset = suballoc.offset + suballoc.size;
8493  --nextAlloc2ndIndex;
8494  }
8495  // We are at the end.
8496  else
8497  {
8498  if(lastOffset < size)
8499  {
8500  // There is free space from lastOffset to size.
8501  const VkDeviceSize unusedRangeSize = size - lastOffset;
8502  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8503  }
8504 
8505  // End of loop.
8506  lastOffset = size;
8507  }
8508  }
8509  }
8510 
8511  PrintDetailedMap_End(json);
8512 }
8513 #endif // #if VMA_STATS_STRING_ENABLED
8514 
8515 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8516  uint32_t currentFrameIndex,
8517  uint32_t frameInUseCount,
8518  VkDeviceSize bufferImageGranularity,
8519  VkDeviceSize allocSize,
8520  VkDeviceSize allocAlignment,
8521  bool upperAddress,
8522  VmaSuballocationType allocType,
8523  bool canMakeOtherLost,
8524  uint32_t strategy,
8525  VmaAllocationRequest* pAllocationRequest)
8526 {
8527  VMA_ASSERT(allocSize > 0);
8528  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8529  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8530  VMA_HEAVY_ASSERT(Validate());
8531 
8532  const VkDeviceSize size = GetSize();
8533  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8534  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8535 
8536  if(upperAddress)
8537  {
8538  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8539  {
8540  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8541  return false;
8542  }
8543 
8544  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8545  if(allocSize > size)
8546  {
8547  return false;
8548  }
8549  VkDeviceSize resultBaseOffset = size - allocSize;
8550  if(!suballocations2nd.empty())
8551  {
8552  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8553  resultBaseOffset = lastSuballoc.offset - allocSize;
8554  if(allocSize > lastSuballoc.offset)
8555  {
8556  return false;
8557  }
8558  }
8559 
8560  // Start from offset equal to end of free space.
8561  VkDeviceSize resultOffset = resultBaseOffset;
8562 
8563  // Apply VMA_DEBUG_MARGIN at the end.
8564  if(VMA_DEBUG_MARGIN > 0)
8565  {
8566  if(resultOffset < VMA_DEBUG_MARGIN)
8567  {
8568  return false;
8569  }
8570  resultOffset -= VMA_DEBUG_MARGIN;
8571  }
8572 
8573  // Apply alignment.
8574  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8575 
8576  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8577  // Make bigger alignment if necessary.
8578  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8579  {
8580  bool bufferImageGranularityConflict = false;
8581  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8582  {
8583  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8584  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8585  {
8586  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8587  {
8588  bufferImageGranularityConflict = true;
8589  break;
8590  }
8591  }
8592  else
8593  // Already on previous page.
8594  break;
8595  }
8596  if(bufferImageGranularityConflict)
8597  {
8598  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8599  }
8600  }
8601 
8602  // There is enough free space.
8603  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8604  suballocations1st.back().offset + suballocations1st.back().size :
8605  0;
8606  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8607  {
8608  // Check previous suballocations for BufferImageGranularity conflicts.
8609  // If conflict exists, allocation cannot be made here.
8610  if(bufferImageGranularity > 1)
8611  {
8612  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8613  {
8614  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8615  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8616  {
8617  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8618  {
8619  return false;
8620  }
8621  }
8622  else
8623  {
8624  // Already on next page.
8625  break;
8626  }
8627  }
8628  }
8629 
8630  // All tests passed: Success.
8631  pAllocationRequest->offset = resultOffset;
8632  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8633  pAllocationRequest->sumItemSize = 0;
8634  // pAllocationRequest->item unused.
8635  pAllocationRequest->itemsToMakeLostCount = 0;
8636  return true;
8637  }
8638  }
8639  else // !upperAddress
8640  {
8641  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8642  {
8643  // Try to allocate at the end of 1st vector.
8644 
8645  VkDeviceSize resultBaseOffset = 0;
8646  if(!suballocations1st.empty())
8647  {
8648  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8649  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8650  }
8651 
8652  // Start from offset equal to beginning of free space.
8653  VkDeviceSize resultOffset = resultBaseOffset;
8654 
8655  // Apply VMA_DEBUG_MARGIN at the beginning.
8656  if(VMA_DEBUG_MARGIN > 0)
8657  {
8658  resultOffset += VMA_DEBUG_MARGIN;
8659  }
8660 
8661  // Apply alignment.
8662  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8663 
8664  // Check previous suballocations for BufferImageGranularity conflicts.
8665  // Make bigger alignment if necessary.
8666  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8667  {
8668  bool bufferImageGranularityConflict = false;
8669  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8670  {
8671  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8672  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8673  {
8674  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8675  {
8676  bufferImageGranularityConflict = true;
8677  break;
8678  }
8679  }
8680  else
8681  // Already on previous page.
8682  break;
8683  }
8684  if(bufferImageGranularityConflict)
8685  {
8686  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8687  }
8688  }
8689 
8690  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8691  suballocations2nd.back().offset : size;
8692 
8693  // There is enough free space at the end after alignment.
8694  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8695  {
8696  // Check next suballocations for BufferImageGranularity conflicts.
8697  // If conflict exists, allocation cannot be made here.
8698  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8699  {
8700  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8701  {
8702  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8703  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8704  {
8705  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8706  {
8707  return false;
8708  }
8709  }
8710  else
8711  {
8712  // Already on previous page.
8713  break;
8714  }
8715  }
8716  }
8717 
8718  // All tests passed: Success.
8719  pAllocationRequest->offset = resultOffset;
8720  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8721  pAllocationRequest->sumItemSize = 0;
8722  // pAllocationRequest->item unused.
8723  pAllocationRequest->itemsToMakeLostCount = 0;
8724  return true;
8725  }
8726  }
8727 
8728  // Wrap around to the end of the 2nd vector. Try to allocate there, treating the
8729  // beginning of the 1st vector as the end of the free space.
8730  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8731  {
8732  VMA_ASSERT(!suballocations1st.empty());
8733 
8734  VkDeviceSize resultBaseOffset = 0;
8735  if(!suballocations2nd.empty())
8736  {
8737  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8738  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8739  }
8740 
8741  // Start from offset equal to beginning of free space.
8742  VkDeviceSize resultOffset = resultBaseOffset;
8743 
8744  // Apply VMA_DEBUG_MARGIN at the beginning.
8745  if(VMA_DEBUG_MARGIN > 0)
8746  {
8747  resultOffset += VMA_DEBUG_MARGIN;
8748  }
8749 
8750  // Apply alignment.
8751  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8752 
8753  // Check previous suballocations for BufferImageGranularity conflicts.
8754  // Make bigger alignment if necessary.
8755  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8756  {
8757  bool bufferImageGranularityConflict = false;
8758  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8759  {
8760  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8761  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8762  {
8763  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8764  {
8765  bufferImageGranularityConflict = true;
8766  break;
8767  }
8768  }
8769  else
8770  // Already on previous page.
8771  break;
8772  }
8773  if(bufferImageGranularityConflict)
8774  {
8775  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8776  }
8777  }
8778 
8779  pAllocationRequest->itemsToMakeLostCount = 0;
8780  pAllocationRequest->sumItemSize = 0;
8781  size_t index1st = m_1stNullItemsBeginCount;
8782 
8783  if(canMakeOtherLost)
8784  {
8785  while(index1st < suballocations1st.size() &&
8786  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8787  {
8788  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8789  const VmaSuballocation& suballoc = suballocations1st[index1st];
8790  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8791  {
8792  // No problem.
8793  }
8794  else
8795  {
8796  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8797  if(suballoc.hAllocation->CanBecomeLost() &&
8798  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8799  {
8800  ++pAllocationRequest->itemsToMakeLostCount;
8801  pAllocationRequest->sumItemSize += suballoc.size;
8802  }
8803  else
8804  {
8805  return false;
8806  }
8807  }
8808  ++index1st;
8809  }
8810 
8811  // Check next suballocations for BufferImageGranularity conflicts.
8812  // If conflict exists, we must mark more allocations lost or fail.
8813  if(bufferImageGranularity > 1)
8814  {
8815  while(index1st < suballocations1st.size())
8816  {
8817  const VmaSuballocation& suballoc = suballocations1st[index1st];
8818  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8819  {
8820  if(suballoc.hAllocation != VK_NULL_HANDLE)
8821  {
8822  // Conservative: treat any allocation on the same page as a conflict, without checking the actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8823  if(suballoc.hAllocation->CanBecomeLost() &&
8824  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8825  {
8826  ++pAllocationRequest->itemsToMakeLostCount;
8827  pAllocationRequest->sumItemSize += suballoc.size;
8828  }
8829  else
8830  {
8831  return false;
8832  }
8833  }
8834  }
8835  else
8836  {
8837  // Already on next page.
8838  break;
8839  }
8840  ++index1st;
8841  }
8842  }
8843  }
8844 
8845  // There is enough free space at the end after alignment.
8846  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8847  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8848  {
8849  // Check next suballocations for BufferImageGranularity conflicts.
8850  // If conflict exists, allocation cannot be made here.
8851  if(bufferImageGranularity > 1)
8852  {
8853  for(size_t nextSuballocIndex = index1st;
8854  nextSuballocIndex < suballocations1st.size();
8855  nextSuballocIndex++)
8856  {
8857  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8858  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8859  {
8860  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8861  {
8862  return false;
8863  }
8864  }
8865  else
8866  {
8867  // Already on next page.
8868  break;
8869  }
8870  }
8871  }
8872 
8873  // All tests passed: Success.
8874  pAllocationRequest->offset = resultOffset;
8875  pAllocationRequest->sumFreeSize =
8876  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8877  - resultBaseOffset
8878  - pAllocationRequest->sumItemSize;
8879  // pAllocationRequest->item unused.
8880  return true;
8881  }
8882  }
8883  }
8884 
8885  return false;
8886 }
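// [Editor's note] A minimal sketch of the offset arithmetic used by CreateAllocationRequest
// above, assuming power-of-two alignments as Vulkan guarantees. VmaAlignUp/VmaAlignDown are
// defined earlier in this file; the *Sketch names below are illustrative stand-ins. The debug
// margin is applied before rounding, so rounding can only move the offset further away from
// the neighboring allocation and the margin is preserved.
#include <cassert>
#include <cstdint>

inline uint64_t AlignUpSketch(uint64_t v, uint64_t a)   { return (v + a - 1) & ~(a - 1); }
inline uint64_t AlignDownSketch(uint64_t v, uint64_t a) { return v & ~(a - 1); }

// Lower stack / ring buffer: allocate just past the end of the previous allocation.
inline uint64_t PlaceLowerSketch(uint64_t freeSpaceBegin, uint64_t debugMargin, uint64_t alignment)
{
    return AlignUpSketch(freeSpaceBegin + debugMargin, alignment);
}

// Upper stack: allocate just below the previous top-of-stack allocation.
inline uint64_t PlaceUpperSketch(uint64_t freeSpaceEnd, uint64_t allocSize, uint64_t debugMargin, uint64_t alignment)
{
    assert(freeSpaceEnd >= allocSize + debugMargin);
    return AlignDownSketch(freeSpaceEnd - allocSize - debugMargin, alignment);
}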
8887 
8888 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
8889  uint32_t currentFrameIndex,
8890  uint32_t frameInUseCount,
8891  VmaAllocationRequest* pAllocationRequest)
8892 {
8893  if(pAllocationRequest->itemsToMakeLostCount == 0)
8894  {
8895  return true;
8896  }
8897 
8898  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
8899 
8900  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8901  size_t index1st = m_1stNullItemsBeginCount;
8902  size_t madeLostCount = 0;
8903  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
8904  {
8905  VMA_ASSERT(index1st < suballocations1st.size());
8906  VmaSuballocation& suballoc = suballocations1st[index1st];
8907  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8908  {
8909  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8910  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
8911  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8912  {
8913  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8914  suballoc.hAllocation = VK_NULL_HANDLE;
8915  m_SumFreeSize += suballoc.size;
8916  ++m_1stNullItemsMiddleCount;
8917  ++madeLostCount;
8918  }
8919  else
8920  {
8921  return false;
8922  }
8923  }
8924  ++index1st;
8925  }
8926 
8927  CleanupAfterFree();
8928  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
8929 
8930  return true;
8931 }
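// [Editor's note] A sketch of the frame-age test behind the CanBecomeLost handling above:
// an allocation created with the "can become lost" flag may be sacrificed only when its
// last use is at least frameInUseCount frames older than the current frame. The function
// name is hypothetical; the comparison matches the one used in CreateAllocationRequest.
#include <cstdint>

inline bool IsOldEnoughToMakeLostSketch(
    uint32_t lastUseFrameIndex, uint32_t frameInUseCount, uint32_t currentFrameIndex)
{
    // E.g. with frameInUseCount == 2: an allocation last used in frame 5
    // can be made lost starting from frame 8 (5 + 2 < 8).
    return lastUseFrameIndex + frameInUseCount < currentFrameIndex;
}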
8932 
8933 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8934 {
8935  uint32_t lostAllocationCount = 0;
8936 
8937  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8938  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8939  {
8940  VmaSuballocation& suballoc = suballocations1st[i];
8941  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8942  suballoc.hAllocation->CanBecomeLost() &&
8943  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8944  {
8945  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8946  suballoc.hAllocation = VK_NULL_HANDLE;
8947  ++m_1stNullItemsMiddleCount;
8948  m_SumFreeSize += suballoc.size;
8949  ++lostAllocationCount;
8950  }
8951  }
8952 
8953  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8954  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8955  {
8956  VmaSuballocation& suballoc = suballocations2nd[i];
8957  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8958  suballoc.hAllocation->CanBecomeLost() &&
8959  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8960  {
8961  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8962  suballoc.hAllocation = VK_NULL_HANDLE;
8963  ++m_2ndNullItemsCount;
8964  ++lostAllocationCount;
8965  }
8966  }
8967 
8968  if(lostAllocationCount)
8969  {
8970  CleanupAfterFree();
8971  }
8972 
8973  return lostAllocationCount;
8974 }
8975 
8976 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8977 {
8978  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8979  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8980  {
8981  const VmaSuballocation& suballoc = suballocations1st[i];
8982  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8983  {
8984  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8985  {
8986  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8987  return VK_ERROR_VALIDATION_FAILED_EXT;
8988  }
8989  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8990  {
8991  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8992  return VK_ERROR_VALIDATION_FAILED_EXT;
8993  }
8994  }
8995  }
8996 
8997  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8998  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8999  {
9000  const VmaSuballocation& suballoc = suballocations2nd[i];
9001  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9002  {
9003  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9004  {
9005  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9006  return VK_ERROR_VALIDATION_FAILED_EXT;
9007  }
9008  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9009  {
9010  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9011  return VK_ERROR_VALIDATION_FAILED_EXT;
9012  }
9013  }
9014  }
9015 
9016  return VK_SUCCESS;
9017 }
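// [Editor's note] A self-contained sketch of the margin-based corruption detection that
// CheckCorruption relies on above: every allocation is padded on both sides by
// VMA_DEBUG_MARGIN bytes filled with a known 32-bit pattern, and validation re-reads that
// pattern. The helpers and constant below are illustrative stand-ins (VMA defines its own
// magic value and VmaValidateMagicValue earlier in this file).
#include <cstdint>

static const uint32_t MAGIC_SKETCH = 0x7F84E666u; // hypothetical fill pattern

inline void WriteMagicSketch(void* pBlockData, uint64_t offset, uint64_t marginBytes)
{
    uint32_t* p = reinterpret_cast<uint32_t*>(static_cast<char*>(pBlockData) + offset);
    for(uint64_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
        p[i] = MAGIC_SKETCH;
}

inline bool ValidateMagicSketch(const void* pBlockData, uint64_t offset, uint64_t marginBytes)
{
    const uint32_t* p = reinterpret_cast<const uint32_t*>(
        static_cast<const char*>(pBlockData) + offset);
    for(uint64_t i = 0; i < marginBytes / sizeof(uint32_t); ++i)
        if(p[i] != MAGIC_SKETCH)
            return false; // Something wrote past an allocation boundary.
    return true;
}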
9018 
9019 void VmaBlockMetadata_Linear::Alloc(
9020  const VmaAllocationRequest& request,
9021  VmaSuballocationType type,
9022  VkDeviceSize allocSize,
9023  bool upperAddress,
9024  VmaAllocation hAllocation)
9025 {
9026  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
9027 
9028  if(upperAddress)
9029  {
9030  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
9031  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
9032  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9033  suballocations2nd.push_back(newSuballoc);
9034  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
9035  }
9036  else
9037  {
9038  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9039 
9040  // First allocation.
9041  if(suballocations1st.empty())
9042  {
9043  suballocations1st.push_back(newSuballoc);
9044  }
9045  else
9046  {
9047  // New allocation at the end of 1st vector.
9048  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
9049  {
9050  // Check if it fits before the end of the block.
9051  VMA_ASSERT(request.offset + allocSize <= GetSize());
9052  suballocations1st.push_back(newSuballoc);
9053  }
9054  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
9055  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
9056  {
9057  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9058 
9059  switch(m_2ndVectorMode)
9060  {
9061  case SECOND_VECTOR_EMPTY:
9062  // First allocation from second part ring buffer.
9063  VMA_ASSERT(suballocations2nd.empty());
9064  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
9065  break;
9066  case SECOND_VECTOR_RING_BUFFER:
9067  // 2-part ring buffer is already started.
9068  VMA_ASSERT(!suballocations2nd.empty());
9069  break;
9070  case SECOND_VECTOR_DOUBLE_STACK:
9071  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
9072  break;
9073  default:
9074  VMA_ASSERT(0);
9075  }
9076 
9077  suballocations2nd.push_back(newSuballoc);
9078  }
9079  else
9080  {
9081  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
9082  }
9083  }
9084  }
9085 
9086  m_SumFreeSize -= newSuballoc.size;
9087 }
9088 
9089 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9090 {
9091  FreeAtOffset(allocation->GetOffset());
9092 }
9093 
9094 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
9095 {
9096  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9097  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9098 
9099  if(!suballocations1st.empty())
9100  {
9101  // First allocation: Mark it as next empty at the beginning.
9102  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
9103  if(firstSuballoc.offset == offset)
9104  {
9105  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9106  firstSuballoc.hAllocation = VK_NULL_HANDLE;
9107  m_SumFreeSize += firstSuballoc.size;
9108  ++m_1stNullItemsBeginCount;
9109  CleanupAfterFree();
9110  return;
9111  }
9112  }
9113 
9114  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
9115  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
9116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9117  {
9118  VmaSuballocation& lastSuballoc = suballocations2nd.back();
9119  if(lastSuballoc.offset == offset)
9120  {
9121  m_SumFreeSize += lastSuballoc.size;
9122  suballocations2nd.pop_back();
9123  CleanupAfterFree();
9124  return;
9125  }
9126  }
9127  // Last allocation in 1st vector.
9128  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
9129  {
9130  VmaSuballocation& lastSuballoc = suballocations1st.back();
9131  if(lastSuballoc.offset == offset)
9132  {
9133  m_SumFreeSize += lastSuballoc.size;
9134  suballocations1st.pop_back();
9135  CleanupAfterFree();
9136  return;
9137  }
9138  }
9139 
9140  // Item from the middle of 1st vector.
9141  {
9142  VmaSuballocation refSuballoc;
9143  refSuballoc.offset = offset;
9144  // Rest of members stays uninitialized intentionally for better performance.
9145  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
9146  suballocations1st.begin() + m_1stNullItemsBeginCount,
9147  suballocations1st.end(),
9148  refSuballoc);
9149  if(it != suballocations1st.end())
9150  {
9151  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9152  it->hAllocation = VK_NULL_HANDLE;
9153  ++m_1stNullItemsMiddleCount;
9154  m_SumFreeSize += it->size;
9155  CleanupAfterFree();
9156  return;
9157  }
9158  }
9159 
9160  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
9161  {
9162  // Item from the middle of 2nd vector.
9163  VmaSuballocation refSuballoc;
9164  refSuballoc.offset = offset;
9165  // Rest of members stays uninitialized intentionally for better performance.
9166  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
9167  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
9168  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
9169  if(it != suballocations2nd.end())
9170  {
9171  it->type = VMA_SUBALLOCATION_TYPE_FREE;
9172  it->hAllocation = VK_NULL_HANDLE;
9173  ++m_2ndNullItemsCount;
9174  m_SumFreeSize += it->size;
9175  CleanupAfterFree();
9176  return;
9177  }
9178  }
9179 
9180  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
9181 }
9182 
9183 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9184 {
9185  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9186  const size_t suballocCount = AccessSuballocations1st().size();
9187  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9188 }
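// [Editor's note] The heuristic above compacts the 1st vector once freed (null) items are
// at least 1.5x the live items, i.e. at least 60% of the vector:
// nullItemCount * 2 >= liveCount * 3  <=>  nullItemCount / liveCount >= 3/2.
// A small boundary check of that arithmetic, with hypothetical names:
#include <cassert>
#include <cstddef>

inline bool ShouldCompactSketch(size_t totalCount, size_t nullCount)
{
    const size_t liveCount = totalCount - nullCount;
    return totalCount > 32 && nullCount * 2 >= liveCount * 3;
}

inline void ShouldCompactSketchSelfTest()
{
    assert(ShouldCompactSketch(100, 60));  // 60 null vs 40 live: 120 >= 120 -> compact
    assert(!ShouldCompactSketch(100, 59)); // 59 null vs 41 live: 118 <  123 -> keep
    assert(!ShouldCompactSketch(10, 9));   // 32 items or fewer: never compact
}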
9189 
9190 void VmaBlockMetadata_Linear::CleanupAfterFree()
9191 {
9192  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9193  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9194 
9195  if(IsEmpty())
9196  {
9197  suballocations1st.clear();
9198  suballocations2nd.clear();
9199  m_1stNullItemsBeginCount = 0;
9200  m_1stNullItemsMiddleCount = 0;
9201  m_2ndNullItemsCount = 0;
9202  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9203  }
9204  else
9205  {
9206  const size_t suballoc1stCount = suballocations1st.size();
9207  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9208  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
9209 
9210  // Find more null items at the beginning of 1st vector.
9211  while(m_1stNullItemsBeginCount < suballoc1stCount &&
9212  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9213  {
9214  ++m_1stNullItemsBeginCount;
9215  --m_1stNullItemsMiddleCount;
9216  }
9217 
9218  // Find more null items at the end of 1st vector.
9219  while(m_1stNullItemsMiddleCount > 0 &&
9220  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
9221  {
9222  --m_1stNullItemsMiddleCount;
9223  suballocations1st.pop_back();
9224  }
9225 
9226  // Find more null items at the end of 2nd vector.
9227  while(m_2ndNullItemsCount > 0 &&
9228  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
9229  {
9230  --m_2ndNullItemsCount;
9231  suballocations2nd.pop_back();
9232  }
9233 
9234  if(ShouldCompact1st())
9235  {
9236  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
9237  size_t srcIndex = m_1stNullItemsBeginCount;
9238  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
9239  {
9240  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
9241  {
9242  ++srcIndex;
9243  }
9244  if(dstIndex != srcIndex)
9245  {
9246  suballocations1st[dstIndex] = suballocations1st[srcIndex];
9247  }
9248  ++srcIndex;
9249  }
9250  suballocations1st.resize(nonNullItemCount);
9251  m_1stNullItemsBeginCount = 0;
9252  m_1stNullItemsMiddleCount = 0;
9253  }
9254 
9255  // 2nd vector became empty.
9256  if(suballocations2nd.empty())
9257  {
9258  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9259  }
9260 
9261  // 1st vector became empty.
9262  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
9263  {
9264  suballocations1st.clear();
9265  m_1stNullItemsBeginCount = 0;
9266 
9267  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9268  {
9269  // Swap 1st with 2nd. Now 2nd is empty.
9270  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
9271  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
9272  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
9273  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
9274  {
9275  ++m_1stNullItemsBeginCount;
9276  --m_1stNullItemsMiddleCount;
9277  }
9278  m_2ndNullItemsCount = 0;
9279  m_1stVectorIndex ^= 1;
9280  }
9281  }
9282  }
9283 
9284  VMA_HEAVY_ASSERT(Validate());
9285 }
9286 
9287 
9288 ////////////////////////////////////////////////////////////////////////////////
9289 // class VmaBlockMetadata_Buddy
9290 
9291 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
9292  VmaBlockMetadata(hAllocator),
9293  m_Root(VMA_NULL),
9294  m_AllocationCount(0),
9295  m_FreeCount(1),
9296  m_SumFreeSize(0)
9297 {
9298  memset(m_FreeList, 0, sizeof(m_FreeList));
9299 }
9300 
9301 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9302 {
9303  DeleteNode(m_Root);
9304 }
9305 
9306 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9307 {
9308  VmaBlockMetadata::Init(size);
9309 
9310  m_UsableSize = VmaPrevPow2(size);
9311  m_SumFreeSize = m_UsableSize;
9312 
9313  // Calculate m_LevelCount.
9314  m_LevelCount = 1;
9315  while(m_LevelCount < MAX_LEVELS &&
9316  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
9317  {
9318  ++m_LevelCount;
9319  }
9320 
9321  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
9322  rootNode->offset = 0;
9323  rootNode->type = Node::TYPE_FREE;
9324  rootNode->parent = VMA_NULL;
9325  rootNode->buddy = VMA_NULL;
9326 
9327  m_Root = rootNode;
9328  AddToFreeListFront(0, rootNode);
9329 }
9330 
9331 bool VmaBlockMetadata_Buddy::Validate() const
9332 {
9333  // Validate tree.
9334  ValidationContext ctx;
9335  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
9336  {
9337  VMA_VALIDATE(false && "ValidateNode failed.");
9338  }
9339  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
9340  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
9341 
9342  // Validate free node lists.
9343  for(uint32_t level = 0; level < m_LevelCount; ++level)
9344  {
9345  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
9346  m_FreeList[level].front->free.prev == VMA_NULL);
9347 
9348  for(Node* node = m_FreeList[level].front;
9349  node != VMA_NULL;
9350  node = node->free.next)
9351  {
9352  VMA_VALIDATE(node->type == Node::TYPE_FREE);
9353 
9354  if(node->free.next == VMA_NULL)
9355  {
9356  VMA_VALIDATE(m_FreeList[level].back == node);
9357  }
9358  else
9359  {
9360  VMA_VALIDATE(node->free.next->free.prev == node);
9361  }
9362  }
9363  }
9364 
9365  // Validate that free lists at higher levels are empty.
9366  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
9367  {
9368  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
9369  }
9370 
9371  return true;
9372 }
9373 
9374 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9375 {
9376  for(uint32_t level = 0; level < m_LevelCount; ++level)
9377  {
9378  if(m_FreeList[level].front != VMA_NULL)
9379  {
9380  return LevelToNodeSize(level);
9381  }
9382  }
9383  return 0;
9384 }
9385 
9386 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9387 {
9388  const VkDeviceSize unusableSize = GetUnusableSize();
9389 
9390  outInfo.blockCount = 1;
9391 
9392  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9393  outInfo.usedBytes = outInfo.unusedBytes = 0;
9394 
9395  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9396  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9397  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9398 
9399  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
9400 
9401  if(unusableSize > 0)
9402  {
9403  ++outInfo.unusedRangeCount;
9404  outInfo.unusedBytes += unusableSize;
9405  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
9406  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
9407  }
9408 }
9409 
9410 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
9411 {
9412  const VkDeviceSize unusableSize = GetUnusableSize();
9413 
9414  inoutStats.size += GetSize();
9415  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
9416  inoutStats.allocationCount += m_AllocationCount;
9417  inoutStats.unusedRangeCount += m_FreeCount;
9418  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9419 
9420  if(unusableSize > 0)
9421  {
9422  ++inoutStats.unusedRangeCount;
9423  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
9424  }
9425 }
9426 
9427 #if VMA_STATS_STRING_ENABLED
9428 
9429 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9430 {
9431  // TODO optimize
9432  VmaStatInfo stat;
9433  CalcAllocationStatInfo(stat);
9434 
9435  PrintDetailedMap_Begin(
9436  json,
9437  stat.unusedBytes,
9438  stat.allocationCount,
9439  stat.unusedRangeCount);
9440 
9441  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9442 
9443  const VkDeviceSize unusableSize = GetUnusableSize();
9444  if(unusableSize > 0)
9445  {
9446  PrintDetailedMap_UnusedRange(json,
9447  m_UsableSize, // offset
9448  unusableSize); // size
9449  }
9450 
9451  PrintDetailedMap_End(json);
9452 }
9453 
9454 #endif // #if VMA_STATS_STRING_ENABLED
9455 
9456 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9457  uint32_t currentFrameIndex,
9458  uint32_t frameInUseCount,
9459  VkDeviceSize bufferImageGranularity,
9460  VkDeviceSize allocSize,
9461  VkDeviceSize allocAlignment,
9462  bool upperAddress,
9463  VmaSuballocationType allocType,
9464  bool canMakeOtherLost,
9465  uint32_t strategy,
9466  VmaAllocationRequest* pAllocationRequest)
9467 {
9468  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9469 
9470  // Simple way to respect bufferImageGranularity; may be optimized some day:
9471  // whenever the allocation might back an OPTIMAL image, round its alignment and size up to the granularity.
9472  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9473  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9474  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9475  {
9476  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
9477  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
9478  }
9479 
9480  if(allocSize > m_UsableSize)
9481  {
9482  return false;
9483  }
9484 
9485  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9486  for(uint32_t level = targetLevel + 1; level--; )
9487  {
9488  for(Node* freeNode = m_FreeList[level].front;
9489  freeNode != VMA_NULL;
9490  freeNode = freeNode->free.next)
9491  {
9492  if(freeNode->offset % allocAlignment == 0)
9493  {
9494  pAllocationRequest->offset = freeNode->offset;
9495  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
9496  pAllocationRequest->sumItemSize = 0;
9497  pAllocationRequest->itemsToMakeLostCount = 0;
9498  pAllocationRequest->customData = (void*)(uintptr_t)level;
9499  return true;
9500  }
9501  }
9502  }
9503 
9504  return false;
9505 }
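// [Editor's note] The loop above, for(uint32_t level = targetLevel + 1; level--; ), is a
// downward-counting idiom: it visits level = targetLevel, targetLevel - 1, ..., 0. In other
// words it first tries the tightest-fitting free nodes and falls back to ever larger ones,
// which Alloc() then splits down to targetLevel. An equivalent explicit loop, with
// hypothetical names:
#include <cstdint>

inline int FindFreeLevelSketch(const bool* levelHasFreeNode, uint32_t targetLevel)
{
    for(int level = static_cast<int>(targetLevel); level >= 0; --level)
    {
        if(levelHasFreeNode[level])
            return level; // A free node of this level's size is available.
    }
    return -1; // No free node large enough.
}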
9506 
9507 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
9508  uint32_t currentFrameIndex,
9509  uint32_t frameInUseCount,
9510  VmaAllocationRequest* pAllocationRequest)
9511 {
9512  /*
9513  Lost allocations are not supported in buddy allocator at the moment.
9514  Support might be added in the future.
9515  */
9516  return pAllocationRequest->itemsToMakeLostCount == 0;
9517 }
9518 
9519 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9520 {
9521  /*
9522  Lost allocations are not supported in buddy allocator at the moment.
9523  Support might be added in the future.
9524  */
9525  return 0;
9526 }
9527 
9528 void VmaBlockMetadata_Buddy::Alloc(
9529  const VmaAllocationRequest& request,
9530  VmaSuballocationType type,
9531  VkDeviceSize allocSize,
9532  bool upperAddress,
9533  VmaAllocation hAllocation)
9534 {
9535  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9536  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9537 
9538  Node* currNode = m_FreeList[currLevel].front;
9539  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9540  while(currNode->offset != request.offset)
9541  {
9542  currNode = currNode->free.next;
9543  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9544  }
9545 
9546  // Go down, splitting free nodes.
9547  while(currLevel < targetLevel)
9548  {
9549  // currNode is already first free node at currLevel.
9550  // Remove it from list of free nodes at this currLevel.
9551  RemoveFromFreeList(currLevel, currNode);
9552 
9553  const uint32_t childrenLevel = currLevel + 1;
9554 
9555  // Create two free sub-nodes.
9556  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
9557  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
9558 
9559  leftChild->offset = currNode->offset;
9560  leftChild->type = Node::TYPE_FREE;
9561  leftChild->parent = currNode;
9562  leftChild->buddy = rightChild;
9563 
9564  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9565  rightChild->type = Node::TYPE_FREE;
9566  rightChild->parent = currNode;
9567  rightChild->buddy = leftChild;
9568 
9569  // Convert current currNode to split type.
9570  currNode->type = Node::TYPE_SPLIT;
9571  currNode->split.leftChild = leftChild;
9572 
9573  // Add child nodes to free list. Order is important!
9574  AddToFreeListFront(childrenLevel, rightChild);
9575  AddToFreeListFront(childrenLevel, leftChild);
9576 
9577  ++m_FreeCount;
9578  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
9579  ++currLevel;
9580  currNode = m_FreeList[currLevel].front;
9581 
9582  /*
9583  We can be sure that currNode, as the left child of the node previously split,
9584  also fulfills the alignment requirement.
9585  */
9586  }
9587 
9588  // Remove from free list.
9589  VMA_ASSERT(currLevel == targetLevel &&
9590  currNode != VMA_NULL &&
9591  currNode->type == Node::TYPE_FREE);
9592  RemoveFromFreeList(currLevel, currNode);
9593 
9594  // Convert to allocation node.
9595  currNode->type = Node::TYPE_ALLOCATION;
9596  currNode->allocation.alloc = hAllocation;
9597 
9598  ++m_AllocationCount;
9599  --m_FreeCount;
9600  m_SumFreeSize -= allocSize;
9601 }
9602 
9603 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9604 {
9605  if(node->type == Node::TYPE_SPLIT)
9606  {
9607  DeleteNode(node->split.leftChild->buddy);
9608  DeleteNode(node->split.leftChild);
9609  }
9610 
9611  vma_delete(GetAllocationCallbacks(), node);
9612 }
9613 
9614 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
9615 {
9616  VMA_VALIDATE(level < m_LevelCount);
9617  VMA_VALIDATE(curr->parent == parent);
9618  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
9619  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
9620  switch(curr->type)
9621  {
9622  case Node::TYPE_FREE:
9623  // curr->free.prev, next are validated separately.
9624  ctx.calculatedSumFreeSize += levelNodeSize;
9625  ++ctx.calculatedFreeCount;
9626  break;
9627  case Node::TYPE_ALLOCATION:
9628  ++ctx.calculatedAllocationCount;
9629  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
9630  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
9631  break;
9632  case Node::TYPE_SPLIT:
9633  {
9634  const uint32_t childrenLevel = level + 1;
9635  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
9636  const Node* const leftChild = curr->split.leftChild;
9637  VMA_VALIDATE(leftChild != VMA_NULL);
9638  VMA_VALIDATE(leftChild->offset == curr->offset);
9639  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
9640  {
9641  VMA_VALIDATE(false && "ValidateNode for left child failed.");
9642  }
9643  const Node* const rightChild = leftChild->buddy;
9644  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
9645  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
9646  {
9647  VMA_VALIDATE(false && "ValidateNode for right child failed.");
9648  }
9649  }
9650  break;
9651  default:
9652  return false;
9653  }
9654 
9655  return true;
9656 }
9657 
9658 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9659 {
9660  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9661  uint32_t level = 0;
9662  VkDeviceSize currLevelNodeSize = m_UsableSize;
9663  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9664  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9665  {
9666  ++level;
9667  currLevelNodeSize = nextLevelNodeSize;
9668  nextLevelNodeSize = currLevelNodeSize >> 1;
9669  }
9670  return level;
9671 }
9672 
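A minimal standalone sketch of the level computation above, assuming a power-of-two usable size; the function name and values are illustrative, not part of the library:

#include <cstdint>
#include <cstdio>

// Mirrors AllocSizeToLevel(): find the deepest level whose node size still
// fits the request. Level 0 is the whole block; each level halves the size.
uint32_t AllocSizeToLevelSketch(uint64_t usableSize, uint64_t allocSize, uint32_t levelCount)
{
    uint32_t level = 0;
    uint64_t nextLevelNodeSize = usableSize >> 1;
    while(allocSize <= nextLevelNodeSize && level + 1 < levelCount)
    {
        ++level;
        nextLevelNodeSize >>= 1;
    }
    return level;
}

int main()
{
    // 256-byte block: a 48-byte request lands on level 2 (64-byte nodes).
    std::printf("%u\n", AllocSizeToLevelSketch(256, 48, 8)); // prints 2
}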
9673 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
9674 {
9675  // Find node and level.
9676  Node* node = m_Root;
9677  VkDeviceSize nodeOffset = 0;
9678  uint32_t level = 0;
9679  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9680  while(node->type == Node::TYPE_SPLIT)
9681  {
9682  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
9683  if(offset < nodeOffset + nextLevelSize)
9684  {
9685  node = node->split.leftChild;
9686  }
9687  else
9688  {
9689  node = node->split.leftChild->buddy;
9690  nodeOffset += nextLevelSize;
9691  }
9692  ++level;
9693  levelNodeSize = nextLevelSize;
9694  }
9695 
9696  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9697  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
9698 
9699  ++m_FreeCount;
9700  --m_AllocationCount;
9701  m_SumFreeSize += alloc->GetSize();
9702 
9703  node->type = Node::TYPE_FREE;
9704 
9705  // Join free nodes if possible.
9706  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
9707  {
9708  RemoveFromFreeList(level, node->buddy);
9709  Node* const parent = node->parent;
9710 
9711  vma_delete(GetAllocationCallbacks(), node->buddy);
9712  vma_delete(GetAllocationCallbacks(), node);
9713  parent->type = Node::TYPE_FREE;
9714 
9715  node = parent;
9716  --level;
9717  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
9718  --m_FreeCount;
9719  }
9720 
9721  AddToFreeListFront(level, node);
9722 }
9723 
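FreeAtOffset() above locates the node to free purely from its offset by walking down the split tree. A hedged sketch of that descent in isolation (names and numbers are illustrative):

#include <cstdint>
#include <cstdio>
#include <string>

// Reproduces the descent in FreeAtOffset(): at each split node, go left if
// the offset lies in the first half, otherwise go right and advance the
// node offset by half the current node size.
std::string OffsetToPathSketch(uint64_t usableSize, uint64_t offset, uint32_t targetLevel)
{
    std::string path;
    uint64_t nodeOffset = 0;
    uint64_t levelNodeSize = usableSize;
    for(uint32_t level = 0; level < targetLevel; ++level)
    {
        const uint64_t nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            path += 'L';
        }
        else
        {
            path += 'R';
            nodeOffset += nextLevelSize;
        }
        levelNodeSize = nextLevelSize;
    }
    return path;
}

int main()
{
    // 256-byte block, allocation at offset 192 on level 2: right, then right.
    std::printf("%s\n", OffsetToPathSketch(256, 192, 2).c_str()); // prints RR
}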
9724 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9725 {
9726  switch(node->type)
9727  {
9728  case Node::TYPE_FREE:
9729  ++outInfo.unusedRangeCount;
9730  outInfo.unusedBytes += levelNodeSize;
9731  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9732  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize); // VMA_MIN: a minimum cannot be tracked with VMA_MAX.
9733  break;
9734  case Node::TYPE_ALLOCATION:
9735  {
9736  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9737  ++outInfo.allocationCount;
9738  outInfo.usedBytes += allocSize;
9739  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
9740  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize); // VMA_MIN: a minimum cannot be tracked with VMA_MAX.
9741 
9742  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
9743  if(unusedRangeSize > 0)
9744  {
9745  ++outInfo.unusedRangeCount;
9746  outInfo.unusedBytes += unusedRangeSize;
9747  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
9748  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); // VMA_MIN: a minimum cannot be tracked with VMA_MAX.
9749  }
9750  }
9751  break;
9752  case Node::TYPE_SPLIT:
9753  {
9754  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9755  const Node* const leftChild = node->split.leftChild;
9756  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9757  const Node* const rightChild = leftChild->buddy;
9758  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9759  }
9760  break;
9761  default:
9762  VMA_ASSERT(0);
9763  }
9764 }
9765 
9766 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9767 {
9768  VMA_ASSERT(node->type == Node::TYPE_FREE);
9769 
9770  // List is empty.
9771  Node* const frontNode = m_FreeList[level].front;
9772  if(frontNode == VMA_NULL)
9773  {
9774  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9775  node->free.prev = node->free.next = VMA_NULL;
9776  m_FreeList[level].front = m_FreeList[level].back = node;
9777  }
9778  else
9779  {
9780  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9781  node->free.prev = VMA_NULL;
9782  node->free.next = frontNode;
9783  frontNode->free.prev = node;
9784  m_FreeList[level].front = node;
9785  }
9786 }
9787 
9788 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9789 {
9790  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9791 
9792  // It is at the front.
9793  if(node->free.prev == VMA_NULL)
9794  {
9795  VMA_ASSERT(m_FreeList[level].front == node);
9796  m_FreeList[level].front = node->free.next;
9797  }
9798  else
9799  {
9800  Node* const prevFreeNode = node->free.prev;
9801  VMA_ASSERT(prevFreeNode->free.next == node);
9802  prevFreeNode->free.next = node->free.next;
9803  }
9804 
9805  // It is at the back.
9806  if(node->free.next == VMA_NULL)
9807  {
9808  VMA_ASSERT(m_FreeList[level].back == node);
9809  m_FreeList[level].back = node->free.prev;
9810  }
9811  else
9812  {
9813  Node* const nextFreeNode = node->free.next;
9814  VMA_ASSERT(nextFreeNode->free.prev == node);
9815  nextFreeNode->free.prev = node->free.prev;
9816  }
9817 }
9818 
9819 #if VMA_STATS_STRING_ENABLED
9820 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9821 {
9822  switch(node->type)
9823  {
9824  case Node::TYPE_FREE:
9825  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9826  break;
9827  case Node::TYPE_ALLOCATION:
9828  {
9829  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9830  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
9831  if(allocSize < levelNodeSize)
9832  {
9833  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
9834  }
9835  }
9836  break;
9837  case Node::TYPE_SPLIT:
9838  {
9839  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9840  const Node* const leftChild = node->split.leftChild;
9841  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9842  const Node* const rightChild = leftChild->buddy;
9843  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9844  }
9845  break;
9846  default:
9847  VMA_ASSERT(0);
9848  }
9849 }
9850 #endif // #if VMA_STATS_STRING_ENABLED
9851 
9852 
9853 ////////////////////////////////////////////////////////////////////////////////
9854 // class VmaDeviceMemoryBlock
9855 
9856 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
9857  m_pMetadata(VMA_NULL),
9858  m_MemoryTypeIndex(UINT32_MAX),
9859  m_Id(0),
9860  m_hMemory(VK_NULL_HANDLE),
9861  m_MapCount(0),
9862  m_pMappedData(VMA_NULL)
9863 {
9864 }
9865 
9866 void VmaDeviceMemoryBlock::Init(
9867  VmaAllocator hAllocator,
9868  uint32_t newMemoryTypeIndex,
9869  VkDeviceMemory newMemory,
9870  VkDeviceSize newSize,
9871  uint32_t id,
9872  uint32_t algorithm)
9873 {
9874  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9875 
9876  m_MemoryTypeIndex = newMemoryTypeIndex;
9877  m_Id = id;
9878  m_hMemory = newMemory;
9879 
9880  switch(algorithm)
9881  {
9883  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9884  break;
9885  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
9886  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
9887  break;
9888  default:
9889  VMA_ASSERT(0);
9890  // Fall-through.
9891  case 0:
9892  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9893  }
9894  m_pMetadata->Init(newSize);
9895 }
9896 
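The switch in Init() above picks the metadata implementation from the pool's algorithm flag. A hedged usage sketch from the application side; the block size and limits are hypothetical:

#include <vk_mem_alloc.h>

// Creates a custom pool whose blocks will use VmaBlockMetadata_Linear.
VkResult CreateLinearPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolInfo.blockSize = 64ull * 1024 * 1024; // hypothetical 64 MiB block
    poolInfo.maxBlockCount = 1; // ring-buffer style usage needs a single block
    return vmaCreatePool(allocator, &poolInfo, pPool);
}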
9897 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9898 {
9899  // This is the most important assert in the entire library.
9900  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9901  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9902 
9903  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9904  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9905  m_hMemory = VK_NULL_HANDLE;
9906 
9907  vma_delete(allocator, m_pMetadata);
9908  m_pMetadata = VMA_NULL;
9909 }
9910 
9911 bool VmaDeviceMemoryBlock::Validate() const
9912 {
9913  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
9914  (m_pMetadata->GetSize() != 0));
9915 
9916  return m_pMetadata->Validate();
9917 }
9918 
9919 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9920 {
9921  void* pData = nullptr;
9922  VkResult res = Map(hAllocator, 1, &pData);
9923  if(res != VK_SUCCESS)
9924  {
9925  return res;
9926  }
9927 
9928  res = m_pMetadata->CheckCorruption(pData);
9929 
9930  Unmap(hAllocator, 1);
9931 
9932  return res;
9933 }
9934 
9935 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9936 {
9937  if(count == 0)
9938  {
9939  return VK_SUCCESS;
9940  }
9941 
9942  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9943  if(m_MapCount != 0)
9944  {
9945  m_MapCount += count;
9946  VMA_ASSERT(m_pMappedData != VMA_NULL);
9947  if(ppData != VMA_NULL)
9948  {
9949  *ppData = m_pMappedData;
9950  }
9951  return VK_SUCCESS;
9952  }
9953  else
9954  {
9955  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9956  hAllocator->m_hDevice,
9957  m_hMemory,
9958  0, // offset
9959  VK_WHOLE_SIZE,
9960  0, // flags
9961  &m_pMappedData);
9962  if(result == VK_SUCCESS)
9963  {
9964  if(ppData != VMA_NULL)
9965  {
9966  *ppData = m_pMappedData;
9967  }
9968  m_MapCount = count;
9969  }
9970  return result;
9971  }
9972 }
9973 
9974 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9975 {
9976  if(count == 0)
9977  {
9978  return;
9979  }
9980 
9981  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9982  if(m_MapCount >= count)
9983  {
9984  m_MapCount -= count;
9985  if(m_MapCount == 0)
9986  {
9987  m_pMappedData = VMA_NULL;
9988  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9989  }
9990  }
9991  else
9992  {
9993  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
9994  }
9995 }
9996 
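Map() and Unmap() above implement reference-counted mapping: vkMapMemory is called only on the transition from a count of zero, and vkUnmapMemory only when the count returns to zero. A minimal mock of that contract, with a boolean standing in for the real device mapping:

#include <cassert>
#include <cstdint>

struct MockBlock
{
    uint32_t mapCount = 0;
    bool deviceMapped = false; // stands in for the vkMapMemory state

    void Map(uint32_t count)
    {
        if(count == 0) return;
        if(mapCount == 0) deviceMapped = true; // real code calls vkMapMemory here
        mapCount += count;
    }
    void Unmap(uint32_t count)
    {
        assert(mapCount >= count && "unbalanced unmap");
        mapCount -= count;
        if(mapCount == 0) deviceMapped = false; // real code calls vkUnmapMemory here
    }
};

int main()
{
    MockBlock b;
    b.Map(1);
    b.Map(2); // nested mappings share the single device-level mapping
    assert(b.deviceMapped);
    b.Unmap(3); // the last unmap releases the device-level mapping
    assert(!b.deviceMapped);
}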
9997 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9998 {
9999  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10000  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10001 
10002  void* pData;
10003  VkResult res = Map(hAllocator, 1, &pData);
10004  if(res != VK_SUCCESS)
10005  {
10006  return res;
10007  }
10008 
10009  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
10010  VmaWriteMagicValue(pData, allocOffset + allocSize);
10011 
10012  Unmap(hAllocator, 1);
10013 
10014  return VK_SUCCESS;
10015 }
10016 
10017 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
10018 {
10019  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
10020  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
10021 
10022  void* pData;
10023  VkResult res = Map(hAllocator, 1, &pData);
10024  if(res != VK_SUCCESS)
10025  {
10026  return res;
10027  }
10028 
10029  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
10030  {
10031  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
10032  }
10033  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
10034  {
10035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
10036  }
10037 
10038  Unmap(hAllocator, 1);
10039 
10040  return VK_SUCCESS;
10041 }
10042 
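The two functions above write and check a magic pattern in the VMA_DEBUG_MARGIN bytes on both sides of an allocation. A self-contained sketch of the same idea on a plain byte buffer; the pattern value and margin here are placeholders, not the library's actual constants:

#include <cassert>
#include <cstdint>

static const uint32_t MAGIC = 0xDEADC0DE; // placeholder pattern
static const size_t MARGIN = 16;          // stands in for VMA_DEBUG_MARGIN

void WriteMagic(void* base, size_t offset)
{
    uint32_t* p = reinterpret_cast<uint32_t*>(static_cast<char*>(base) + offset);
    for(size_t i = 0; i < MARGIN / sizeof(uint32_t); ++i) p[i] = MAGIC;
}
bool ValidateMagic(const void* base, size_t offset)
{
    const uint32_t* p = reinterpret_cast<const uint32_t*>(static_cast<const char*>(base) + offset);
    for(size_t i = 0; i < MARGIN / sizeof(uint32_t); ++i)
        if(p[i] != MAGIC) return false;
    return true;
}

int main()
{
    alignas(4) char buf[256] = {};
    const size_t allocOffset = MARGIN, allocSize = 64;
    WriteMagic(buf, allocOffset - MARGIN);    // margin before the allocation
    WriteMagic(buf, allocOffset + allocSize); // margin after the allocation
    assert(ValidateMagic(buf, allocOffset - MARGIN));
    buf[allocOffset + allocSize] = 0;         // simulate a buffer overrun
    assert(!ValidateMagic(buf, allocOffset + allocSize)); // corruption detected
}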
10043 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
10044  const VmaAllocator hAllocator,
10045  const VmaAllocation hAllocation,
10046  VkBuffer hBuffer)
10047 {
10048  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10049  hAllocation->GetBlock() == this);
10050  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10051  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10052  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
10053  hAllocator->m_hDevice,
10054  hBuffer,
10055  m_hMemory,
10056  hAllocation->GetOffset());
10057 }
10058 
10059 VkResult VmaDeviceMemoryBlock::BindImageMemory(
10060  const VmaAllocator hAllocator,
10061  const VmaAllocation hAllocation,
10062  VkImage hImage)
10063 {
10064  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
10065  hAllocation->GetBlock() == this);
10066  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
10067  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10068  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
10069  hAllocator->m_hDevice,
10070  hImage,
10071  m_hMemory,
10072  hAllocation->GetOffset());
10073 }
10074 
10075 static void InitStatInfo(VmaStatInfo& outInfo)
10076 {
10077  memset(&outInfo, 0, sizeof(outInfo));
10078  outInfo.allocationSizeMin = UINT64_MAX;
10079  outInfo.unusedRangeSizeMin = UINT64_MAX;
10080 }
10081 
10082 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10083 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10084 {
10085  inoutInfo.blockCount += srcInfo.blockCount;
10086  inoutInfo.allocationCount += srcInfo.allocationCount;
10087  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10088  inoutInfo.usedBytes += srcInfo.usedBytes;
10089  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10090  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10091  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10092  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10093  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10094 }
10095 
10096 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10097 {
10098  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10099  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10100  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10101  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10102 }
10103 
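The three helpers above define how per-block statistics fold into totals: counters add, minima and maxima combine, and averages are derived afterwards with rounded division. A reduced sketch of the same fold (struct and field names simplified):

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Stat
{
    uint64_t usedBytes = 0;
    uint64_t allocationCount = 0;
    uint64_t sizeMin = UINT64_MAX; // like InitStatInfo(): min starts at max
    uint64_t sizeMax = 0;
    uint64_t sizeAvg = 0;
};

void AddStat(Stat& inout, const Stat& src)
{
    inout.usedBytes += src.usedBytes;
    inout.allocationCount += src.allocationCount;
    inout.sizeMin = std::min(inout.sizeMin, src.sizeMin);
    inout.sizeMax = std::max(inout.sizeMax, src.sizeMax);
}
void Postprocess(Stat& inout)
{
    // Rounded division, like VmaRoundDiv: (a + b/2) / b.
    inout.sizeAvg = inout.allocationCount != 0 ?
        (inout.usedBytes + inout.allocationCount / 2) / inout.allocationCount : 0;
}

int main()
{
    Stat total, a{100, 2, 30, 70}, b{50, 1, 50, 50};
    AddStat(total, a);
    AddStat(total, b);
    Postprocess(total);
    std::printf("avg=%llu min=%llu max=%llu\n",
        (unsigned long long)total.sizeAvg,
        (unsigned long long)total.sizeMin,
        (unsigned long long)total.sizeMax); // avg=50 min=30 max=70
}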
10104 VmaPool_T::VmaPool_T(
10105  VmaAllocator hAllocator,
10106  const VmaPoolCreateInfo& createInfo,
10107  VkDeviceSize preferredBlockSize) :
10108  m_BlockVector(
10109  hAllocator,
10110  createInfo.memoryTypeIndex,
10111  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
10112  createInfo.minBlockCount,
10113  createInfo.maxBlockCount,
10114  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
10115  createInfo.frameInUseCount,
10116  true, // isCustomPool
10117  createInfo.blockSize != 0, // explicitBlockSize
10118  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
10119  m_Id(0)
10120 {
10121 }
10122 
10123 VmaPool_T::~VmaPool_T()
10124 {
10125 }
10126 
10127 #if VMA_STATS_STRING_ENABLED
10128 
10129 #endif // #if VMA_STATS_STRING_ENABLED
10130 
10131 VmaBlockVector::VmaBlockVector(
10132  VmaAllocator hAllocator,
10133  uint32_t memoryTypeIndex,
10134  VkDeviceSize preferredBlockSize,
10135  size_t minBlockCount,
10136  size_t maxBlockCount,
10137  VkDeviceSize bufferImageGranularity,
10138  uint32_t frameInUseCount,
10139  bool isCustomPool,
10140  bool explicitBlockSize,
10141  uint32_t algorithm) :
10142  m_hAllocator(hAllocator),
10143  m_MemoryTypeIndex(memoryTypeIndex),
10144  m_PreferredBlockSize(preferredBlockSize),
10145  m_MinBlockCount(minBlockCount),
10146  m_MaxBlockCount(maxBlockCount),
10147  m_BufferImageGranularity(bufferImageGranularity),
10148  m_FrameInUseCount(frameInUseCount),
10149  m_IsCustomPool(isCustomPool),
10150  m_ExplicitBlockSize(explicitBlockSize),
10151  m_Algorithm(algorithm),
10152  m_HasEmptyBlock(false),
10153  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
10154  m_pDefragmentator(VMA_NULL),
10155  m_NextBlockId(0)
10156 {
10157 }
10158 
10159 VmaBlockVector::~VmaBlockVector()
10160 {
10161  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10162 
10163  for(size_t i = m_Blocks.size(); i--; )
10164  {
10165  m_Blocks[i]->Destroy(m_hAllocator);
10166  vma_delete(m_hAllocator, m_Blocks[i]);
10167  }
10168 }
10169 
10170 VkResult VmaBlockVector::CreateMinBlocks()
10171 {
10172  for(size_t i = 0; i < m_MinBlockCount; ++i)
10173  {
10174  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10175  if(res != VK_SUCCESS)
10176  {
10177  return res;
10178  }
10179  }
10180  return VK_SUCCESS;
10181 }
10182 
10183 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10184 {
10185  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10186 
10187  const size_t blockCount = m_Blocks.size();
10188 
10189  pStats->size = 0;
10190  pStats->unusedSize = 0;
10191  pStats->allocationCount = 0;
10192  pStats->unusedRangeCount = 0;
10193  pStats->unusedRangeSizeMax = 0;
10194  pStats->blockCount = blockCount;
10195 
10196  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10197  {
10198  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10199  VMA_ASSERT(pBlock);
10200  VMA_HEAVY_ASSERT(pBlock->Validate());
10201  pBlock->m_pMetadata->AddPoolStats(*pStats);
10202  }
10203 }
10204 
10205 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10206 {
10207  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10208  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10209  (VMA_DEBUG_MARGIN > 0) &&
10210  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10211 }
10212 
10213 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10214 
10215 VkResult VmaBlockVector::Allocate(
10216  VmaPool hCurrentPool,
10217  uint32_t currentFrameIndex,
10218  VkDeviceSize size,
10219  VkDeviceSize alignment,
10220  const VmaAllocationCreateInfo& createInfo,
10221  VmaSuballocationType suballocType,
10222  VmaAllocation* pAllocation)
10223 {
10224  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10225  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
10226  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10227  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10228  const bool canCreateNewBlock =
10229  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
10230  (m_Blocks.size() < m_MaxBlockCount);
10231  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
10232 
10233  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
10234  // Which in turn is available only when maxBlockCount = 1.
10235  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
10236  {
10237  canMakeOtherLost = false;
10238  }
10239 
10240  // Upper address can only be used with linear allocator and within single memory block.
10241  if(isUpperAddress &&
10242  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
10243  {
10244  return VK_ERROR_FEATURE_NOT_PRESENT;
10245  }
10246 
10247  // Validate strategy.
10248  switch(strategy)
10249  {
10250  case 0:
10251  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
10252  break;
10253  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
10254  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
10255  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
10256  break;
10257  default:
10258  return VK_ERROR_FEATURE_NOT_PRESENT;
10259  }
10260 
10261  // Early reject: requested allocation size is larger than maximum block size for this block vector.
10262  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
10263  {
10264  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10265  }
10266 
10267  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10268 
10269  /*
10270  Under certain conditions, this whole section can be skipped for optimization, so
10271  we move on directly to trying to allocate with canMakeOtherLost. That's the case
10272  e.g. for custom pools with linear algorithm.
10273  */
10274  if(!canMakeOtherLost || canCreateNewBlock)
10275  {
10276  // 1. Search existing allocations. Try to allocate without making other allocations lost.
10277  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
10278  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
10279 
10280  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10281  {
10282  // Use only last block.
10283  if(!m_Blocks.empty())
10284  {
10285  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
10286  VMA_ASSERT(pCurrBlock);
10287  VkResult res = AllocateFromBlock(
10288  pCurrBlock,
10289  hCurrentPool,
10290  currentFrameIndex,
10291  size,
10292  alignment,
10293  allocFlagsCopy,
10294  createInfo.pUserData,
10295  suballocType,
10296  strategy,
10297  pAllocation);
10298  if(res == VK_SUCCESS)
10299  {
10300  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
10301  return VK_SUCCESS;
10302  }
10303  }
10304  }
10305  else
10306  {
10307  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10308  {
10309  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10310  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10311  {
10312  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10313  VMA_ASSERT(pCurrBlock);
10314  VkResult res = AllocateFromBlock(
10315  pCurrBlock,
10316  hCurrentPool,
10317  currentFrameIndex,
10318  size,
10319  alignment,
10320  allocFlagsCopy,
10321  createInfo.pUserData,
10322  suballocType,
10323  strategy,
10324  pAllocation);
10325  if(res == VK_SUCCESS)
10326  {
10327  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10328  return VK_SUCCESS;
10329  }
10330  }
10331  }
10332  else // WORST_FIT, FIRST_FIT
10333  {
10334  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10335  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10336  {
10337  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10338  VMA_ASSERT(pCurrBlock);
10339  VkResult res = AllocateFromBlock(
10340  pCurrBlock,
10341  hCurrentPool,
10342  currentFrameIndex,
10343  size,
10344  alignment,
10345  allocFlagsCopy,
10346  createInfo.pUserData,
10347  suballocType,
10348  strategy,
10349  pAllocation);
10350  if(res == VK_SUCCESS)
10351  {
10352  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
10353  return VK_SUCCESS;
10354  }
10355  }
10356  }
10357  }
10358 
10359  // 2. Try to create new block.
10360  if(canCreateNewBlock)
10361  {
10362  // Calculate optimal size for new block.
10363  VkDeviceSize newBlockSize = m_PreferredBlockSize;
10364  uint32_t newBlockSizeShift = 0;
10365  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
10366 
10367  if(!m_ExplicitBlockSize)
10368  {
10369  // Allocate 1/8, 1/4, 1/2 as first blocks.
10370  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
10371  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
10372  {
10373  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10374  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
10375  {
10376  newBlockSize = smallerNewBlockSize;
10377  ++newBlockSizeShift;
10378  }
10379  else
10380  {
10381  break;
10382  }
10383  }
10384  }
10385 
10386  size_t newBlockIndex = 0;
10387  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
10388  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
10389  if(!m_ExplicitBlockSize)
10390  {
10391  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
10392  {
10393  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
10394  if(smallerNewBlockSize >= size)
10395  {
10396  newBlockSize = smallerNewBlockSize;
10397  ++newBlockSizeShift;
10398  res = CreateBlock(newBlockSize, &newBlockIndex);
10399  }
10400  else
10401  {
10402  break;
10403  }
10404  }
10405  }
10406 
10407  if(res == VK_SUCCESS)
10408  {
10409  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
10410  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
10411 
10412  res = AllocateFromBlock(
10413  pBlock,
10414  hCurrentPool,
10415  currentFrameIndex,
10416  size,
10417  alignment,
10418  allocFlagsCopy,
10419  createInfo.pUserData,
10420  suballocType,
10421  strategy,
10422  pAllocation);
10423  if(res == VK_SUCCESS)
10424  {
10425  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
10426  return VK_SUCCESS;
10427  }
10428  else
10429  {
10430  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
10431  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10432  }
10433  }
10434  }
10435  }
10436 
10437  // 3. Try to allocate from existing blocks with making other allocations lost.
10438  if(canMakeOtherLost)
10439  {
10440  uint32_t tryIndex = 0;
10441  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
10442  {
10443  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
10444  VmaAllocationRequest bestRequest = {};
10445  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
10446 
10447  // 1. Search existing allocations.
10448  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
10449  {
10450  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
10451  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
10452  {
10453  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10454  VMA_ASSERT(pCurrBlock);
10455  VmaAllocationRequest currRequest = {};
10456  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10457  currentFrameIndex,
10458  m_FrameInUseCount,
10459  m_BufferImageGranularity,
10460  size,
10461  alignment,
10462  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10463  suballocType,
10464  canMakeOtherLost,
10465  strategy,
10466  &currRequest))
10467  {
10468  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10469  if(pBestRequestBlock == VMA_NULL ||
10470  currRequestCost < bestRequestCost)
10471  {
10472  pBestRequestBlock = pCurrBlock;
10473  bestRequest = currRequest;
10474  bestRequestCost = currRequestCost;
10475 
10476  if(bestRequestCost == 0)
10477  {
10478  break;
10479  }
10480  }
10481  }
10482  }
10483  }
10484  else // WORST_FIT, FIRST_FIT
10485  {
10486  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
10487  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10488  {
10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
10490  VMA_ASSERT(pCurrBlock);
10491  VmaAllocationRequest currRequest = {};
10492  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
10493  currentFrameIndex,
10494  m_FrameInUseCount,
10495  m_BufferImageGranularity,
10496  size,
10497  alignment,
10498  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
10499  suballocType,
10500  canMakeOtherLost,
10501  strategy,
10502  &currRequest))
10503  {
10504  const VkDeviceSize currRequestCost = currRequest.CalcCost();
10505  if(pBestRequestBlock == VMA_NULL ||
10506  currRequestCost < bestRequestCost ||
10507  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10508  {
10509  pBestRequestBlock = pCurrBlock;
10510  bestRequest = currRequest;
10511  bestRequestCost = currRequestCost;
10512 
10513  if(bestRequestCost == 0 ||
10514  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
10515  {
10516  break;
10517  }
10518  }
10519  }
10520  }
10521  }
10522 
10523  if(pBestRequestBlock != VMA_NULL)
10524  {
10525  if(mapped)
10526  {
10527  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
10528  if(res != VK_SUCCESS)
10529  {
10530  return res;
10531  }
10532  }
10533 
10534  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
10535  currentFrameIndex,
10536  m_FrameInUseCount,
10537  &bestRequest))
10538  {
10539  // We no longer have an empty block.
10540  if(pBestRequestBlock->m_pMetadata->IsEmpty())
10541  {
10542  m_HasEmptyBlock = false;
10543  }
10544  // Allocate from this pBlock.
10545  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10546  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
10547  (*pAllocation)->InitBlockAllocation(
10548  hCurrentPool,
10549  pBestRequestBlock,
10550  bestRequest.offset,
10551  alignment,
10552  size,
10553  suballocType,
10554  mapped,
10555  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10556  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
10557  VMA_DEBUG_LOG(" Returned from existing block"); // blockIndex is out of scope here, so it cannot be logged.
10558  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
10559  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10560  {
10561  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10562  }
10563  if(IsCorruptionDetectionEnabled())
10564  {
10565  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
10566  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10567  }
10568  return VK_SUCCESS;
10569  }
10570  // else: Some allocations must have been touched while we are here. Next try.
10571  }
10572  else
10573  {
10574  // Could not find place in any of the blocks - break outer loop.
10575  break;
10576  }
10577  }
10578  /* Maximum number of tries exceeded - a very unlikely event when many other
10579  threads are simultaneously touching allocations, making it impossible to mark
10580  them as lost at the same time as we try to allocate. */
10581  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
10582  {
10583  return VK_ERROR_TOO_MANY_OBJECTS;
10584  }
10585  }
10586 
10587  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10588 }
10589 
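Step 2 of Allocate() above sizes a brand-new block with a "start small" heuristic: when no explicit block size was requested, the first blocks are 1/8, 1/4, 1/2 of the preferred size, as long as the candidate is still at least twice the allocation size and larger than any existing block. A standalone sketch of that calculation (names illustrative):

#include <cstdint>
#include <cstdio>

uint64_t ChooseNewBlockSize(uint64_t preferredBlockSize,
    uint64_t maxExistingBlockSize, uint64_t allocSize)
{
    uint64_t newBlockSize = preferredBlockSize;
    for(uint32_t i = 0; i < 3; ++i) // NEW_BLOCK_SIZE_SHIFT_MAX
    {
        const uint64_t smaller = newBlockSize / 2;
        if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
        {
            newBlockSize = smaller;
        }
        else
        {
            break;
        }
    }
    return newBlockSize;
}

int main()
{
    // The first 1 MiB allocation into an empty vector with a 256 MiB preferred
    // block size gets a 32 MiB block (1/8), not the full 256 MiB.
    std::printf("%llu MiB\n", (unsigned long long)
        (ChooseNewBlockSize(256ull << 20, 0, 1ull << 20) >> 20)); // prints 32 MiB
}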
10590 void VmaBlockVector::Free(
10591  VmaAllocation hAllocation)
10592 {
10593  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10594 
10595  // Scope for lock.
10596  {
10597  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10598 
10599  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10600 
10601  if(IsCorruptionDetectionEnabled())
10602  {
10603  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10604  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10605  }
10606 
10607  if(hAllocation->IsPersistentMap())
10608  {
10609  pBlock->Unmap(m_hAllocator, 1);
10610  }
10611 
10612  pBlock->m_pMetadata->Free(hAllocation);
10613  VMA_HEAVY_ASSERT(pBlock->Validate());
10614 
10615  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
10616 
10617  // pBlock became empty after this deallocation.
10618  if(pBlock->m_pMetadata->IsEmpty())
10619  {
10620  // We already have an empty block - we don't want two, so delete this one.
10621  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10622  {
10623  pBlockToDelete = pBlock;
10624  Remove(pBlock);
10625  }
10626  // We now have first empty block.
10627  else
10628  {
10629  m_HasEmptyBlock = true;
10630  }
10631  }
10632  // pBlock didn't become empty, but we have another empty block - find and free that one.
10633  // (This is optional, heuristics.)
10634  else if(m_HasEmptyBlock)
10635  {
10636  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10637  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10638  {
10639  pBlockToDelete = pLastBlock;
10640  m_Blocks.pop_back();
10641  m_HasEmptyBlock = false;
10642  }
10643  }
10644 
10645  IncrementallySortBlocks();
10646  }
10647 
10648  // Destruction of a free block. Deferred until this point, outside of the mutex
10649  // lock, for performance reasons.
10650  if(pBlockToDelete != VMA_NULL)
10651  {
10652  VMA_DEBUG_LOG(" Deleted empty allocation");
10653  pBlockToDelete->Destroy(m_hAllocator);
10654  vma_delete(m_hAllocator, pBlockToDelete);
10655  }
10656 }
10657 
10658 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10659 {
10660  VkDeviceSize result = 0;
10661  for(size_t i = m_Blocks.size(); i--; )
10662  {
10663  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10664  if(result >= m_PreferredBlockSize)
10665  {
10666  break;
10667  }
10668  }
10669  return result;
10670 }
10671 
10672 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10673 {
10674  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10675  {
10676  if(m_Blocks[blockIndex] == pBlock)
10677  {
10678  VmaVectorRemove(m_Blocks, blockIndex);
10679  return;
10680  }
10681  }
10682  VMA_ASSERT(0);
10683 }
10684 
10685 void VmaBlockVector::IncrementallySortBlocks()
10686 {
10687  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10688  {
10689  // Bubble sort only until first swap.
10690  for(size_t i = 1; i < m_Blocks.size(); ++i)
10691  {
10692  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10693  {
10694  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10695  return;
10696  }
10697  }
10698  }
10699 }
10700 
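IncrementallySortBlocks() above does a single bubble-sort pass that stops at the first swap, so frequent calls keep m_Blocks approximately ordered by free space without paying for a full sort on every free. The same step, shown on a plain vector:

#include <algorithm>
#include <cstdio>
#include <vector>

// One comparison pass, at most one swap; repeated calls converge to
// ascending order.
void IncrementalSortStep(std::vector<int>& v)
{
    for(size_t i = 1; i < v.size(); ++i)
    {
        if(v[i - 1] > v[i])
        {
            std::swap(v[i - 1], v[i]);
            return;
        }
    }
}

int main()
{
    std::vector<int> v = {3, 1, 2};
    IncrementalSortStep(v); // {1, 3, 2}
    IncrementalSortStep(v); // {1, 2, 3}
    for(int x : v) std::printf("%d ", x); // prints 1 2 3
    std::printf("\n");
}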
10701 VkResult VmaBlockVector::AllocateFromBlock(
10702  VmaDeviceMemoryBlock* pBlock,
10703  VmaPool hCurrentPool,
10704  uint32_t currentFrameIndex,
10705  VkDeviceSize size,
10706  VkDeviceSize alignment,
10707  VmaAllocationCreateFlags allocFlags,
10708  void* pUserData,
10709  VmaSuballocationType suballocType,
10710  uint32_t strategy,
10711  VmaAllocation* pAllocation)
10712 {
10713  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
10714  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
10715  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
10716  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
10717 
10718  VmaAllocationRequest currRequest = {};
10719  if(pBlock->m_pMetadata->CreateAllocationRequest(
10720  currentFrameIndex,
10721  m_FrameInUseCount,
10722  m_BufferImageGranularity,
10723  size,
10724  alignment,
10725  isUpperAddress,
10726  suballocType,
10727  false, // canMakeOtherLost
10728  strategy,
10729  &currRequest))
10730  {
10731  // Allocate from pCurrBlock.
10732  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
10733 
10734  if(mapped)
10735  {
10736  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
10737  if(res != VK_SUCCESS)
10738  {
10739  return res;
10740  }
10741  }
10742 
10743  // We no longer have an empty block.
10744  if(pBlock->m_pMetadata->IsEmpty())
10745  {
10746  m_HasEmptyBlock = false;
10747  }
10748 
10749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
10750  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
10751  (*pAllocation)->InitBlockAllocation(
10752  hCurrentPool,
10753  pBlock,
10754  currRequest.offset,
10755  alignment,
10756  size,
10757  suballocType,
10758  mapped,
10759  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
10760  VMA_HEAVY_ASSERT(pBlock->Validate());
10761  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
10762  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
10763  {
10764  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
10765  }
10766  if(IsCorruptionDetectionEnabled())
10767  {
10768  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
10769  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
10770  }
10771  return VK_SUCCESS;
10772  }
10773  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
10774 }
10775 
10776 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10777 {
10778  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10779  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10780  allocInfo.allocationSize = blockSize;
10781  VkDeviceMemory mem = VK_NULL_HANDLE;
10782  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10783  if(res < 0)
10784  {
10785  return res;
10786  }
10787 
10788  // New VkDeviceMemory successfully created.
10789 
10790  // Create a new block object for it.
10791  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10792  pBlock->Init(
10793  m_hAllocator,
10794  m_MemoryTypeIndex,
10795  mem,
10796  allocInfo.allocationSize,
10797  m_NextBlockId++,
10798  m_Algorithm);
10799 
10800  m_Blocks.push_back(pBlock);
10801  if(pNewBlockIndex != VMA_NULL)
10802  {
10803  *pNewBlockIndex = m_Blocks.size() - 1;
10804  }
10805 
10806  return VK_SUCCESS;
10807 }
10808 
10809 #if VMA_STATS_STRING_ENABLED
10810 
10811 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
10812 {
10813  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10814 
10815  json.BeginObject();
10816 
10817  if(m_IsCustomPool)
10818  {
10819  json.WriteString("MemoryTypeIndex");
10820  json.WriteNumber(m_MemoryTypeIndex);
10821 
10822  json.WriteString("BlockSize");
10823  json.WriteNumber(m_PreferredBlockSize);
10824 
10825  json.WriteString("BlockCount");
10826  json.BeginObject(true);
10827  if(m_MinBlockCount > 0)
10828  {
10829  json.WriteString("Min");
10830  json.WriteNumber((uint64_t)m_MinBlockCount);
10831  }
10832  if(m_MaxBlockCount < SIZE_MAX)
10833  {
10834  json.WriteString("Max");
10835  json.WriteNumber((uint64_t)m_MaxBlockCount);
10836  }
10837  json.WriteString("Cur");
10838  json.WriteNumber((uint64_t)m_Blocks.size());
10839  json.EndObject();
10840 
10841  if(m_FrameInUseCount > 0)
10842  {
10843  json.WriteString("FrameInUseCount");
10844  json.WriteNumber(m_FrameInUseCount);
10845  }
10846 
10847  if(m_Algorithm != 0)
10848  {
10849  json.WriteString("Algorithm");
10850  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
10851  }
10852  }
10853  else
10854  {
10855  json.WriteString("PreferredBlockSize");
10856  json.WriteNumber(m_PreferredBlockSize);
10857  }
10858 
10859  json.WriteString("Blocks");
10860  json.BeginObject();
10861  for(size_t i = 0; i < m_Blocks.size(); ++i)
10862  {
10863  json.BeginString();
10864  json.ContinueString(m_Blocks[i]->GetId());
10865  json.EndString();
10866 
10867  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
10868  }
10869  json.EndObject();
10870 
10871  json.EndObject();
10872 }
10873 
10874 #endif // #if VMA_STATS_STRING_ENABLED
10875 
10876 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10877  VmaAllocator hAllocator,
10878  uint32_t currentFrameIndex)
10879 {
10880  if(m_pDefragmentator == VMA_NULL)
10881  {
10882  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10883  hAllocator,
10884  this,
10885  currentFrameIndex);
10886  }
10887 
10888  return m_pDefragmentator;
10889 }
10890 
10891 VkResult VmaBlockVector::Defragment(
10892  VmaDefragmentationStats* pDefragmentationStats,
10893  VkDeviceSize& maxBytesToMove,
10894  uint32_t& maxAllocationsToMove)
10895 {
10896  if(m_pDefragmentator == VMA_NULL)
10897  {
10898  return VK_SUCCESS;
10899  }
10900 
10901  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10902 
10903  // Defragment.
10904  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
10905 
10906  // Accumulate statistics.
10907  if(pDefragmentationStats != VMA_NULL)
10908  {
10909  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
10910  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
10911  pDefragmentationStats->bytesMoved += bytesMoved;
10912  pDefragmentationStats->allocationsMoved += allocationsMoved;
10913  VMA_ASSERT(bytesMoved <= maxBytesToMove);
10914  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
10915  maxBytesToMove -= bytesMoved;
10916  maxAllocationsToMove -= allocationsMoved;
10917  }
10918 
10919  // Free empty blocks.
10920  m_HasEmptyBlock = false;
10921  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
10922  {
10923  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
10924  if(pBlock->m_pMetadata->IsEmpty())
10925  {
10926  if(m_Blocks.size() > m_MinBlockCount)
10927  {
10928  if(pDefragmentationStats != VMA_NULL)
10929  {
10930  ++pDefragmentationStats->deviceMemoryBlocksFreed;
10931  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
10932  }
10933 
10934  VmaVectorRemove(m_Blocks, blockIndex);
10935  pBlock->Destroy(m_hAllocator);
10936  vma_delete(m_hAllocator, pBlock);
10937  }
10938  else
10939  {
10940  m_HasEmptyBlock = true;
10941  }
10942  }
10943  }
10944 
10945  return result;
10946 }
10947 
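Defragment() above is driven through the library's public API. A hedged usage sketch; error handling and the re-creation of buffers/images bound to moved allocations are left to the caller:

#include <vk_mem_alloc.h>
#include <vector>

void DefragmentAll(VmaAllocator allocator, std::vector<VmaAllocation>& allocations)
{
    std::vector<VkBool32> changed(allocations.size());
    VmaDefragmentationInfo info = {};
    info.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
    info.maxAllocationsToMove = UINT32_MAX; // no count limit
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
        changed.data(), &info, &stats);
    (void)res;
    // changed[i] == VK_TRUE marks allocations that moved; resources bound to
    // them must be re-created and re-bound by the caller.
}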
10948 void VmaBlockVector::DestroyDefragmentator()
10949 {
10950  if(m_pDefragmentator != VMA_NULL)
10951  {
10952  vma_delete(m_hAllocator, m_pDefragmentator);
10953  m_pDefragmentator = VMA_NULL;
10954  }
10955 }
10956 
10957 void VmaBlockVector::MakePoolAllocationsLost(
10958  uint32_t currentFrameIndex,
10959  size_t* pLostAllocationCount)
10960 {
10961  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10962  size_t lostAllocationCount = 0;
10963  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10964  {
10965  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10966  VMA_ASSERT(pBlock);
10967  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10968  }
10969  if(pLostAllocationCount != VMA_NULL)
10970  {
10971  *pLostAllocationCount = lostAllocationCount;
10972  }
10973 }
10974 
10975 VkResult VmaBlockVector::CheckCorruption()
10976 {
10977  if(!IsCorruptionDetectionEnabled())
10978  {
10979  return VK_ERROR_FEATURE_NOT_PRESENT;
10980  }
10981 
10982  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10983  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10984  {
10985  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10986  VMA_ASSERT(pBlock);
10987  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10988  if(res != VK_SUCCESS)
10989  {
10990  return res;
10991  }
10992  }
10993  return VK_SUCCESS;
10994 }
10995 
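CheckCorruption() above is exposed through vmaCheckCorruption(). A brief usage sketch; since the exact result code for detected corruption is an implementation detail, only the "not enabled" case is asserted here:

#include <vk_mem_alloc.h>
#include <cassert>

void CheckAllMemory(VmaAllocator allocator)
{
    // UINT32_MAX requests a check of every memory type; types without
    // corruption detection enabled are skipped. If no type qualifies,
    // VK_ERROR_FEATURE_NOT_PRESENT is returned.
    const VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
    assert(res != VK_ERROR_FEATURE_NOT_PRESENT && "corruption detection not enabled");
}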
10996 void VmaBlockVector::AddStats(VmaStats* pStats)
10997 {
10998  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10999  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
11000 
11001  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
11002 
11003  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11004  {
11005  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11006  VMA_ASSERT(pBlock);
11007  VMA_HEAVY_ASSERT(pBlock->Validate());
11008  VmaStatInfo allocationStatInfo;
11009  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
11010  VmaAddStatInfo(pStats->total, allocationStatInfo);
11011  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11012  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11013  }
11014 }
11015 
11016 ////////////////////////////////////////////////////////////////////////////////
11017 // VmaDefragmentator members definition
11018 
11019 VmaDefragmentator::VmaDefragmentator(
11020  VmaAllocator hAllocator,
11021  VmaBlockVector* pBlockVector,
11022  uint32_t currentFrameIndex) :
11023  m_hAllocator(hAllocator),
11024  m_pBlockVector(pBlockVector),
11025  m_CurrentFrameIndex(currentFrameIndex),
11026  m_BytesMoved(0),
11027  m_AllocationsMoved(0),
11028  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
11029  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
11030 {
11031  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
11032 }
11033 
11034 VmaDefragmentator::~VmaDefragmentator()
11035 {
11036  for(size_t i = m_Blocks.size(); i--; )
11037  {
11038  vma_delete(m_hAllocator, m_Blocks[i]);
11039  }
11040 }
11041 
11042 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
11043 {
11044  AllocationInfo allocInfo;
11045  allocInfo.m_hAllocation = hAlloc;
11046  allocInfo.m_pChanged = pChanged;
11047  m_Allocations.push_back(allocInfo);
11048 }
11049 
11050 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
11051 {
11052  // It has already been mapped for defragmentation.
11053  if(m_pMappedDataForDefragmentation)
11054  {
11055  *ppMappedData = m_pMappedDataForDefragmentation;
11056  return VK_SUCCESS;
11057  }
11058 
11059  // It is originally mapped.
11060  if(m_pBlock->GetMappedData())
11061  {
11062  *ppMappedData = m_pBlock->GetMappedData();
11063  return VK_SUCCESS;
11064  }
11065 
11066  // Map on first usage.
11067  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
11068  *ppMappedData = m_pMappedDataForDefragmentation;
11069  return res;
11070 }
11071 
11072 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
11073 {
11074  if(m_pMappedDataForDefragmentation != VMA_NULL)
11075  {
11076  m_pBlock->Unmap(hAllocator, 1);
11077  }
11078 }
11079 
11080 VkResult VmaDefragmentator::DefragmentRound(
11081  VkDeviceSize maxBytesToMove,
11082  uint32_t maxAllocationsToMove)
11083 {
11084  if(m_Blocks.empty())
11085  {
11086  return VK_SUCCESS;
11087  }
11088 
11089  size_t srcBlockIndex = m_Blocks.size() - 1;
11090  size_t srcAllocIndex = SIZE_MAX;
11091  for(;;)
11092  {
11093  // 1. Find next allocation to move.
11094  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11095  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11096  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11097  {
11098  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11099  {
11100  // Finished: no more allocations to process.
11101  if(srcBlockIndex == 0)
11102  {
11103  return VK_SUCCESS;
11104  }
11105  else
11106  {
11107  --srcBlockIndex;
11108  srcAllocIndex = SIZE_MAX;
11109  }
11110  }
11111  else
11112  {
11113  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11114  }
11115  }
11116 
11117  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11118  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11119 
11120  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11121  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11122  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11123  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11124 
11125  // 2. Try to find new place for this allocation in preceding or current block.
11126  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11127  {
11128  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11129  VmaAllocationRequest dstAllocRequest;
11130  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11131  m_CurrentFrameIndex,
11132  m_pBlockVector->GetFrameInUseCount(),
11133  m_pBlockVector->GetBufferImageGranularity(),
11134  size,
11135  alignment,
11136  false, // upperAddress
11137  suballocType,
11138  false, // canMakeOtherLost
11139  VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, // strategy
11140  &dstAllocRequest) &&
11141  MoveMakesSense(
11142  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11143  {
11144  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11145 
11146  // Reached limit on number of allocations or bytes to move.
11147  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11148  (m_BytesMoved + size > maxBytesToMove))
11149  {
11150  return VK_INCOMPLETE;
11151  }
11152 
11153  void* pDstMappedData = VMA_NULL;
11154  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11155  if(res != VK_SUCCESS)
11156  {
11157  return res;
11158  }
11159 
11160  void* pSrcMappedData = VMA_NULL;
11161  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11162  if(res != VK_SUCCESS)
11163  {
11164  return res;
11165  }
11166 
11167  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11168  memcpy(
11169  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11170  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11171  static_cast<size_t>(size));
11172 
11173  if(VMA_DEBUG_MARGIN > 0)
11174  {
11175  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11176  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11177  }
11178 
11179  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11180  dstAllocRequest,
11181  suballocType,
11182  size,
11183  false, // upperAddress
11184  allocInfo.m_hAllocation);
11185  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11186 
11187  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11188 
11189  if(allocInfo.m_pChanged != VMA_NULL)
11190  {
11191  *allocInfo.m_pChanged = VK_TRUE;
11192  }
11193 
11194  ++m_AllocationsMoved;
11195  m_BytesMoved += size;
11196 
11197  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11198 
11199  break;
11200  }
11201  }
11202 
11203  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11204 
11205  if(srcAllocIndex > 0)
11206  {
11207  --srcAllocIndex;
11208  }
11209  else
11210  {
11211  if(srcBlockIndex > 0)
11212  {
11213  --srcBlockIndex;
11214  srcAllocIndex = SIZE_MAX;
11215  }
11216  else
11217  {
11218  return VK_SUCCESS;
11219  }
11220  }
11221  }
11222 }
11223 
11224 VkResult VmaDefragmentator::Defragment(
11225  VkDeviceSize maxBytesToMove,
11226  uint32_t maxAllocationsToMove)
11227 {
11228  if(m_Allocations.empty())
11229  {
11230  return VK_SUCCESS;
11231  }
11232 
11233  // Create block info for each block.
11234  const size_t blockCount = m_pBlockVector->m_Blocks.size();
11235  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11236  {
11237  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
11238  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
11239  m_Blocks.push_back(pBlockInfo);
11240  }
11241 
11242  // Sort them by m_pBlock pointer value.
11243  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
11244 
11245  // Move allocation infos from m_Allocations to m_Allocations of the matching BlockInfo in m_Blocks.
11246  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
11247  {
11248  AllocationInfo& allocInfo = m_Allocations[allocIndex];
11249  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
11250  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11251  {
11252  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
11253  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
11254  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
11255  {
11256  (*it)->m_Allocations.push_back(allocInfo);
11257  }
11258  else
11259  {
11260  VMA_ASSERT(0);
11261  }
11262  }
11263  }
11264  m_Allocations.clear();
11265 
11266  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11267  {
11268  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
11269  pBlockInfo->CalcHasNonMovableAllocations();
11270  pBlockInfo->SortAllocationsBySizeDescecnding();
11271  }
11272 
11273  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
11274  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
11275 
11276  // Execute defragmentation rounds (the main part).
11277  VkResult result = VK_SUCCESS;
11278  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
11279  {
11280  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
11281  }
11282 
11283  // Unmap blocks that were mapped for defragmentation.
11284  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11285  {
11286  m_Blocks[blockIndex]->Unmap(m_hAllocator);
11287  }
11288 
11289  return result;
11290 }
11291 
11292 bool VmaDefragmentator::MoveMakesSense(
11293  size_t dstBlockIndex, VkDeviceSize dstOffset,
11294  size_t srcBlockIndex, VkDeviceSize srcOffset)
11295 {
11296  if(dstBlockIndex < srcBlockIndex)
11297  {
11298  return true;
11299  }
11300  if(dstBlockIndex > srcBlockIndex)
11301  {
11302  return false;
11303  }
11304  if(dstOffset < srcOffset)
11305  {
11306  return true;
11307  }
11308  return false;
11309 }
11310 
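MoveMakesSense() above encodes the compaction direction: data only ever moves to a lower block index, or toward offset 0 within the same block, which guarantees each round makes monotonic progress. A few checks against a copy of the predicate:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Same ordering rule as MoveMakesSense() above.
bool MoveMakesSenseSketch(size_t dstBlockIndex, uint64_t dstOffset,
    size_t srcBlockIndex, uint64_t srcOffset)
{
    if(dstBlockIndex != srcBlockIndex)
    {
        return dstBlockIndex < srcBlockIndex;
    }
    return dstOffset < srcOffset;
}

int main()
{
    assert(MoveMakesSenseSketch(0, 100, 1, 0));  // earlier block always wins
    assert(!MoveMakesSenseSketch(1, 0, 0, 100)); // never move to a later block
    assert(MoveMakesSenseSketch(0, 0, 0, 64));   // same block: only toward offset 0
    assert(!MoveMakesSenseSketch(0, 64, 0, 0));
}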
11311 ////////////////////////////////////////////////////////////////////////////////
11312 // VmaRecorder
11313 
11314 #if VMA_RECORDING_ENABLED
11315 
11316 VmaRecorder::VmaRecorder() :
11317  m_UseMutex(true),
11318  m_Flags(0),
11319  m_File(VMA_NULL),
11320  m_Freq(INT64_MAX),
11321  m_StartCounter(INT64_MAX)
11322 {
11323 }
11324 
11325 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
11326 {
11327  m_UseMutex = useMutex;
11328  m_Flags = settings.flags;
11329 
11330  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
11331  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
11332 
11333  // Open file for writing.
11334  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
11335  if(err != 0)
11336  {
11337  return VK_ERROR_INITIALIZATION_FAILED;
11338  }
11339 
11340  // Write header.
11341  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
11342  fprintf(m_File, "%s\n", "1,3");
11343 
11344  return VK_SUCCESS;
11345 }
11346 
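The recorder above writes a CSV stream whose rows follow "threadId,time,frameIndex,functionName,args...", as the fprintf calls below show. Recording is enabled at allocator creation; a hedged sketch, where the file path and flags are hypothetical:

#include <vk_mem_alloc.h>

VkResult CreateAllocatorWithRecording(VkPhysicalDevice physicalDevice,
    VkDevice device, VmaAllocator* pAllocator)
{
    VmaRecordSettings recordSettings = {};
    recordSettings.flags = 0; // e.g. VMA_RECORD_FLUSH_AFTER_CALL_BIT
    recordSettings.pFilePath = "vma_capture.csv"; // hypothetical output path

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    // Only honored when compiled with VMA_RECORDING_ENABLED.
    allocatorInfo.pRecordSettings = &recordSettings;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}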
11347 VmaRecorder::~VmaRecorder()
11348 {
11349  if(m_File != VMA_NULL)
11350  {
11351  fclose(m_File);
11352  }
11353 }
11354 
11355 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11356 {
11357  CallParams callParams;
11358  GetBasicParams(callParams);
11359 
11360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11361  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11362  Flush();
11363 }
11364 
11365 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11366 {
11367  CallParams callParams;
11368  GetBasicParams(callParams);
11369 
11370  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11371  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11372  Flush();
11373 }
11374 
11375 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
11376 {
11377  CallParams callParams;
11378  GetBasicParams(callParams);
11379 
11380  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11381  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
11382  createInfo.memoryTypeIndex,
11383  createInfo.flags,
11384  createInfo.blockSize,
11385  (uint64_t)createInfo.minBlockCount,
11386  (uint64_t)createInfo.maxBlockCount,
11387  createInfo.frameInUseCount,
11388  pool);
11389  Flush();
11390 }
11391 
11392 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11393 {
11394  CallParams callParams;
11395  GetBasicParams(callParams);
11396 
11397  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11398  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11399  pool);
11400  Flush();
11401 }
11402 
11403 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
11404  const VkMemoryRequirements& vkMemReq,
11405  const VmaAllocationCreateInfo& createInfo,
11406  VmaAllocation allocation)
11407 {
11408  CallParams callParams;
11409  GetBasicParams(callParams);
11410 
11411  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11412  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11413  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11414  vkMemReq.size,
11415  vkMemReq.alignment,
11416  vkMemReq.memoryTypeBits,
11417  createInfo.flags,
11418  createInfo.usage,
11419  createInfo.requiredFlags,
11420  createInfo.preferredFlags,
11421  createInfo.memoryTypeBits,
11422  createInfo.pool,
11423  allocation,
11424  userDataStr.GetString());
11425  Flush();
11426 }
11427 
11428 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
11429  const VkMemoryRequirements& vkMemReq,
11430  bool requiresDedicatedAllocation,
11431  bool prefersDedicatedAllocation,
11432  const VmaAllocationCreateInfo& createInfo,
11433  VmaAllocation allocation)
11434 {
11435  CallParams callParams;
11436  GetBasicParams(callParams);
11437 
11438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11439  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11440  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11441  vkMemReq.size,
11442  vkMemReq.alignment,
11443  vkMemReq.memoryTypeBits,
11444  requiresDedicatedAllocation ? 1 : 0,
11445  prefersDedicatedAllocation ? 1 : 0,
11446  createInfo.flags,
11447  createInfo.usage,
11448  createInfo.requiredFlags,
11449  createInfo.preferredFlags,
11450  createInfo.memoryTypeBits,
11451  createInfo.pool,
11452  allocation,
11453  userDataStr.GetString());
11454  Flush();
11455 }
11456 
11457 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
11458  const VkMemoryRequirements& vkMemReq,
11459  bool requiresDedicatedAllocation,
11460  bool prefersDedicatedAllocation,
11461  const VmaAllocationCreateInfo& createInfo,
11462  VmaAllocation allocation)
11463 {
11464  CallParams callParams;
11465  GetBasicParams(callParams);
11466 
11467  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11468  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
11469  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11470  vkMemReq.size,
11471  vkMemReq.alignment,
11472  vkMemReq.memoryTypeBits,
11473  requiresDedicatedAllocation ? 1 : 0,
11474  prefersDedicatedAllocation ? 1 : 0,
11475  createInfo.flags,
11476  createInfo.usage,
11477  createInfo.requiredFlags,
11478  createInfo.preferredFlags,
11479  createInfo.memoryTypeBits,
11480  createInfo.pool,
11481  allocation,
11482  userDataStr.GetString());
11483  Flush();
11484 }
11485 
11486 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11487  VmaAllocation allocation)
11488 {
11489  CallParams callParams;
11490  GetBasicParams(callParams);
11491 
11492  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11493  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11494  allocation);
11495  Flush();
11496 }
11497 
11498 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11499  VmaAllocation allocation,
11500  const void* pUserData)
11501 {
11502  CallParams callParams;
11503  GetBasicParams(callParams);
11504 
11505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11506  UserDataString userDataStr(
11507  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11508  pUserData);
11509  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11510  allocation,
11511  userDataStr.GetString());
11512  Flush();
11513 }
11514 
11515 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11516  VmaAllocation allocation)
11517 {
11518  CallParams callParams;
11519  GetBasicParams(callParams);
11520 
11521  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11522  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11523  allocation);
11524  Flush();
11525 }
11526 
11527 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11528  VmaAllocation allocation)
11529 {
11530  CallParams callParams;
11531  GetBasicParams(callParams);
11532 
11533  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11534  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11535  allocation);
11536  Flush();
11537 }
11538 
11539 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11540  VmaAllocation allocation)
11541 {
11542  CallParams callParams;
11543  GetBasicParams(callParams);
11544 
11545  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11546  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11547  allocation);
11548  Flush();
11549 }
11550 
11551 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11552  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11553 {
11554  CallParams callParams;
11555  GetBasicParams(callParams);
11556 
11557  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11558  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11559  allocation,
11560  offset,
11561  size);
11562  Flush();
11563 }
11564 
11565 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11566  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11567 {
11568  CallParams callParams;
11569  GetBasicParams(callParams);
11570 
11571  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11572  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11573  allocation,
11574  offset,
11575  size);
11576  Flush();
11577 }
11578 
11579 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
11580  const VkBufferCreateInfo& bufCreateInfo,
11581  const VmaAllocationCreateInfo& allocCreateInfo,
11582  VmaAllocation allocation)
11583 {
11584  CallParams callParams;
11585  GetBasicParams(callParams);
11586 
11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11588  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11589  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11590  bufCreateInfo.flags,
11591  bufCreateInfo.size,
11592  bufCreateInfo.usage,
11593  bufCreateInfo.sharingMode,
11594  allocCreateInfo.flags,
11595  allocCreateInfo.usage,
11596  allocCreateInfo.requiredFlags,
11597  allocCreateInfo.preferredFlags,
11598  allocCreateInfo.memoryTypeBits,
11599  allocCreateInfo.pool,
11600  allocation,
11601  userDataStr.GetString());
11602  Flush();
11603 }
11604 
11605 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
11606  const VkImageCreateInfo& imageCreateInfo,
11607  const VmaAllocationCreateInfo& allocCreateInfo,
11608  VmaAllocation allocation)
11609 {
11610  CallParams callParams;
11611  GetBasicParams(callParams);
11612 
11613  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11614  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
11615  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11616  imageCreateInfo.flags,
11617  imageCreateInfo.imageType,
11618  imageCreateInfo.format,
11619  imageCreateInfo.extent.width,
11620  imageCreateInfo.extent.height,
11621  imageCreateInfo.extent.depth,
11622  imageCreateInfo.mipLevels,
11623  imageCreateInfo.arrayLayers,
11624  imageCreateInfo.samples,
11625  imageCreateInfo.tiling,
11626  imageCreateInfo.usage,
11627  imageCreateInfo.sharingMode,
11628  imageCreateInfo.initialLayout,
11629  allocCreateInfo.flags,
11630  allocCreateInfo.usage,
11631  allocCreateInfo.requiredFlags,
11632  allocCreateInfo.preferredFlags,
11633  allocCreateInfo.memoryTypeBits,
11634  allocCreateInfo.pool,
11635  allocation,
11636  userDataStr.GetString());
11637  Flush();
11638 }
11639 
11640 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11641  VmaAllocation allocation)
11642 {
11643  CallParams callParams;
11644  GetBasicParams(callParams);
11645 
11646  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11647  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11648  allocation);
11649  Flush();
11650 }
11651 
11652 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11653  VmaAllocation allocation)
11654 {
11655  CallParams callParams;
11656  GetBasicParams(callParams);
11657 
11658  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11659  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11660  allocation);
11661  Flush();
11662 }
11663 
11664 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11665  VmaAllocation allocation)
11666 {
11667  CallParams callParams;
11668  GetBasicParams(callParams);
11669 
11670  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11671  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11672  allocation);
11673  Flush();
11674 }
11675 
11676 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11677  VmaAllocation allocation)
11678 {
11679  CallParams callParams;
11680  GetBasicParams(callParams);
11681 
11682  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11683  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11684  allocation);
11685  Flush();
11686 }
11687 
11688 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11689  VmaPool pool)
11690 {
11691  CallParams callParams;
11692  GetBasicParams(callParams);
11693 
11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11695  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11696  pool);
11697  Flush();
11698 }
11699 
11700 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11701 {
11702  if(pUserData != VMA_NULL)
11703  {
11704  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11705  {
11706  m_Str = (const char*)pUserData;
11707  }
11708  else
11709  {
11710  sprintf_s(m_PtrStr, "%p", pUserData);
11711  m_Str = m_PtrStr;
11712  }
11713  }
11714  else
11715  {
11716  m_Str = "";
11717  }
11718 }
11719 
11720 void VmaRecorder::WriteConfiguration(
11721  const VkPhysicalDeviceProperties& devProps,
11722  const VkPhysicalDeviceMemoryProperties& memProps,
11723  bool dedicatedAllocationExtensionEnabled)
11724 {
11725  fprintf(m_File, "Config,Begin\n");
11726 
11727  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
11728  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
11729  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
11730  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
11731  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
11732  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
11733 
11734  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
11735  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
11736  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
11737 
11738  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
11739  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
11740  {
11741  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
11742  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
11743  }
11744  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
11745  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
11746  {
11747  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
11748  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
11749  }
11750 
11751  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
11752 
11753  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
11754  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
11755  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
11756  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
11757  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
11758  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
11759  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
11760  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
11761  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11762 
11763  fprintf(m_File, "Config,End\n");
11764 }
11765 
11766 void VmaRecorder::GetBasicParams(CallParams& outParams)
11767 {
11768  outParams.threadId = GetCurrentThreadId();
11769 
11770  LARGE_INTEGER counter;
11771  QueryPerformanceCounter(&counter);
11772  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11773 }
11774 
11775 void VmaRecorder::Flush()
11776 {
11777  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11778  {
11779  fflush(m_File);
11780  }
11781 }
11782 
11783 #endif // #if VMA_RECORDING_ENABLED
11784 
11785 ////////////////////////////////////////////////////////////////////////////////
11786 // VmaAllocator_T
11787 
11788 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11789  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11790  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11791  m_hDevice(pCreateInfo->device),
11792  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11793  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11794  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11795  m_PreferredLargeHeapBlockSize(0),
11796  m_PhysicalDevice(pCreateInfo->physicalDevice),
11797  m_CurrentFrameIndex(0),
11798  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11799  m_NextPoolId(0)
11800 #if VMA_RECORDING_ENABLED
11801  ,m_pRecorder(VMA_NULL)
11802 #endif
11803 {
11804  if(VMA_DEBUG_DETECT_CORRUPTION)
11805  {
11806  // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11807  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11808  }
11809 
11810  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11811 
11812 #if !(VMA_DEDICATED_ALLOCATION)
11813  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
11814  {
11815  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11816  }
11817 #endif
11818 
11819  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
11820  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11821  memset(&m_MemProps, 0, sizeof(m_MemProps));
11822 
11823  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11824  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11825 
11826  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11827  {
11828  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11829  }
11830 
11831  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11832  {
11833  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11834  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11835  }
11836 
11837  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11838 
11839  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11840  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11841 
11842  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11843  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11844  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11845  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11846 
11847  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11848  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11849 
11850  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11851  {
11852  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11853  {
11854  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11855  if(limit != VK_WHOLE_SIZE)
11856  {
11857  m_HeapSizeLimit[heapIndex] = limit;
11858  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11859  {
11860  m_MemProps.memoryHeaps[heapIndex].size = limit;
11861  }
11862  }
11863  }
11864  }
11865 
11866  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11867  {
11868  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11869 
11870  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11871  this,
11872  memTypeIndex,
11873  preferredBlockSize,
11874  0,
11875  SIZE_MAX,
11876  GetBufferImageGranularity(),
11877  pCreateInfo->frameInUseCount,
11878  false, // isCustomPool
11879  false, // explicitBlockSize
11880  false); // linearAlgorithm
11881  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
11882  // because minBlockCount is 0.
11883  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11884 
11885  }
11886 }
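/*
Editor's sketch (not part of the original file): how VmaAllocatorCreateInfo::pHeapSizeLimit,
consumed by the constructor above, might be filled from user code. Variable names and the
chosen limit are hypothetical.

static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit".
    }
    heapLimits[0] = 256ull * 1024 * 1024; // Hypothetical: cap heap 0 at 256 MiB.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}
*/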
11887 
11888 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11889 {
11890  VkResult res = VK_SUCCESS;
11891 
11892  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11893  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11894  {
11895 #if VMA_RECORDING_ENABLED
11896  m_pRecorder = vma_new(this, VmaRecorder)();
11897  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11898  if(res != VK_SUCCESS)
11899  {
11900  return res;
11901  }
11902  m_pRecorder->WriteConfiguration(
11903  m_PhysicalDeviceProperties,
11904  m_MemProps,
11905  m_UseKhrDedicatedAllocation);
11906  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11907 #else
11908  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11909  return VK_ERROR_FEATURE_NOT_PRESENT;
11910 #endif
11911  }
11912 
11913  return res;
11914 }
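/*
Editor's sketch (not part of the original file): enabling the recorder that Init()
above creates. If VMA_RECORDING_ENABLED is not 1, vmaCreateAllocator fails with
VK_ERROR_FEATURE_NOT_PRESENT, as coded above. The output file path is hypothetical.

static VkResult ExampleCreateAllocatorWithRecording(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pAllocator)
{
    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call.
    recordSettings.pFilePath = "vma_capture.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;
    return vmaCreateAllocator(&allocatorInfo, pAllocator);
}
*/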
11915 
11916 VmaAllocator_T::~VmaAllocator_T()
11917 {
11918 #if VMA_RECORDING_ENABLED
11919  if(m_pRecorder != VMA_NULL)
11920  {
11921  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11922  vma_delete(this, m_pRecorder);
11923  }
11924 #endif
11925 
11926  VMA_ASSERT(m_Pools.empty());
11927 
11928  for(size_t i = GetMemoryTypeCount(); i--; )
11929  {
11930  vma_delete(this, m_pDedicatedAllocations[i]);
11931  vma_delete(this, m_pBlockVectors[i]);
11932  }
11933 }
11934 
11935 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
11936 {
11937 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11938  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
11939  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
11940  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
11941  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
11942  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
11943  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
11944  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
11945  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
11946  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
11947  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
11948  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
11949  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
11950  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
11951  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
11952  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
11953  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
11954 #if VMA_DEDICATED_ALLOCATION
11955  if(m_UseKhrDedicatedAllocation)
11956  {
11957  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
11958  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
11959  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
11960  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
11961  }
11962 #endif // #if VMA_DEDICATED_ALLOCATION
11963 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
11964 
11965 #define VMA_COPY_IF_NOT_NULL(funcName) \
11966  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
11967 
11968  if(pVulkanFunctions != VMA_NULL)
11969  {
11970  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
11971  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
11972  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
11973  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
11974  VMA_COPY_IF_NOT_NULL(vkMapMemory);
11975  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
11976  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
11977  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
11978  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
11979  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
11980  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
11981  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
11982  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
11983  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
11984  VMA_COPY_IF_NOT_NULL(vkCreateImage);
11985  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
11986 #if VMA_DEDICATED_ALLOCATION
11987  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
11988  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
11989 #endif
11990  }
11991 
11992 #undef VMA_COPY_IF_NOT_NULL
11993 
11994  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
11995  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
11996  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
11997  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
11998  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
11999  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
12000  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
12001  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
12002  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
12003  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
12004  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
12005  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
12006  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
12007  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
12008  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
12009  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
12010  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
12011  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
12012 #if VMA_DEDICATED_ALLOCATION
12013  if(m_UseKhrDedicatedAllocation)
12014  {
12015  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
12016  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
12017  }
12018 #endif
12019 }
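/*
Editor's sketch (not part of the original file): supplying the function pointers that
ImportVulkanFunctions() above consumes, e.g. when entry points come from a runtime
loader instead of static linking. Members left VMA_NULL keep whatever was imported
statically (with VMA_STATIC_VULKAN_FUNCTIONS == 1).

static void ExampleSetVulkanFunctions(VmaAllocatorCreateInfo* pAllocatorInfo,
    VmaVulkanFunctions* pFunctions)
{
    pFunctions->vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    pFunctions->vkAllocateMemory = vkAllocateMemory;
    pFunctions->vkFreeMemory = vkFreeMemory;
    // ...assign the remaining members the same way...
    pAllocatorInfo->pVulkanFunctions = pFunctions;
}
*/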
12020 
12021 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
12022 {
12023  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12024  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
12025  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
12026  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
12027 }
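/*
Editor's note: a worked example of the rule above. With the default
VMA_SMALL_HEAP_MAX_SIZE of 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB,
a 512 MiB heap counts as small and gets 512 MiB / 8 = 64 MiB blocks, while an
8 GiB heap gets 256 MiB blocks (or VmaAllocatorCreateInfo::preferredLargeHeapBlockSize,
if it was set to a nonzero value).
*/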
12028 
12029 VkResult VmaAllocator_T::AllocateMemoryOfType(
12030  VkDeviceSize size,
12031  VkDeviceSize alignment,
12032  bool dedicatedAllocation,
12033  VkBuffer dedicatedBuffer,
12034  VkImage dedicatedImage,
12035  const VmaAllocationCreateInfo& createInfo,
12036  uint32_t memTypeIndex,
12037  VmaSuballocationType suballocType,
12038  VmaAllocation* pAllocation)
12039 {
12040  VMA_ASSERT(pAllocation != VMA_NULL);
12041  VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, size);
12042 
12043  VmaAllocationCreateInfo finalCreateInfo = createInfo;
12044 
12045  // If memory type is not HOST_VISIBLE, disable MAPPED.
12046  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12047  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12048  {
12049  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
12050  }
12051 
12052  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
12053  VMA_ASSERT(blockVector);
12054 
12055  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
12056  bool preferDedicatedMemory =
12057  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
12058  dedicatedAllocation ||
12059  // Heuristic: Allocate dedicated memory if requested size is greater than half of preferred block size.
12060  size > preferredBlockSize / 2;
12061 
12062  if(preferDedicatedMemory &&
12063  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
12064  finalCreateInfo.pool == VK_NULL_HANDLE)
12065  {
12066  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
12067  }
12068 
12069  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
12070  {
12071  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12072  {
12073  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12074  }
12075  else
12076  {
12077  return AllocateDedicatedMemory(
12078  size,
12079  suballocType,
12080  memTypeIndex,
12081  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12082  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12083  finalCreateInfo.pUserData,
12084  dedicatedBuffer,
12085  dedicatedImage,
12086  pAllocation);
12087  }
12088  }
12089  else
12090  {
12091  VkResult res = blockVector->Allocate(
12092  VK_NULL_HANDLE, // hCurrentPool
12093  m_CurrentFrameIndex.load(),
12094  size,
12095  alignment,
12096  finalCreateInfo,
12097  suballocType,
12098  pAllocation);
12099  if(res == VK_SUCCESS)
12100  {
12101  return res;
12102  }
12103 
12104  // Allocation from the block vector failed: try dedicated memory as a fallback.
12105  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12106  {
12107  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12108  }
12109  else
12110  {
12111  res = AllocateDedicatedMemory(
12112  size,
12113  suballocType,
12114  memTypeIndex,
12115  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12116  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12117  finalCreateInfo.pUserData,
12118  dedicatedBuffer,
12119  dedicatedImage,
12120  pAllocation);
12121  if(res == VK_SUCCESS)
12122  {
12123  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
12124  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12125  return VK_SUCCESS;
12126  }
12127  else
12128  {
12129  // Everything failed: Return error code.
12130  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12131  return res;
12132  }
12133  }
12134  }
12135 }
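/*
Editor's sketch (not part of the original file): forcing the dedicated-memory branch
of AllocateMemoryOfType() above from user code. The buffer parameters are hypothetical.

static VkResult ExampleCreateDedicatedBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = size;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, pBuffer, pAllocation, nullptr);
}
*/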
12136 
12137 VkResult VmaAllocator_T::AllocateDedicatedMemory(
12138  VkDeviceSize size,
12139  VmaSuballocationType suballocType,
12140  uint32_t memTypeIndex,
12141  bool map,
12142  bool isUserDataString,
12143  void* pUserData,
12144  VkBuffer dedicatedBuffer,
12145  VkImage dedicatedImage,
12146  VmaAllocation* pAllocation)
12147 {
12148  VMA_ASSERT(pAllocation);
12149 
12150  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
12151  allocInfo.memoryTypeIndex = memTypeIndex;
12152  allocInfo.allocationSize = size;
12153 
12154 #if VMA_DEDICATED_ALLOCATION
12155  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
12156  if(m_UseKhrDedicatedAllocation)
12157  {
12158  if(dedicatedBuffer != VK_NULL_HANDLE)
12159  {
12160  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
12161  dedicatedAllocInfo.buffer = dedicatedBuffer;
12162  allocInfo.pNext = &dedicatedAllocInfo;
12163  }
12164  else if(dedicatedImage != VK_NULL_HANDLE)
12165  {
12166  dedicatedAllocInfo.image = dedicatedImage;
12167  allocInfo.pNext = &dedicatedAllocInfo;
12168  }
12169  }
12170 #endif // #if VMA_DEDICATED_ALLOCATION
12171 
12172  // Allocate VkDeviceMemory.
12173  VkDeviceMemory hMemory = VK_NULL_HANDLE;
12174  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
12175  if(res < 0)
12176  {
12177  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12178  return res;
12179  }
12180 
12181  void* pMappedData = VMA_NULL;
12182  if(map)
12183  {
12184  res = (*m_VulkanFunctions.vkMapMemory)(
12185  m_hDevice,
12186  hMemory,
12187  0,
12188  VK_WHOLE_SIZE,
12189  0,
12190  &pMappedData);
12191  if(res < 0)
12192  {
12193  VMA_DEBUG_LOG(" vkMapMemory FAILED");
12194  FreeVulkanMemory(memTypeIndex, size, hMemory);
12195  return res;
12196  }
12197  }
12198 
12199  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
12200  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
12201  (*pAllocation)->SetUserData(this, pUserData);
12202  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12203  {
12204  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12205  }
12206 
12207  // Register it in m_pDedicatedAllocations.
12208  {
12209  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12210  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12211  VMA_ASSERT(pDedicatedAllocations);
12212  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
12213  }
12214 
12215  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
12216 
12217  return VK_SUCCESS;
12218 }
12219 
12220 void VmaAllocator_T::GetBufferMemoryRequirements(
12221  VkBuffer hBuffer,
12222  VkMemoryRequirements& memReq,
12223  bool& requiresDedicatedAllocation,
12224  bool& prefersDedicatedAllocation) const
12225 {
12226 #if VMA_DEDICATED_ALLOCATION
12227  if(m_UseKhrDedicatedAllocation)
12228  {
12229  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
12230  memReqInfo.buffer = hBuffer;
12231 
12232  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12233 
12234  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12235  memReq2.pNext = &memDedicatedReq;
12236 
12237  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12238 
12239  memReq = memReq2.memoryRequirements;
12240  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12241  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12242  }
12243  else
12244 #endif // #if VMA_DEDICATED_ALLOCATION
12245  {
12246  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
12247  requiresDedicatedAllocation = false;
12248  prefersDedicatedAllocation = false;
12249  }
12250 }
12251 
12252 void VmaAllocator_T::GetImageMemoryRequirements(
12253  VkImage hImage,
12254  VkMemoryRequirements& memReq,
12255  bool& requiresDedicatedAllocation,
12256  bool& prefersDedicatedAllocation) const
12257 {
12258 #if VMA_DEDICATED_ALLOCATION
12259  if(m_UseKhrDedicatedAllocation)
12260  {
12261  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
12262  memReqInfo.image = hImage;
12263 
12264  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
12265 
12266  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
12267  memReq2.pNext = &memDedicatedReq;
12268 
12269  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
12270 
12271  memReq = memReq2.memoryRequirements;
12272  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
12273  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
12274  }
12275  else
12276 #endif // #if VMA_DEDICATED_ALLOCATION
12277  {
12278  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
12279  requiresDedicatedAllocation = false;
12280  prefersDedicatedAllocation = false;
12281  }
12282 }
12283 
12284 VkResult VmaAllocator_T::AllocateMemory(
12285  const VkMemoryRequirements& vkMemReq,
12286  bool requiresDedicatedAllocation,
12287  bool prefersDedicatedAllocation,
12288  VkBuffer dedicatedBuffer,
12289  VkImage dedicatedImage,
12290  const VmaAllocationCreateInfo& createInfo,
12291  VmaSuballocationType suballocType,
12292  VmaAllocation* pAllocation)
12293 {
12294  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12295 
12296  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12297  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12298  {
12299  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12300  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12301  }
12302  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12303  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
12304  {
12305  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12306  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12307  }
12308  if(requiresDedicatedAllocation)
12309  {
12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12311  {
12312  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12313  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12314  }
12315  if(createInfo.pool != VK_NULL_HANDLE)
12316  {
12317  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12318  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12319  }
12320  }
12321  if((createInfo.pool != VK_NULL_HANDLE) &&
12322  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12323  {
12324  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12326  }
12327 
12328  if(createInfo.pool != VK_NULL_HANDLE)
12329  {
12330  const VkDeviceSize alignmentForPool = VMA_MAX(
12331  vkMemReq.alignment,
12332  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12333  return createInfo.pool->m_BlockVector.Allocate(
12334  createInfo.pool,
12335  m_CurrentFrameIndex.load(),
12336  vkMemReq.size,
12337  alignmentForPool,
12338  createInfo,
12339  suballocType,
12340  pAllocation);
12341  }
12342  else
12343  {
12344  // Bit mask of Vulkan memory types acceptable for this allocation.
12345  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12346  uint32_t memTypeIndex = UINT32_MAX;
12347  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12348  if(res == VK_SUCCESS)
12349  {
12350  VkDeviceSize alignmentForMemType = VMA_MAX(
12351  vkMemReq.alignment,
12352  GetMemoryTypeMinAlignment(memTypeIndex));
12353 
12354  res = AllocateMemoryOfType(
12355  vkMemReq.size,
12356  alignmentForMemType,
12357  requiresDedicatedAllocation || prefersDedicatedAllocation,
12358  dedicatedBuffer,
12359  dedicatedImage,
12360  createInfo,
12361  memTypeIndex,
12362  suballocType,
12363  pAllocation);
12364  // Succeeded on first try.
12365  if(res == VK_SUCCESS)
12366  {
12367  return res;
12368  }
12369  // Allocation from this memory type failed. Try other compatible memory types.
12370  else
12371  {
12372  for(;;)
12373  {
12374  // Remove old memTypeIndex from list of possibilities.
12375  memoryTypeBits &= ~(1u << memTypeIndex);
12376  // Find alternative memTypeIndex.
12377  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12378  if(res == VK_SUCCESS)
12379  {
12380  alignmentForMemType = VMA_MAX(
12381  vkMemReq.alignment,
12382  GetMemoryTypeMinAlignment(memTypeIndex));
12383 
12384  res = AllocateMemoryOfType(
12385  vkMemReq.size,
12386  alignmentForMemType,
12387  requiresDedicatedAllocation || prefersDedicatedAllocation,
12388  dedicatedBuffer,
12389  dedicatedImage,
12390  createInfo,
12391  memTypeIndex,
12392  suballocType,
12393  pAllocation);
12394  // Allocation from this alternative memory type succeeded.
12395  if(res == VK_SUCCESS)
12396  {
12397  return res;
12398  }
12399  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12400  }
12401  // No other matching memory type index could be found.
12402  else
12403  {
12404  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12405  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12406  }
12407  }
12408  }
12409  }
12410  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12411  else
12412  return res;
12413  }
12414 }
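/*
Editor's sketch (not part of the original file): performing the same memory-type
selection as the fallback loop above, without allocating, via the public API.

static uint32_t ExampleFindStagingMemType(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    uint32_t memTypeIndex = UINT32_MAX;
    // Returns VK_ERROR_FEATURE_NOT_PRESENT if no compatible type exists.
    vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocInfo, &memTypeIndex);
    return memTypeIndex;
}
*/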
12415 
12416 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12417 {
12418  VMA_ASSERT(allocation);
12419 
12420  if(TouchAllocation(allocation))
12421  {
12422  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12423  {
12424  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12425  }
12426 
12427  switch(allocation->GetType())
12428  {
12429  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12430  {
12431  VmaBlockVector* pBlockVector = VMA_NULL;
12432  VmaPool hPool = allocation->GetPool();
12433  if(hPool != VK_NULL_HANDLE)
12434  {
12435  pBlockVector = &hPool->m_BlockVector;
12436  }
12437  else
12438  {
12439  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12440  pBlockVector = m_pBlockVectors[memTypeIndex];
12441  }
12442  pBlockVector->Free(allocation);
12443  }
12444  break;
12445  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12446  FreeDedicatedMemory(allocation);
12447  break;
12448  default:
12449  VMA_ASSERT(0);
12450  }
12451  }
12452 
12453  allocation->SetUserData(this, VMA_NULL);
12454  vma_delete(this, allocation);
12455 }
12456 
12457 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12458 {
12459  // Initialize.
12460  InitStatInfo(pStats->total);
12461  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12462  InitStatInfo(pStats->memoryType[i]);
12463  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12464  InitStatInfo(pStats->memoryHeap[i]);
12465 
12466  // Process default pools.
12467  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12468  {
12469  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12470  VMA_ASSERT(pBlockVector);
12471  pBlockVector->AddStats(pStats);
12472  }
12473 
12474  // Process custom pools.
12475  {
12476  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12477  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12478  {
12479  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12480  }
12481  }
12482 
12483  // Process dedicated allocations.
12484  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12485  {
12486  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12487  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12488  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12489  VMA_ASSERT(pDedicatedAllocVector);
12490  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12491  {
12492  VmaStatInfo allocationStatInfo;
12493  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12494  VmaAddStatInfo(pStats->total, allocationStatInfo);
12495  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12496  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12497  }
12498  }
12499 
12500  // Postprocess.
12501  VmaPostprocessCalcStatInfo(pStats->total);
12502  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12503  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12504  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12505  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12506 }
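/*
Editor's sketch (not part of the original file): the public entry point
vmaCalculateStats() drives the function above.

static void ExamplePrintTotalUsage(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B in %u allocations, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount,
        (unsigned long long)stats.total.unusedBytes);
}
*/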
12507 
12508 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12509 
12510 VkResult VmaAllocator_T::Defragment(
12511  VmaAllocation* pAllocations,
12512  size_t allocationCount,
12513  VkBool32* pAllocationsChanged,
12514  const VmaDefragmentationInfo* pDefragmentationInfo,
12515  VmaDefragmentationStats* pDefragmentationStats)
12516 {
12517  if(pAllocationsChanged != VMA_NULL)
12518  {
12519  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
12520  }
12521  if(pDefragmentationStats != VMA_NULL)
12522  {
12523  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12524  }
12525 
12526  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12527 
12528  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12529 
12530  const size_t poolCount = m_Pools.size();
12531 
12532  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12533  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12534  {
12535  VmaAllocation hAlloc = pAllocations[allocIndex];
12536  VMA_ASSERT(hAlloc);
12537  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12538  // DedicatedAlloc cannot be defragmented.
12539  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12540  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12541  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12542  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12543  // Lost allocation cannot be defragmented.
12544  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12545  {
12546  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12547 
12548  const VmaPool hAllocPool = hAlloc->GetPool();
12549  // This allocation belongs to a custom pool.
12550  if(hAllocPool != VK_NULL_HANDLE)
12551  {
12552  // Pools with linear or buddy algorithm are not defragmented.
12553  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12554  {
12555  pAllocBlockVector = &hAllocPool->m_BlockVector;
12556  }
12557  }
12558  // This allocation belongs to the general pool.
12559  else
12560  {
12561  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12562  }
12563 
12564  if(pAllocBlockVector != VMA_NULL)
12565  {
12566  VmaDefragmentator* const pDefragmentator =
12567  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12568  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12569  &pAllocationsChanged[allocIndex] : VMA_NULL;
12570  pDefragmentator->AddAllocation(hAlloc, pChanged);
12571  }
12572  }
12573  }
12574 
12575  VkResult result = VK_SUCCESS;
12576 
12577  // ======== Main processing.
12578 
12579  VkDeviceSize maxBytesToMove = SIZE_MAX;
12580  uint32_t maxAllocationsToMove = UINT32_MAX;
12581  if(pDefragmentationInfo != VMA_NULL)
12582  {
12583  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12584  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12585  }
12586 
12587  // Process standard memory.
12588  for(uint32_t memTypeIndex = 0;
12589  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12590  ++memTypeIndex)
12591  {
12592  // Only HOST_VISIBLE memory types can be defragmented.
12593  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12594  {
12595  result = m_pBlockVectors[memTypeIndex]->Defragment(
12596  pDefragmentationStats,
12597  maxBytesToMove,
12598  maxAllocationsToMove);
12599  }
12600  }
12601 
12602  // Process custom pools.
12603  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12604  {
12605  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12606  pDefragmentationStats,
12607  maxBytesToMove,
12608  maxAllocationsToMove);
12609  }
12610 
12611  // ======== Destroy defragmentators.
12612 
12613  // Process custom pools.
12614  for(size_t poolIndex = poolCount; poolIndex--; )
12615  {
12616  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12617  }
12618 
12619  // Process standard memory.
12620  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12621  {
12622  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12623  {
12624  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12625  }
12626  }
12627 
12628  return result;
12629 }
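/*
Editor's sketch (not part of the original file): driving the pass above through the
public vmaDefragment() API of this version. Allocations reported as changed must have
their buffers/images destroyed, re-created, and re-bound by the caller afterwards.

static void ExampleDefragment(VmaAllocator allocator,
    VmaAllocation* allocations, size_t allocationCount, VkBool32* allocationsChanged)
{
    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
    defragInfo.maxAllocationsToMove = UINT32_MAX; // No count limit.

    VmaDefragmentationStats stats = {};
    vmaDefragment(allocator, allocations, allocationCount, allocationsChanged,
        &defragInfo, &stats);
}
*/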
12630 
12631 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
12632 {
12633  if(hAllocation->CanBecomeLost())
12634  {
12635  /*
12636  Warning: This is a carefully designed algorithm.
12637  Do not modify unless you really know what you're doing :)
12638  */
12639  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12640  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12641  for(;;)
12642  {
12643  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12644  {
12645  pAllocationInfo->memoryType = UINT32_MAX;
12646  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
12647  pAllocationInfo->offset = 0;
12648  pAllocationInfo->size = hAllocation->GetSize();
12649  pAllocationInfo->pMappedData = VMA_NULL;
12650  pAllocationInfo->pUserData = hAllocation->GetUserData();
12651  return;
12652  }
12653  else if(localLastUseFrameIndex == localCurrFrameIndex)
12654  {
12655  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12656  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12657  pAllocationInfo->offset = hAllocation->GetOffset();
12658  pAllocationInfo->size = hAllocation->GetSize();
12659  pAllocationInfo->pMappedData = VMA_NULL;
12660  pAllocationInfo->pUserData = hAllocation->GetUserData();
12661  return;
12662  }
12663  else // Last use time earlier than current time.
12664  {
12665  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12666  {
12667  localLastUseFrameIndex = localCurrFrameIndex;
12668  }
12669  }
12670  }
12671  }
12672  else
12673  {
12674 #if VMA_STATS_STRING_ENABLED
12675  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12676  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12677  for(;;)
12678  {
12679  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12680  if(localLastUseFrameIndex == localCurrFrameIndex)
12681  {
12682  break;
12683  }
12684  else // Last use time earlier than current time.
12685  {
12686  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12687  {
12688  localLastUseFrameIndex = localCurrFrameIndex;
12689  }
12690  }
12691  }
12692 #endif
12693 
12694  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
12695  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
12696  pAllocationInfo->offset = hAllocation->GetOffset();
12697  pAllocationInfo->size = hAllocation->GetSize();
12698  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
12699  pAllocationInfo->pUserData = hAllocation->GetUserData();
12700  }
12701 }
12702 
12703 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
12704 {
12705  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
12706  if(hAllocation->CanBecomeLost())
12707  {
12708  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12709  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12710  for(;;)
12711  {
12712  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
12713  {
12714  return false;
12715  }
12716  else if(localLastUseFrameIndex == localCurrFrameIndex)
12717  {
12718  return true;
12719  }
12720  else // Last use time earlier than current time.
12721  {
12722  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12723  {
12724  localLastUseFrameIndex = localCurrFrameIndex;
12725  }
12726  }
12727  }
12728  }
12729  else
12730  {
12731 #if VMA_STATS_STRING_ENABLED
12732  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
12733  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
12734  for(;;)
12735  {
12736  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
12737  if(localLastUseFrameIndex == localCurrFrameIndex)
12738  {
12739  break;
12740  }
12741  else // Last use time earlier than current time.
12742  {
12743  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
12744  {
12745  localLastUseFrameIndex = localCurrFrameIndex;
12746  }
12747  }
12748  }
12749 #endif
12750 
12751  return true;
12752  }
12753 }
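/*
Editor's sketch (not part of the original file): vmaTouchAllocation() wraps the function
above; for allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT it checks
for lost status and bumps the last-use frame index in one atomic pass.

static bool ExampleUseIfNotLost(VmaAllocator allocator, VmaAllocation allocation)
{
    if(vmaTouchAllocation(allocator, allocation))
    {
        return true; // Safe to use during the current frame.
    }
    return false; // Lost: the resource must be freed and recreated.
}
*/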
12754 
12755 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12756 {
12757  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12758 
12759  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12760 
12761  if(newCreateInfo.maxBlockCount == 0)
12762  {
12763  newCreateInfo.maxBlockCount = SIZE_MAX;
12764  }
12765  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12766  {
12767  return VK_ERROR_INITIALIZATION_FAILED;
12768  }
12769 
12770  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12771 
12772  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12773 
12774  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12775  if(res != VK_SUCCESS)
12776  {
12777  vma_delete(this, *pPool);
12778  *pPool = VMA_NULL;
12779  return res;
12780  }
12781 
12782  // Add to m_Pools.
12783  {
12784  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12785  (*pPool)->SetId(m_NextPoolId++);
12786  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12787  }
12788 
12789  return VK_SUCCESS;
12790 }
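/*
Editor's sketch (not part of the original file): creating a custom pool through the
public API, which lands in CreatePool() above. The block size and counts are hypothetical.

static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;   // E.g. from vmaFindMemoryTypeIndex().
    poolInfo.blockSize = 64ull * 1024 * 1024;  // Fixed 64 MiB blocks.
    poolInfo.minBlockCount = 1;                // Pre-created by CreateMinBlocks() above.
    poolInfo.maxBlockCount = 0;                // 0 becomes SIZE_MAX (no limit) above.
    return vmaCreatePool(allocator, &poolInfo, pPool);
}
*/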
12791 
12792 void VmaAllocator_T::DestroyPool(VmaPool pool)
12793 {
12794  // Remove from m_Pools.
12795  {
12796  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12797  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12798  VMA_ASSERT(success && "Pool not found in Allocator.");
12799  }
12800 
12801  vma_delete(this, pool);
12802 }
12803 
12804 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12805 {
12806  pool->m_BlockVector.GetPoolStats(pPoolStats);
12807 }
12808 
12809 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12810 {
12811  m_CurrentFrameIndex.store(frameIndex);
12812 }
12813 
12814 void VmaAllocator_T::MakePoolAllocationsLost(
12815  VmaPool hPool,
12816  size_t* pLostAllocationCount)
12817 {
12818  hPool->m_BlockVector.MakePoolAllocationsLost(
12819  m_CurrentFrameIndex.load(),
12820  pLostAllocationCount);
12821 }
12822 
12823 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12824 {
12825  return hPool->m_BlockVector.CheckCorruption();
12826 }
12827 
12828 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12829 {
12830  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12831 
12832  // Process default pools.
12833  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12834  {
12835  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12836  {
12837  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12838  VMA_ASSERT(pBlockVector);
12839  VkResult localRes = pBlockVector->CheckCorruption();
12840  switch(localRes)
12841  {
12842  case VK_ERROR_FEATURE_NOT_PRESENT:
12843  break;
12844  case VK_SUCCESS:
12845  finalRes = VK_SUCCESS;
12846  break;
12847  default:
12848  return localRes;
12849  }
12850  }
12851  }
12852 
12853  // Process custom pools.
12854  {
12855  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12856  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12857  {
12858  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12859  {
12860  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12861  switch(localRes)
12862  {
12863  case VK_ERROR_FEATURE_NOT_PRESENT:
12864  break;
12865  case VK_SUCCESS:
12866  finalRes = VK_SUCCESS;
12867  break;
12868  default:
12869  return localRes;
12870  }
12871  }
12872  }
12873  }
12874 
12875  return finalRes;
12876 }
12877 
12878 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12879 {
12880  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12881  (*pAllocation)->InitLost();
12882 }
12883 
12884 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
12885 {
12886  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
12887 
12888  VkResult res;
12889  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12890  {
12891  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12892  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
12893  {
12894  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12895  if(res == VK_SUCCESS)
12896  {
12897  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
12898  }
12899  }
12900  else
12901  {
12902  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
12903  }
12904  }
12905  else
12906  {
12907  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
12908  }
12909 
12910  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
12911  {
12912  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
12913  }
12914 
12915  return res;
12916 }
12917 
12918 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
12919 {
12920  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
12921  {
12922  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
12923  }
12924 
12925  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
12926 
12927  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
12928  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
12929  {
12930  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
12931  m_HeapSizeLimit[heapIndex] += size;
12932  }
12933 }
12934 
12935 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
12936 {
12937  if(hAllocation->CanBecomeLost())
12938  {
12939  return VK_ERROR_MEMORY_MAP_FAILED;
12940  }
12941 
12942  switch(hAllocation->GetType())
12943  {
12944  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12945  {
12946  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12947  char *pBytes = VMA_NULL;
12948  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
12949  if(res == VK_SUCCESS)
12950  {
12951  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
12952  hAllocation->BlockAllocMap();
12953  }
12954  return res;
12955  }
12956  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12957  return hAllocation->DedicatedAllocMap(this, ppData);
12958  default:
12959  VMA_ASSERT(0);
12960  return VK_ERROR_MEMORY_MAP_FAILED;
12961  }
12962 }
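/*
Editor's sketch (not part of the original file): the public vmaMapMemory()/vmaUnmapMemory()
pair backed by the functions above and below. Block mappings are reference-counted, so
nested map/unmap pairs on allocations sharing a block are legal.

static VkResult ExampleWriteToAllocation(VmaAllocator allocator, VmaAllocation allocation,
    const void* pSrc, size_t size)
{
    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrc, size);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}
*/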
12963 
12964 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12965 {
12966  switch(hAllocation->GetType())
12967  {
12968  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12969  {
12970  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12971  hAllocation->BlockAllocUnmap();
12972  pBlock->Unmap(this, 1);
12973  }
12974  break;
12975  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12976  hAllocation->DedicatedAllocUnmap(this);
12977  break;
12978  default:
12979  VMA_ASSERT(0);
12980  }
12981 }
12982 
12983 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12984 {
12985  VkResult res = VK_SUCCESS;
12986  switch(hAllocation->GetType())
12987  {
12988  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12989  res = GetVulkanFunctions().vkBindBufferMemory(
12990  m_hDevice,
12991  hBuffer,
12992  hAllocation->GetMemory(),
12993  0); //memoryOffset
12994  break;
12995  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12996  {
12997  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12998  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12999  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
13000  break;
13001  }
13002  default:
13003  VMA_ASSERT(0);
13004  }
13005  return res;
13006 }
13007 
13008 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
13009 {
13010  VkResult res = VK_SUCCESS;
13011  switch(hAllocation->GetType())
13012  {
13013  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13014  res = GetVulkanFunctions().vkBindImageMemory(
13015  m_hDevice,
13016  hImage,
13017  hAllocation->GetMemory(),
13018  0); //memoryOffset
13019  break;
13020  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13021  {
13022  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13023  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
13024  res = pBlock->BindImageMemory(this, hAllocation, hImage);
13025  break;
13026  }
13027  default:
13028  VMA_ASSERT(0);
13029  }
13030  return res;
13031 }
13032 
13033 void VmaAllocator_T::FlushOrInvalidateAllocation(
13034  VmaAllocation hAllocation,
13035  VkDeviceSize offset, VkDeviceSize size,
13036  VMA_CACHE_OPERATION op)
13037 {
13038  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
13039  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
13040  {
13041  const VkDeviceSize allocationSize = hAllocation->GetSize();
13042  VMA_ASSERT(offset <= allocationSize);
13043 
13044  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13045 
13046  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13047  memRange.memory = hAllocation->GetMemory();
13048 
13049  switch(hAllocation->GetType())
13050  {
13051  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
13052  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13053  if(size == VK_WHOLE_SIZE)
13054  {
13055  memRange.size = allocationSize - memRange.offset;
13056  }
13057  else
13058  {
13059  VMA_ASSERT(offset + size <= allocationSize);
13060  memRange.size = VMA_MIN(
13061  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
13062  allocationSize - memRange.offset);
13063  }
13064  break;
13065 
13066  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
13067  {
13068  // 1. Still within this allocation.
13069  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
13070  if(size == VK_WHOLE_SIZE)
13071  {
13072  size = allocationSize - offset;
13073  }
13074  else
13075  {
13076  VMA_ASSERT(offset + size <= allocationSize);
13077  }
13078  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
13079 
13080  // 2. Adjust to whole block.
13081  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
13082  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
13083  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
13084  memRange.offset += allocationOffset;
13085  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
13086 
13087  break;
13088  }
13089 
13090  default:
13091  VMA_ASSERT(0);
13092  }
13093 
13094  switch(op)
13095  {
13096  case VMA_CACHE_FLUSH:
13097  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
13098  break;
13099  case VMA_CACHE_INVALIDATE:
13100  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
13101  break;
13102  default:
13103  VMA_ASSERT(0);
13104  }
13105  }
13106  // else: Just ignore this call.
13107 }
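// Worked example (illustrative numbers): with nonCoherentAtomSize = 64, a
// request to flush offset = 100, size = 8 in a block allocation placed at
// allocationOffset = 256 is expanded by the code above to:
//
//   memRange.offset = VmaAlignDown(100, 64)          = 64
//   memRange.size   = VmaAlignUp(8 + (100 - 64), 64) = 64
//   // then adjusted to the whole block:
//   memRange.offset = 64 + 256                       = 320
//   memRange.size   = min(64, blockSize - 320)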
13108 
13109 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13110 {
13111  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13112 
13113  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13114  {
13115  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13116  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13117  VMA_ASSERT(pDedicatedAllocations);
13118  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13119  VMA_ASSERT(success);
13120  }
13121 
13122  VkDeviceMemory hMemory = allocation->GetMemory();
13123 
13124  /*
13125  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
13126  before vkFreeMemory.
13127 
13128  if(allocation->GetMappedData() != VMA_NULL)
13129  {
13130  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13131  }
13132  */
13133 
13134  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13135 
13136  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13137 }
13138 
13139 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13140 {
13141  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13142  !hAllocation->CanBecomeLost() &&
13143  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13144  {
13145  void* pData = VMA_NULL;
13146  VkResult res = Map(hAllocation, &pData);
13147  if(res == VK_SUCCESS)
13148  {
13149  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13150  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13151  Unmap(hAllocation);
13152  }
13153  else
13154  {
13155  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13156  }
13157  }
13158 }
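// Note with example (an illustrative sketch): FillAllocation() only has an
// effect when the implementation is compiled with allocation initialization
// enabled, e.g.:
//
//   #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// Host-visible allocations are then filled with a bit pattern, which helps
// catch use of uninitialized or already-freed memory.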
13159 
13160 #if VMA_STATS_STRING_ENABLED
13161 
13162 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
13163 {
13164  bool dedicatedAllocationsStarted = false;
13165  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13166  {
13167  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13168  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
13169  VMA_ASSERT(pDedicatedAllocVector);
13170  if(pDedicatedAllocVector->empty() == false)
13171  {
13172  if(dedicatedAllocationsStarted == false)
13173  {
13174  dedicatedAllocationsStarted = true;
13175  json.WriteString("DedicatedAllocations");
13176  json.BeginObject();
13177  }
13178 
13179  json.BeginString("Type ");
13180  json.ContinueString(memTypeIndex);
13181  json.EndString();
13182 
13183  json.BeginArray();
13184 
13185  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
13186  {
13187  json.BeginObject(true);
13188  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
13189  hAlloc->PrintParameters(json);
13190  json.EndObject();
13191  }
13192 
13193  json.EndArray();
13194  }
13195  }
13196  if(dedicatedAllocationsStarted)
13197  {
13198  json.EndObject();
13199  }
13200 
13201  {
13202  bool allocationsStarted = false;
13203  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13204  {
13205  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
13206  {
13207  if(allocationsStarted == false)
13208  {
13209  allocationsStarted = true;
13210  json.WriteString("DefaultPools");
13211  json.BeginObject();
13212  }
13213 
13214  json.BeginString("Type ");
13215  json.ContinueString(memTypeIndex);
13216  json.EndString();
13217 
13218  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
13219  }
13220  }
13221  if(allocationsStarted)
13222  {
13223  json.EndObject();
13224  }
13225  }
13226 
13227  // Custom pools
13228  {
13229  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
13230  const size_t poolCount = m_Pools.size();
13231  if(poolCount > 0)
13232  {
13233  json.WriteString("Pools");
13234  json.BeginObject();
13235  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13236  {
13237  json.BeginString();
13238  json.ContinueString(m_Pools[poolIndex]->GetId());
13239  json.EndString();
13240 
13241  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
13242  }
13243  json.EndObject();
13244  }
13245  }
13246 }
13247 
13248 #endif // #if VMA_STATS_STRING_ENABLED
13249 
13250 ////////////////////////////////////////////////////////////////////////////////
13251 // Public interface
13252 
13253 VkResult vmaCreateAllocator(
13254  const VmaAllocatorCreateInfo* pCreateInfo,
13255  VmaAllocator* pAllocator)
13256 {
13257  VMA_ASSERT(pCreateInfo && pAllocator);
13258  VMA_DEBUG_LOG("vmaCreateAllocator");
13259  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13260  return (*pAllocator)->Init(pCreateInfo);
13261 }
13262 
13263 void vmaDestroyAllocator(
13264  VmaAllocator allocator)
13265 {
13266  if(allocator != VK_NULL_HANDLE)
13267  {
13268  VMA_DEBUG_LOG("vmaDestroyAllocator");
13269  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13270  vma_delete(&allocationCallbacks, allocator);
13271  }
13272 }
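// Example (an illustrative sketch): typical creation and destruction of the
// allocator. `physicalDevice` and `device` are assumed to be valid handles
// created by the application beforehand:
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//
//   VmaAllocator allocator;
//   VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
//   // ... use the allocator ...
//   vmaDestroyAllocator(allocator);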
13273 
13274 void vmaGetPhysicalDeviceProperties(
13275  VmaAllocator allocator,
13276  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13277 {
13278  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13279  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13280 }
13281 
13282 void vmaGetMemoryProperties(
13283  VmaAllocator allocator,
13284  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13285 {
13286  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13287  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13288 }
13289 
13290 void vmaGetMemoryTypeProperties(
13291  VmaAllocator allocator,
13292  uint32_t memoryTypeIndex,
13293  VkMemoryPropertyFlags* pFlags)
13294 {
13295  VMA_ASSERT(allocator && pFlags);
13296  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13297  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13298 }
13299 
13300 void vmaSetCurrentFrameIndex(
13301  VmaAllocator allocator,
13302  uint32_t frameIndex)
13303 {
13304  VMA_ASSERT(allocator);
13305  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13306 
13307  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13308 
13309  allocator->SetCurrentFrameIndex(frameIndex);
13310 }
13311 
13312 void vmaCalculateStats(
13313  VmaAllocator allocator,
13314  VmaStats* pStats)
13315 {
13316  VMA_ASSERT(allocator && pStats);
13317  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13318  allocator->CalculateStats(pStats);
13319 }
13320 
13321 #if VMA_STATS_STRING_ENABLED
13322 
13323 void vmaBuildStatsString(
13324  VmaAllocator allocator,
13325  char** ppStatsString,
13326  VkBool32 detailedMap)
13327 {
13328  VMA_ASSERT(allocator && ppStatsString);
13329  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13330 
13331  VmaStringBuilder sb(allocator);
13332  {
13333  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
13334  json.BeginObject();
13335 
13336  VmaStats stats;
13337  allocator->CalculateStats(&stats);
13338 
13339  json.WriteString("Total");
13340  VmaPrintStatInfo(json, stats.total);
13341 
13342  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
13343  {
13344  json.BeginString("Heap ");
13345  json.ContinueString(heapIndex);
13346  json.EndString();
13347  json.BeginObject();
13348 
13349  json.WriteString("Size");
13350  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
13351 
13352  json.WriteString("Flags");
13353  json.BeginArray(true);
13354  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
13355  {
13356  json.WriteString("DEVICE_LOCAL");
13357  }
13358  json.EndArray();
13359 
13360  if(stats.memoryHeap[heapIndex].blockCount > 0)
13361  {
13362  json.WriteString("Stats");
13363  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
13364  }
13365 
13366  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
13367  {
13368  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
13369  {
13370  json.BeginString("Type ");
13371  json.ContinueString(typeIndex);
13372  json.EndString();
13373 
13374  json.BeginObject();
13375 
13376  json.WriteString("Flags");
13377  json.BeginArray(true);
13378  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
13379  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
13380  {
13381  json.WriteString("DEVICE_LOCAL");
13382  }
13383  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13384  {
13385  json.WriteString("HOST_VISIBLE");
13386  }
13387  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
13388  {
13389  json.WriteString("HOST_COHERENT");
13390  }
13391  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
13392  {
13393  json.WriteString("HOST_CACHED");
13394  }
13395  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
13396  {
13397  json.WriteString("LAZILY_ALLOCATED");
13398  }
13399  json.EndArray();
13400 
13401  if(stats.memoryType[typeIndex].blockCount > 0)
13402  {
13403  json.WriteString("Stats");
13404  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
13405  }
13406 
13407  json.EndObject();
13408  }
13409  }
13410 
13411  json.EndObject();
13412  }
13413  if(detailedMap == VK_TRUE)
13414  {
13415  allocator->PrintDetailedMap(json);
13416  }
13417 
13418  json.EndObject();
13419  }
13420 
13421  const size_t len = sb.GetLength();
13422  char* const pChars = vma_new_array(allocator, char, len + 1);
13423  if(len > 0)
13424  {
13425  memcpy(pChars, sb.GetData(), len);
13426  }
13427  pChars[len] = '\0';
13428  *ppStatsString = pChars;
13429 }
13430 
13431 void vmaFreeStatsString(
13432  VmaAllocator allocator,
13433  char* pStatsString)
13434 {
13435  if(pStatsString != VMA_NULL)
13436  {
13437  VMA_ASSERT(allocator);
13438  size_t len = strlen(pStatsString);
13439  vma_delete_array(allocator, pStatsString, len + 1);
13440  }
13441 }
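// Example (an illustrative sketch): dumping the allocator state as JSON with
// the pair of functions above:
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);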
13442 
13443 #endif // #if VMA_STATS_STRING_ENABLED
13444 
13445 /*
13446 This function is not protected by any mutex because it just reads immutable data.
13447 */
13448 VkResult vmaFindMemoryTypeIndex(
13449  VmaAllocator allocator,
13450  uint32_t memoryTypeBits,
13451  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13452  uint32_t* pMemoryTypeIndex)
13453 {
13454  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13455  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13456  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13457 
13458  if(pAllocationCreateInfo->memoryTypeBits != 0)
13459  {
13460  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13461  }
13462 
13463  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13464  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13465 
13466  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13467  if(mapped)
13468  {
13469  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13470  }
13471 
13472  // Convert usage to requiredFlags and preferredFlags.
13473  switch(pAllocationCreateInfo->usage)
13474  {
13475  case VMA_MEMORY_USAGE_UNKNOWN:
13476  break;
13477  case VMA_MEMORY_USAGE_GPU_ONLY:
13478  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13479  {
13480  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13481  }
13482  break;
13483  case VMA_MEMORY_USAGE_CPU_ONLY:
13484  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13485  break;
13486  case VMA_MEMORY_USAGE_CPU_TO_GPU:
13487  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13488  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13489  {
13490  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13491  }
13492  break;
13493  case VMA_MEMORY_USAGE_GPU_TO_CPU:
13494  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13495  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13496  break;
13497  default:
13498  break;
13499  }
13500 
13501  *pMemoryTypeIndex = UINT32_MAX;
13502  uint32_t minCost = UINT32_MAX;
13503  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13504  memTypeIndex < allocator->GetMemoryTypeCount();
13505  ++memTypeIndex, memTypeBit <<= 1)
13506  {
13507  // This memory type is acceptable according to memoryTypeBits bitmask.
13508  if((memTypeBit & memoryTypeBits) != 0)
13509  {
13510  const VkMemoryPropertyFlags currFlags =
13511  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13512  // This memory type contains requiredFlags.
13513  if((requiredFlags & ~currFlags) == 0)
13514  {
13515  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13516  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13517  // Remember memory type with lowest cost.
13518  if(currCost < minCost)
13519  {
13520  *pMemoryTypeIndex = memTypeIndex;
13521  if(currCost == 0)
13522  {
13523  return VK_SUCCESS;
13524  }
13525  minCost = currCost;
13526  }
13527  }
13528  }
13529  }
13530  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13531 }
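// Example (an illustrative sketch): finding a memory type for a staging
// buffer, where `memoryTypeBits` is assumed to come from
// vkGetBufferMemoryRequirements():
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//   uint32_t memTypeIndex;
//   VkResult res = vmaFindMemoryTypeIndex(
//       allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);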
13532 
13533 VkResult vmaFindMemoryTypeIndexForBufferInfo(
13534  VmaAllocator allocator,
13535  const VkBufferCreateInfo* pBufferCreateInfo,
13536  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13537  uint32_t* pMemoryTypeIndex)
13538 {
13539  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13540  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13541  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13542  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13543 
13544  const VkDevice hDev = allocator->m_hDevice;
13545  VkBuffer hBuffer = VK_NULL_HANDLE;
13546  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13547  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13548  if(res == VK_SUCCESS)
13549  {
13550  VkMemoryRequirements memReq = {};
13551  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13552  hDev, hBuffer, &memReq);
13553 
13554  res = vmaFindMemoryTypeIndex(
13555  allocator,
13556  memReq.memoryTypeBits,
13557  pAllocationCreateInfo,
13558  pMemoryTypeIndex);
13559 
13560  allocator->GetVulkanFunctions().vkDestroyBuffer(
13561  hDev, hBuffer, allocator->GetAllocationCallbacks());
13562  }
13563  return res;
13564 }
13565 
13566 VkResult vmaFindMemoryTypeIndexForImageInfo(
13567  VmaAllocator allocator,
13568  const VkImageCreateInfo* pImageCreateInfo,
13569  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13570  uint32_t* pMemoryTypeIndex)
13571 {
13572  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13573  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13574  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13575  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13576 
13577  const VkDevice hDev = allocator->m_hDevice;
13578  VkImage hImage = VK_NULL_HANDLE;
13579  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13580  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13581  if(res == VK_SUCCESS)
13582  {
13583  VkMemoryRequirements memReq = {};
13584  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13585  hDev, hImage, &memReq);
13586 
13587  res = vmaFindMemoryTypeIndex(
13588  allocator,
13589  memReq.memoryTypeBits,
13590  pAllocationCreateInfo,
13591  pMemoryTypeIndex);
13592 
13593  allocator->GetVulkanFunctions().vkDestroyImage(
13594  hDev, hImage, allocator->GetAllocationCallbacks());
13595  }
13596  return res;
13597 }
13598 
13599 VkResult vmaCreatePool(
13600  VmaAllocator allocator,
13601  const VmaPoolCreateInfo* pCreateInfo,
13602  VmaPool* pPool)
13603 {
13604  VMA_ASSERT(allocator && pCreateInfo && pPool);
13605 
13606  VMA_DEBUG_LOG("vmaCreatePool");
13607 
13608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13609 
13610  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13611 
13612 #if VMA_RECORDING_ENABLED
13613  if(allocator->GetRecorder() != VMA_NULL)
13614  {
13615  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13616  }
13617 #endif
13618 
13619  return res;
13620 }
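// Example (an illustrative sketch): creating and destroying a custom pool.
// `memTypeIndex` is assumed to come from one of the vmaFindMemoryTypeIndex*
// functions; the sizes are hypothetical:
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex;
//   poolCreateInfo.blockSize = 128ull * 1024 * 1024;
//   poolCreateInfo.maxBlockCount = 2;
//
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
//   // ... allocate with VmaAllocationCreateInfo::pool = pool ...
//   vmaDestroyPool(allocator, pool);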
13621 
13622 void vmaDestroyPool(
13623  VmaAllocator allocator,
13624  VmaPool pool)
13625 {
13626  VMA_ASSERT(allocator);
13627 
13628  if(pool == VK_NULL_HANDLE)
13629  {
13630  return;
13631  }
13632 
13633  VMA_DEBUG_LOG("vmaDestroyPool");
13634 
13635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13636 
13637 #if VMA_RECORDING_ENABLED
13638  if(allocator->GetRecorder() != VMA_NULL)
13639  {
13640  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13641  }
13642 #endif
13643 
13644  allocator->DestroyPool(pool);
13645 }
13646 
13647 void vmaGetPoolStats(
13648  VmaAllocator allocator,
13649  VmaPool pool,
13650  VmaPoolStats* pPoolStats)
13651 {
13652  VMA_ASSERT(allocator && pool && pPoolStats);
13653 
13654  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13655 
13656  allocator->GetPoolStats(pool, pPoolStats);
13657 }
13658 
13659 void vmaMakePoolAllocationsLost(
13660  VmaAllocator allocator,
13661  VmaPool pool,
13662  size_t* pLostAllocationCount)
13663 {
13664  VMA_ASSERT(allocator && pool);
13665 
13666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13667 
13668 #if VMA_RECORDING_ENABLED
13669  if(allocator->GetRecorder() != VMA_NULL)
13670  {
13671  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13672  }
13673 #endif
13674 
13675  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13676 }
13677 
13678 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13679 {
13680  VMA_ASSERT(allocator && pool);
13681 
13682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13683 
13684  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13685 
13686  return allocator->CheckPoolCorruption(pool);
13687 }
13688 
13689 VkResult vmaAllocateMemory(
13690  VmaAllocator allocator,
13691  const VkMemoryRequirements* pVkMemoryRequirements,
13692  const VmaAllocationCreateInfo* pCreateInfo,
13693  VmaAllocation* pAllocation,
13694  VmaAllocationInfo* pAllocationInfo)
13695 {
13696  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13697 
13698  VMA_DEBUG_LOG("vmaAllocateMemory");
13699 
13700  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13701 
13702  VkResult result = allocator->AllocateMemory(
13703  *pVkMemoryRequirements,
13704  false, // requiresDedicatedAllocation
13705  false, // prefersDedicatedAllocation
13706  VK_NULL_HANDLE, // dedicatedBuffer
13707  VK_NULL_HANDLE, // dedicatedImage
13708  *pCreateInfo,
13709  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13710  pAllocation);
13711 
13712 #if VMA_RECORDING_ENABLED
13713  if(allocator->GetRecorder() != VMA_NULL)
13714  {
13715  allocator->GetRecorder()->RecordAllocateMemory(
13716  allocator->GetCurrentFrameIndex(),
13717  *pVkMemoryRequirements,
13718  *pCreateInfo,
13719  *pAllocation);
13720  }
13721 #endif
13722 
13723  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13724  {
13725  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13726  }
13727 
13728  return result;
13729 }
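// Example (an illustrative sketch): general-purpose allocation from explicit
// VkMemoryRequirements; all values are hypothetical:
//
//   VkMemoryRequirements memReq = {};
//   memReq.size = 65536;
//   memReq.alignment = 256;
//   memReq.memoryTypeBits = UINT32_MAX; // accept any memory type
//
//   VmaAllocationCreateInfo createInfo = {};
//   createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VmaAllocation allocation;
//   VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, VMA_NULL);
//   // ...
//   vmaFreeMemory(allocator, allocation);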
13730 
13731 VkResult vmaAllocateMemoryForBuffer(
13732  VmaAllocator allocator,
13733  VkBuffer buffer,
13734  const VmaAllocationCreateInfo* pCreateInfo,
13735  VmaAllocation* pAllocation,
13736  VmaAllocationInfo* pAllocationInfo)
13737 {
13738  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13739 
13740  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13741 
13742  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13743 
13744  VkMemoryRequirements vkMemReq = {};
13745  bool requiresDedicatedAllocation = false;
13746  bool prefersDedicatedAllocation = false;
13747  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13748  requiresDedicatedAllocation,
13749  prefersDedicatedAllocation);
13750 
13751  VkResult result = allocator->AllocateMemory(
13752  vkMemReq,
13753  requiresDedicatedAllocation,
13754  prefersDedicatedAllocation,
13755  buffer, // dedicatedBuffer
13756  VK_NULL_HANDLE, // dedicatedImage
13757  *pCreateInfo,
13758  VMA_SUBALLOCATION_TYPE_BUFFER,
13759  pAllocation);
13760 
13761 #if VMA_RECORDING_ENABLED
13762  if(allocator->GetRecorder() != VMA_NULL)
13763  {
13764  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13765  allocator->GetCurrentFrameIndex(),
13766  vkMemReq,
13767  requiresDedicatedAllocation,
13768  prefersDedicatedAllocation,
13769  *pCreateInfo,
13770  *pAllocation);
13771  }
13772 #endif
13773 
13774  if(pAllocationInfo && result == VK_SUCCESS)
13775  {
13776  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13777  }
13778 
13779  return result;
13780 }
13781 
13782 VkResult vmaAllocateMemoryForImage(
13783  VmaAllocator allocator,
13784  VkImage image,
13785  const VmaAllocationCreateInfo* pCreateInfo,
13786  VmaAllocation* pAllocation,
13787  VmaAllocationInfo* pAllocationInfo)
13788 {
13789  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13790 
13791  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13792 
13793  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13794 
13795  VkMemoryRequirements vkMemReq = {};
13796  bool requiresDedicatedAllocation = false;
13797  bool prefersDedicatedAllocation = false;
13798  allocator->GetImageMemoryRequirements(image, vkMemReq,
13799  requiresDedicatedAllocation, prefersDedicatedAllocation);
13800 
13801  VkResult result = allocator->AllocateMemory(
13802  vkMemReq,
13803  requiresDedicatedAllocation,
13804  prefersDedicatedAllocation,
13805  VK_NULL_HANDLE, // dedicatedBuffer
13806  image, // dedicatedImage
13807  *pCreateInfo,
13808  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13809  pAllocation);
13810 
13811 #if VMA_RECORDING_ENABLED
13812  if(allocator->GetRecorder() != VMA_NULL)
13813  {
13814  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13815  allocator->GetCurrentFrameIndex(),
13816  vkMemReq,
13817  requiresDedicatedAllocation,
13818  prefersDedicatedAllocation,
13819  *pCreateInfo,
13820  *pAllocation);
13821  }
13822 #endif
13823 
13824  if(pAllocationInfo && result == VK_SUCCESS)
13825  {
13826  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13827  }
13828 
13829  return result;
13830 }
13831 
13832 void vmaFreeMemory(
13833  VmaAllocator allocator,
13834  VmaAllocation allocation)
13835 {
13836  VMA_ASSERT(allocator);
13837 
13838  if(allocation == VK_NULL_HANDLE)
13839  {
13840  return;
13841  }
13842 
13843  VMA_DEBUG_LOG("vmaFreeMemory");
13844 
13845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13846 
13847 #if VMA_RECORDING_ENABLED
13848  if(allocator->GetRecorder() != VMA_NULL)
13849  {
13850  allocator->GetRecorder()->RecordFreeMemory(
13851  allocator->GetCurrentFrameIndex(),
13852  allocation);
13853  }
13854 #endif
13855 
13856  allocator->FreeMemory(allocation);
13857 }
13858 
13859 void vmaGetAllocationInfo(
13860  VmaAllocator allocator,
13861  VmaAllocation allocation,
13862  VmaAllocationInfo* pAllocationInfo)
13863 {
13864  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13865 
13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13867 
13868 #if VMA_RECORDING_ENABLED
13869  if(allocator->GetRecorder() != VMA_NULL)
13870  {
13871  allocator->GetRecorder()->RecordGetAllocationInfo(
13872  allocator->GetCurrentFrameIndex(),
13873  allocation);
13874  }
13875 #endif
13876 
13877  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13878 }
13879 
13880 VkBool32 vmaTouchAllocation(
13881  VmaAllocator allocator,
13882  VmaAllocation allocation)
13883 {
13884  VMA_ASSERT(allocator && allocation);
13885 
13886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13887 
13888 #if VMA_RECORDING_ENABLED
13889  if(allocator->GetRecorder() != VMA_NULL)
13890  {
13891  allocator->GetRecorder()->RecordTouchAllocation(
13892  allocator->GetCurrentFrameIndex(),
13893  allocation);
13894  }
13895 #endif
13896 
13897  return allocator->TouchAllocation(allocation);
13898 }
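// Example (an illustrative sketch): the per-frame pattern for allocations
// created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
//   {
//       // The allocation is lost: free it and recreate the resource.
//       vmaFreeMemory(allocator, allocation);
//       // ... recreate the allocation and the resource bound to it ...
//   }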
13899 
13900 void vmaSetAllocationUserData(
13901  VmaAllocator allocator,
13902  VmaAllocation allocation,
13903  void* pUserData)
13904 {
13905  VMA_ASSERT(allocator && allocation);
13906 
13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13908 
13909  allocation->SetUserData(allocator, pUserData);
13910 
13911 #if VMA_RECORDING_ENABLED
13912  if(allocator->GetRecorder() != VMA_NULL)
13913  {
13914  allocator->GetRecorder()->RecordSetAllocationUserData(
13915  allocator->GetCurrentFrameIndex(),
13916  allocation,
13917  pUserData);
13918  }
13919 #endif
13920 }
13921 
13922 void vmaCreateLostAllocation(
13923  VmaAllocator allocator,
13924  VmaAllocation* pAllocation)
13925 {
13926  VMA_ASSERT(allocator && pAllocation);
13927 
13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13929 
13930  allocator->CreateLostAllocation(pAllocation);
13931 
13932 #if VMA_RECORDING_ENABLED
13933  if(allocator->GetRecorder() != VMA_NULL)
13934  {
13935  allocator->GetRecorder()->RecordCreateLostAllocation(
13936  allocator->GetCurrentFrameIndex(),
13937  *pAllocation);
13938  }
13939 #endif
13940 }
13941 
13942 VkResult vmaMapMemory(
13943  VmaAllocator allocator,
13944  VmaAllocation allocation,
13945  void** ppData)
13946 {
13947  VMA_ASSERT(allocator && allocation && ppData);
13948 
13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13950 
13951  VkResult res = allocator->Map(allocation, ppData);
13952 
13953 #if VMA_RECORDING_ENABLED
13954  if(allocator->GetRecorder() != VMA_NULL)
13955  {
13956  allocator->GetRecorder()->RecordMapMemory(
13957  allocator->GetCurrentFrameIndex(),
13958  allocation);
13959  }
13960 #endif
13961 
13962  return res;
13963 }
13964 
13965 void vmaUnmapMemory(
13966  VmaAllocator allocator,
13967  VmaAllocation allocation)
13968 {
13969  VMA_ASSERT(allocator && allocation);
13970 
13971  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13972 
13973 #if VMA_RECORDING_ENABLED
13974  if(allocator->GetRecorder() != VMA_NULL)
13975  {
13976  allocator->GetRecorder()->RecordUnmapMemory(
13977  allocator->GetCurrentFrameIndex(),
13978  allocation);
13979  }
13980 #endif
13981 
13982  allocator->Unmap(allocation);
13983 }
13984 
13985 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13986 {
13987  VMA_ASSERT(allocator && allocation);
13988 
13989  VMA_DEBUG_LOG("vmaFlushAllocation");
13990 
13991  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13992 
13993  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13994 
13995 #if VMA_RECORDING_ENABLED
13996  if(allocator->GetRecorder() != VMA_NULL)
13997  {
13998  allocator->GetRecorder()->RecordFlushAllocation(
13999  allocator->GetCurrentFrameIndex(),
14000  allocation, offset, size);
14001  }
14002 #endif
14003 }
14004 
14005 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
14006 {
14007  VMA_ASSERT(allocator && allocation);
14008 
14009  VMA_DEBUG_LOG("vmaInvalidateAllocation");
14010 
14011  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14012 
14013  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
14014 
14015 #if VMA_RECORDING_ENABLED
14016  if(allocator->GetRecorder() != VMA_NULL)
14017  {
14018  allocator->GetRecorder()->RecordInvalidateAllocation(
14019  allocator->GetCurrentFrameIndex(),
14020  allocation, offset, size);
14021  }
14022 #endif
14023 }
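// Example (an illustrative sketch): writing to a host-visible, possibly
// non-coherent allocation and flushing the written range. `srcData` and
// `srcSize` are assumed to be provided by the application:
//
//   void* pData = VMA_NULL;
//   vmaMapMemory(allocator, allocation, &pData);
//   memcpy(pData, srcData, srcSize);
//   vmaFlushAllocation(allocator, allocation, 0, srcSize);
//   vmaUnmapMemory(allocator, allocation);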
14024 
14025 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
14026 {
14027  VMA_ASSERT(allocator);
14028 
14029  VMA_DEBUG_LOG("vmaCheckCorruption");
14030 
14031  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14032 
14033  return allocator->CheckCorruption(memoryTypeBits);
14034 }
14035 
14036 VkResult vmaDefragment(
14037  VmaAllocator allocator,
14038  VmaAllocation* pAllocations,
14039  size_t allocationCount,
14040  VkBool32* pAllocationsChanged,
14041  const VmaDefragmentationInfo *pDefragmentationInfo,
14042  VmaDefragmentationStats* pDefragmentationStats)
14043 {
14044  VMA_ASSERT(allocator && pAllocations);
14045 
14046  VMA_DEBUG_LOG("vmaDefragment");
14047 
14048  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14049 
14050  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
14051 }
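// Example (an illustrative sketch): basic defragmentation of a set of
// allocations. `allocations` and ALLOC_COUNT are assumed to be prepared by the
// application; passing null as pDefragmentationInfo uses default limits:
//
//   VkBool32 allocationsChanged[ALLOC_COUNT] = {};
//   VmaDefragmentationStats stats = {};
//   VkResult res = vmaDefragment(
//       allocator, allocations, ALLOC_COUNT, allocationsChanged, VMA_NULL, &stats);
//   // Where allocationsChanged[i] == VK_TRUE, the allocation was moved and any
//   // buffer/image bound to it must be recreated and rebound by the application.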
14052 
14053 VkResult vmaBindBufferMemory(
14054  VmaAllocator allocator,
14055  VmaAllocation allocation,
14056  VkBuffer buffer)
14057 {
14058  VMA_ASSERT(allocator && allocation && buffer);
14059 
14060  VMA_DEBUG_LOG("vmaBindBufferMemory");
14061 
14062  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14063 
14064  return allocator->BindBufferMemory(allocation, buffer);
14065 }
14066 
14067 VkResult vmaBindImageMemory(
14068  VmaAllocator allocator,
14069  VmaAllocation allocation,
14070  VkImage image)
14071 {
14072  VMA_ASSERT(allocator && allocation && image);
14073 
14074  VMA_DEBUG_LOG("vmaBindImageMemory");
14075 
14076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14077 
14078  return allocator->BindImageMemory(allocation, image);
14079 }
14080 
14081 VkResult vmaCreateBuffer(
14082  VmaAllocator allocator,
14083  const VkBufferCreateInfo* pBufferCreateInfo,
14084  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14085  VkBuffer* pBuffer,
14086  VmaAllocation* pAllocation,
14087  VmaAllocationInfo* pAllocationInfo)
14088 {
14089  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14090 
14091  VMA_DEBUG_LOG("vmaCreateBuffer");
14092 
14093  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14094 
14095  *pBuffer = VK_NULL_HANDLE;
14096  *pAllocation = VK_NULL_HANDLE;
14097 
14098  // 1. Create VkBuffer.
14099  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14100  allocator->m_hDevice,
14101  pBufferCreateInfo,
14102  allocator->GetAllocationCallbacks(),
14103  pBuffer);
14104  if(res >= 0)
14105  {
14106  // 2. vkGetBufferMemoryRequirements.
14107  VkMemoryRequirements vkMemReq = {};
14108  bool requiresDedicatedAllocation = false;
14109  bool prefersDedicatedAllocation = false;
14110  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14111  requiresDedicatedAllocation, prefersDedicatedAllocation);
14112 
14113  // Make sure the alignment requirements for specific buffer usages reported
14114  // in Physical Device Properties are included in the alignment reported by the memory requirements.
14115  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14116  {
14117  VMA_ASSERT(vkMemReq.alignment %
14118  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14119  }
14120  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14121  {
14122  VMA_ASSERT(vkMemReq.alignment %
14123  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14124  }
14125  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14126  {
14127  VMA_ASSERT(vkMemReq.alignment %
14128  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14129  }
14130 
14131  // 3. Allocate memory using allocator.
14132  res = allocator->AllocateMemory(
14133  vkMemReq,
14134  requiresDedicatedAllocation,
14135  prefersDedicatedAllocation,
14136  *pBuffer, // dedicatedBuffer
14137  VK_NULL_HANDLE, // dedicatedImage
14138  *pAllocationCreateInfo,
14139  VMA_SUBALLOCATION_TYPE_BUFFER,
14140  pAllocation);
14141 
14142 #if VMA_RECORDING_ENABLED
14143  if(allocator->GetRecorder() != VMA_NULL)
14144  {
14145  allocator->GetRecorder()->RecordCreateBuffer(
14146  allocator->GetCurrentFrameIndex(),
14147  *pBufferCreateInfo,
14148  *pAllocationCreateInfo,
14149  *pAllocation);
14150  }
14151 #endif
14152 
14153  if(res >= 0)
14154  {
14155  // 4. Bind buffer with memory.
14156  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14157  if(res >= 0)
14158  {
14159  // All steps succeeded.
14160  #if VMA_STATS_STRING_ENABLED
14161  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14162  #endif
14163  if(pAllocationInfo != VMA_NULL)
14164  {
14165  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14166  }
14167 
14168  return VK_SUCCESS;
14169  }
14170  allocator->FreeMemory(*pAllocation);
14171  *pAllocation = VK_NULL_HANDLE;
14172  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14173  *pBuffer = VK_NULL_HANDLE;
14174  return res;
14175  }
14176  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14177  *pBuffer = VK_NULL_HANDLE;
14178  return res;
14179  }
14180  return res;
14181 }
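// Example (an illustrative sketch): the typical way to create a buffer
// together with its memory; size and usage flags are hypothetical:
//
//   VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufferInfo.size = 65536;
//   bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//   VmaAllocationCreateInfo allocInfo = {};
//   allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkBuffer buffer;
//   VmaAllocation allocation;
//   VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
//   // ...
//   vmaDestroyBuffer(allocator, buffer, allocation);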
14182 
14183 void vmaDestroyBuffer(
14184  VmaAllocator allocator,
14185  VkBuffer buffer,
14186  VmaAllocation allocation)
14187 {
14188  VMA_ASSERT(allocator);
14189 
14190  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14191  {
14192  return;
14193  }
14194 
14195  VMA_DEBUG_LOG("vmaDestroyBuffer");
14196 
14197  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14198 
14199 #if VMA_RECORDING_ENABLED
14200  if(allocator->GetRecorder() != VMA_NULL)
14201  {
14202  allocator->GetRecorder()->RecordDestroyBuffer(
14203  allocator->GetCurrentFrameIndex(),
14204  allocation);
14205  }
14206 #endif
14207 
14208  if(buffer != VK_NULL_HANDLE)
14209  {
14210  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14211  }
14212 
14213  if(allocation != VK_NULL_HANDLE)
14214  {
14215  allocator->FreeMemory(allocation);
14216  }
14217 }
14218 
14219 VkResult vmaCreateImage(
14220  VmaAllocator allocator,
14221  const VkImageCreateInfo* pImageCreateInfo,
14222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14223  VkImage* pImage,
14224  VmaAllocation* pAllocation,
14225  VmaAllocationInfo* pAllocationInfo)
14226 {
14227  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14228 
14229  VMA_DEBUG_LOG("vmaCreateImage");
14230 
14231  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14232 
14233  *pImage = VK_NULL_HANDLE;
14234  *pAllocation = VK_NULL_HANDLE;
14235 
14236  // 1. Create VkImage.
14237  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14238  allocator->m_hDevice,
14239  pImageCreateInfo,
14240  allocator->GetAllocationCallbacks(),
14241  pImage);
14242  if(res >= 0)
14243  {
14244  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14245  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14246  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14247 
14248  // 2. Allocate memory using allocator.
14249  VkMemoryRequirements vkMemReq = {};
14250  bool requiresDedicatedAllocation = false;
14251  bool prefersDedicatedAllocation = false;
14252  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14253  requiresDedicatedAllocation, prefersDedicatedAllocation);
14254 
14255  res = allocator->AllocateMemory(
14256  vkMemReq,
14257  requiresDedicatedAllocation,
14258  prefersDedicatedAllocation,
14259  VK_NULL_HANDLE, // dedicatedBuffer
14260  *pImage, // dedicatedImage
14261  *pAllocationCreateInfo,
14262  suballocType,
14263  pAllocation);
14264 
14265 #if VMA_RECORDING_ENABLED
14266  if(allocator->GetRecorder() != VMA_NULL)
14267  {
14268  allocator->GetRecorder()->RecordCreateImage(
14269  allocator->GetCurrentFrameIndex(),
14270  *pImageCreateInfo,
14271  *pAllocationCreateInfo,
14272  *pAllocation);
14273  }
14274 #endif
14275 
14276  if(res >= 0)
14277  {
14278  // 3. Bind image with memory.
14279  res = allocator->BindImageMemory(*pAllocation, *pImage);
14280  if(res >= 0)
14281  {
14282  // All steps succeeded.
14283  #if VMA_STATS_STRING_ENABLED
14284  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14285  #endif
14286  if(pAllocationInfo != VMA_NULL)
14287  {
14288  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14289  }
14290 
14291  return VK_SUCCESS;
14292  }
14293  allocator->FreeMemory(*pAllocation);
14294  *pAllocation = VK_NULL_HANDLE;
14295  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14296  *pImage = VK_NULL_HANDLE;
14297  return res;
14298  }
14299  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14300  *pImage = VK_NULL_HANDLE;
14301  return res;
14302  }
14303  return res;
14304 }
14305 
14306 void vmaDestroyImage(
14307  VmaAllocator allocator,
14308  VkImage image,
14309  VmaAllocation allocation)
14310 {
14311  VMA_ASSERT(allocator);
14312 
14313  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14314  {
14315  return;
14316  }
14317 
14318  VMA_DEBUG_LOG("vmaDestroyImage");
14319 
14320  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14321 
14322 #if VMA_RECORDING_ENABLED
14323  if(allocator->GetRecorder() != VMA_NULL)
14324  {
14325  allocator->GetRecorder()->RecordDestroyImage(
14326  allocator->GetCurrentFrameIndex(),
14327  allocation);
14328  }
14329 #endif
14330 
14331  if(image != VK_NULL_HANDLE)
14332  {
14333  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14334  }
14335  if(allocation != VK_NULL_HANDLE)
14336  {
14337  allocator->FreeMemory(allocation);
14338  }
14339 }
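// Example (an illustrative sketch): creating an image with its memory,
// analogous to the vmaCreateBuffer() example above; extent and format are
// hypothetical:
//
//   VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imageInfo.imageType = VK_IMAGE_TYPE_2D;
//   imageInfo.extent = { 1024, 1024, 1 };
//   imageInfo.mipLevels = 1;
//   imageInfo.arrayLayers = 1;
//   imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocInfo = {};
//   allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage image;
//   VmaAllocation allocation;
//   VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, VMA_NULL);
//   // ...
//   vmaDestroyImage(allocator, image, allocation);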
14340 
14341 #endif // #ifdef VMA_IMPLEMENTATION