Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1359 #include <vulkan/vulkan.h>
1360 
1361 #if !defined(VMA_DEDICATED_ALLOCATION)
1362  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1363  #define VMA_DEDICATED_ALLOCATION 1
1364  #else
1365  #define VMA_DEDICATED_ALLOCATION 0
1366  #endif
1367 #endif
1368 
1378 VK_DEFINE_HANDLE(VmaAllocator)
1379 
1380 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1382  VmaAllocator allocator,
1383  uint32_t memoryType,
1384  VkDeviceMemory memory,
1385  VkDeviceSize size);
1387 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1388  VmaAllocator allocator,
1389  uint32_t memoryType,
1390  VkDeviceMemory memory,
1391  VkDeviceSize size);
1392 
1406 
1436 
1439 typedef VkFlags VmaAllocatorCreateFlags;
1440 
1445 typedef struct VmaVulkanFunctions {
1446  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1447  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1448  PFN_vkAllocateMemory vkAllocateMemory;
1449  PFN_vkFreeMemory vkFreeMemory;
1450  PFN_vkMapMemory vkMapMemory;
1451  PFN_vkUnmapMemory vkUnmapMemory;
1452  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1453  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1454  PFN_vkBindBufferMemory vkBindBufferMemory;
1455  PFN_vkBindImageMemory vkBindImageMemory;
1456  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1457  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1458  PFN_vkCreateBuffer vkCreateBuffer;
1459  PFN_vkDestroyBuffer vkDestroyBuffer;
1460  PFN_vkCreateImage vkCreateImage;
1461  PFN_vkDestroyImage vkDestroyImage;
1462 #if VMA_DEDICATED_ALLOCATION
1463  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1464  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1465 #endif
1467 
1469 typedef enum VmaRecordFlagBits {
1476 
1479 typedef VkFlags VmaRecordFlags;
1480 
1481 /*
1482 Define this macro to 0/1 to disable/enable support for recording functionality,
1483 available through VmaAllocatorCreateInfo::pRecordSettings.
1484 */
1485 #ifndef VMA_RECORDING_ENABLED
1486  #ifdef _WIN32
1487  #define VMA_RECORDING_ENABLED 1
1488  #else
1489  #define VMA_RECORDING_ENABLED 0
1490  #endif
1491 #endif
1492 
1494 typedef struct VmaRecordSettings
1495 {
1505  const char* pFilePath;
1507 
1510 {
1514 
1515  VkPhysicalDevice physicalDevice;
1517 
1518  VkDevice device;
1520 
1523 
1524  const VkAllocationCallbacks* pAllocationCallbacks;
1526 
1565  const VkDeviceSize* pHeapSizeLimit;
1586 
1588 VkResult vmaCreateAllocator(
1589  const VmaAllocatorCreateInfo* pCreateInfo,
1590  VmaAllocator* pAllocator);
1591 
1593 void vmaDestroyAllocator(
1594  VmaAllocator allocator);
1595 
1601  VmaAllocator allocator,
1602  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1603 
1609  VmaAllocator allocator,
1610  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1611 
1619  VmaAllocator allocator,
1620  uint32_t memoryTypeIndex,
1621  VkMemoryPropertyFlags* pFlags);
1622 
1632  VmaAllocator allocator,
1633  uint32_t frameIndex);
1634 
1637 typedef struct VmaStatInfo
1638 {
1640  uint32_t blockCount;
1646  VkDeviceSize usedBytes;
1648  VkDeviceSize unusedBytes;
1651 } VmaStatInfo;
1652 
1654 typedef struct VmaStats
1655 {
1656  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1657  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1659 } VmaStats;
1660 
1662 void vmaCalculateStats(
1663  VmaAllocator allocator,
1664  VmaStats* pStats);
1665 
1666 #define VMA_STATS_STRING_ENABLED 1
1667 
1668 #if VMA_STATS_STRING_ENABLED
1669 
1671 
1673 void vmaBuildStatsString(
1674  VmaAllocator allocator,
1675  char** ppStatsString,
1676  VkBool32 detailedMap);
1677 
1678 void vmaFreeStatsString(
1679  VmaAllocator allocator,
1680  char* pStatsString);
1681 
1682 #endif // #if VMA_STATS_STRING_ENABLED
1683 
1692 VK_DEFINE_HANDLE(VmaPool)
1693 
1694 typedef enum VmaMemoryUsage
1695 {
1744 } VmaMemoryUsage;
1745 
1760 
1815 
1828 
1838 
1845 
1849 
1851 {
1864  VkMemoryPropertyFlags requiredFlags;
1869  VkMemoryPropertyFlags preferredFlags;
1877  uint32_t memoryTypeBits;
1890  void* pUserData;
1892 
1909 VkResult vmaFindMemoryTypeIndex(
1910  VmaAllocator allocator,
1911  uint32_t memoryTypeBits,
1912  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1913  uint32_t* pMemoryTypeIndex);
1914 
1928  VmaAllocator allocator,
1929  const VkBufferCreateInfo* pBufferCreateInfo,
1930  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1931  uint32_t* pMemoryTypeIndex);
1932 
1946  VmaAllocator allocator,
1947  const VkImageCreateInfo* pImageCreateInfo,
1948  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1949  uint32_t* pMemoryTypeIndex);
1950 
1971 
1986 
1989 
1995 
1998 typedef VkFlags VmaPoolCreateFlags;
1999 
2002 typedef struct VmaPoolCreateInfo {
2017  VkDeviceSize blockSize;
2046 
2049 typedef struct VmaPoolStats {
2052  VkDeviceSize size;
2055  VkDeviceSize unusedSize;
2068  VkDeviceSize unusedRangeSizeMax;
2071  size_t blockCount;
2072 } VmaPoolStats;
2073 
2080 VkResult vmaCreatePool(
2081  VmaAllocator allocator,
2082  const VmaPoolCreateInfo* pCreateInfo,
2083  VmaPool* pPool);
2084 
2087 void vmaDestroyPool(
2088  VmaAllocator allocator,
2089  VmaPool pool);
2090 
2097 void vmaGetPoolStats(
2098  VmaAllocator allocator,
2099  VmaPool pool,
2100  VmaPoolStats* pPoolStats);
2101 
2109  VmaAllocator allocator,
2110  VmaPool pool,
2111  size_t* pLostAllocationCount);
2112 
2127 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2128 
2153 VK_DEFINE_HANDLE(VmaAllocation)
2154 
2155 
2157 typedef struct VmaAllocationInfo {
2162  uint32_t memoryType;
2171  VkDeviceMemory deviceMemory;
2176  VkDeviceSize offset;
2181  VkDeviceSize size;
2195  void* pUserData;
2197 
2208 VkResult vmaAllocateMemory(
2209  VmaAllocator allocator,
2210  const VkMemoryRequirements* pVkMemoryRequirements,
2211  const VmaAllocationCreateInfo* pCreateInfo,
2212  VmaAllocation* pAllocation,
2213  VmaAllocationInfo* pAllocationInfo);
2214 
2222  VmaAllocator allocator,
2223  VkBuffer buffer,
2224  const VmaAllocationCreateInfo* pCreateInfo,
2225  VmaAllocation* pAllocation,
2226  VmaAllocationInfo* pAllocationInfo);
2227 
2229 VkResult vmaAllocateMemoryForImage(
2230  VmaAllocator allocator,
2231  VkImage image,
2232  const VmaAllocationCreateInfo* pCreateInfo,
2233  VmaAllocation* pAllocation,
2234  VmaAllocationInfo* pAllocationInfo);
2235 
2237 void vmaFreeMemory(
2238  VmaAllocator allocator,
2239  VmaAllocation allocation);
2240 
2258  VmaAllocator allocator,
2259  VmaAllocation allocation,
2260  VmaAllocationInfo* pAllocationInfo);
2261 
2276 VkBool32 vmaTouchAllocation(
2277  VmaAllocator allocator,
2278  VmaAllocation allocation);
2279 
2294  VmaAllocator allocator,
2295  VmaAllocation allocation,
2296  void* pUserData);
2297 
2309  VmaAllocator allocator,
2310  VmaAllocation* pAllocation);
2311 
2346 VkResult vmaMapMemory(
2347  VmaAllocator allocator,
2348  VmaAllocation allocation,
2349  void** ppData);
2350 
2355 void vmaUnmapMemory(
2356  VmaAllocator allocator,
2357  VmaAllocation allocation);
2358 
2371 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2372 
2385 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2386 
2403 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2404 
2406 typedef struct VmaDefragmentationInfo {
2411  VkDeviceSize maxBytesToMove;
2418 
2420 typedef struct VmaDefragmentationStats {
2422  VkDeviceSize bytesMoved;
2424  VkDeviceSize bytesFreed;
2430 
2517 VkResult vmaDefragment(
2518  VmaAllocator allocator,
2519  VmaAllocation* pAllocations,
2520  size_t allocationCount,
2521  VkBool32* pAllocationsChanged,
2522  const VmaDefragmentationInfo *pDefragmentationInfo,
2523  VmaDefragmentationStats* pDefragmentationStats);
2524 
2537 VkResult vmaBindBufferMemory(
2538  VmaAllocator allocator,
2539  VmaAllocation allocation,
2540  VkBuffer buffer);
2541 
2554 VkResult vmaBindImageMemory(
2555  VmaAllocator allocator,
2556  VmaAllocation allocation,
2557  VkImage image);
2558 
2585 VkResult vmaCreateBuffer(
2586  VmaAllocator allocator,
2587  const VkBufferCreateInfo* pBufferCreateInfo,
2588  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2589  VkBuffer* pBuffer,
2590  VmaAllocation* pAllocation,
2591  VmaAllocationInfo* pAllocationInfo);
2592 
2604 void vmaDestroyBuffer(
2605  VmaAllocator allocator,
2606  VkBuffer buffer,
2607  VmaAllocation allocation);
2608 
2610 VkResult vmaCreateImage(
2611  VmaAllocator allocator,
2612  const VkImageCreateInfo* pImageCreateInfo,
2613  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2614  VkImage* pImage,
2615  VmaAllocation* pAllocation,
2616  VmaAllocationInfo* pAllocationInfo);
2617 
2629 void vmaDestroyImage(
2630  VmaAllocator allocator,
2631  VkImage image,
2632  VmaAllocation allocation);
2633 
2634 #ifdef __cplusplus
2635 }
2636 #endif
2637 
2638 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2639 
2640 // For Visual Studio IntelliSense.
2641 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2642 #define VMA_IMPLEMENTATION
2643 #endif
2644 
2645 #ifdef VMA_IMPLEMENTATION
2646 #undef VMA_IMPLEMENTATION
2647 
2648 #include <cstdint>
2649 #include <cstdlib>
2650 #include <cstring>
2651 
2652 /*******************************************************************************
2653 CONFIGURATION SECTION
2654 
2655 Define some of these macros before each #include of this header or change them
2656 here if you need other then default behavior depending on your environment.
2657 */
2658 
2659 /*
2660 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2661 internally, like:
2662 
2663  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2664 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
2666 VmaAllocatorCreateInfo::pVulkanFunctions.
2667 */
2668 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2669 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2670 #endif
2671 
2672 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2673 //#define VMA_USE_STL_CONTAINERS 1
2674 
2675 /* Set this macro to 1 to make the library including and using STL containers:
2676 std::pair, std::vector, std::list, std::unordered_map.
2677 
2678 Set it to 0 or undefined to make the library using its own implementation of
2679 the containers.
2680 */
2681 #if VMA_USE_STL_CONTAINERS
2682  #define VMA_USE_STL_VECTOR 1
2683  #define VMA_USE_STL_UNORDERED_MAP 1
2684  #define VMA_USE_STL_LIST 1
2685 #endif
2686 
2687 #if VMA_USE_STL_VECTOR
2688  #include <vector>
2689 #endif
2690 
2691 #if VMA_USE_STL_UNORDERED_MAP
2692  #include <unordered_map>
2693 #endif
2694 
2695 #if VMA_USE_STL_LIST
2696  #include <list>
2697 #endif
2698 
2699 /*
2700 Following headers are used in this CONFIGURATION section only, so feel free to
2701 remove them if not needed.
2702 */
2703 #include <cassert> // for assert
2704 #include <algorithm> // for min, max
2705 #include <mutex> // for std::mutex
2706 #include <atomic> // for std::atomic
2707 
2708 #ifndef VMA_NULL
2709  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2710  #define VMA_NULL nullptr
2711 #endif
2712 
#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
// Fallback implementation of C11 aligned_alloc() for platforms whose libc
// lacks it, built on POSIX posix_memalign(). Returns VMA_NULL on failure.
// NOTE(review): posix_memalign() requires 'alignment' to be a power of two
// that is a multiple of sizeof(void*); only the minimum-size half of that is
// enforced below. Vulkan alignments are presumably powers of two -- confirm.
// NOTE(review): newer Apple/Android SDKs declare aligned_alloc() themselves;
// this global definition may collide with that declaration -- verify against
// the minimum supported SDK version.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif
2729 
2730 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
2732 
2733 //#include <malloc.h>
2734 
2735 // Normal assert to check for programmer's errors, especially in Debug configuration.
2736 #ifndef VMA_ASSERT
2737  #ifdef _DEBUG
2738  #define VMA_ASSERT(expr) assert(expr)
2739  #else
2740  #define VMA_ASSERT(expr)
2741  #endif
2742 #endif
2743 
2744 // Assert that will be called very often, like inside data structures e.g. operator[].
2745 // Making it non-empty can make program slow.
2746 #ifndef VMA_HEAVY_ASSERT
2747  #ifdef _DEBUG
2748  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2749  #else
2750  #define VMA_HEAVY_ASSERT(expr)
2751  #endif
2752 #endif
2753 
2754 #ifndef VMA_ALIGN_OF
2755  #define VMA_ALIGN_OF(type) (__alignof(type))
2756 #endif
2757 
2758 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2759  #if defined(_WIN32)
2760  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2761  #else
2762  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2763  #endif
2764 #endif
2765 
2766 #ifndef VMA_SYSTEM_FREE
2767  #if defined(_WIN32)
2768  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2769  #else
2770  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2771  #endif
2772 #endif
2773 
2774 #ifndef VMA_MIN
2775  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2776 #endif
2777 
2778 #ifndef VMA_MAX
2779  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2780 #endif
2781 
2782 #ifndef VMA_SWAP
2783  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2784 #endif
2785 
2786 #ifndef VMA_SORT
2787  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2788 #endif
2789 
2790 #ifndef VMA_DEBUG_LOG
2791  #define VMA_DEBUG_LOG(format, ...)
2792  /*
2793  #define VMA_DEBUG_LOG(format, ...) do { \
2794  printf(format, __VA_ARGS__); \
2795  printf("\n"); \
2796  } while(false)
2797  */
2798 #endif
2799 
2800 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2801 #if VMA_STATS_STRING_ENABLED
2802  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2803  {
2804  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2805  }
2806  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2807  {
2808  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2809  }
    // Formats pointer (ptr) into outStr using the platform's %p notation.
    // NOTE: %p output is implementation-defined; used only in stats strings.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
2814 #endif
2815 
2816 #ifndef VMA_MUTEX
2817  class VmaMutex
2818  {
2819  public:
2820  VmaMutex() { }
2821  ~VmaMutex() { }
2822  void Lock() { m_Mutex.lock(); }
2823  void Unlock() { m_Mutex.unlock(); }
2824  private:
2825  std::mutex m_Mutex;
2826  };
2827  #define VMA_MUTEX VmaMutex
2828 #endif
2829 
2830 /*
2831 If providing your own implementation, you need to implement a subset of std::atomic:
2832 
2833 - Constructor(uint32_t desired)
2834 - uint32_t load() const
2835 - void store(uint32_t desired)
2836 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2837 */
2838 #ifndef VMA_ATOMIC_UINT32
2839  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2840 #endif
2841 
2842 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2843 
2847  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2848 #endif
2849 
2850 #ifndef VMA_DEBUG_ALIGNMENT
2851 
2855  #define VMA_DEBUG_ALIGNMENT (1)
2856 #endif
2857 
2858 #ifndef VMA_DEBUG_MARGIN
2859 
2863  #define VMA_DEBUG_MARGIN (0)
2864 #endif
2865 
2866 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2867 
2871  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2872 #endif
2873 
2874 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2875 
2880  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2881 #endif
2882 
2883 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2884 
2888  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2889 #endif
2890 
2891 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2892 
2896  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2897 #endif
2898 
2899 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2900  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2902 #endif
2903 
2904 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2905  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2907 #endif
2908 
2909 #ifndef VMA_CLASS_NO_COPY
2910  #define VMA_CLASS_NO_COPY(className) \
2911  private: \
2912  className(const className&) = delete; \
2913  className& operator=(const className&) = delete;
2914 #endif
2915 
2916 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2917 
2918 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2919 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2920 
2921 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
2922 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2923 
2924 /*******************************************************************************
2925 END OF CONFIGURATION
2926 */
2927 
2928 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2929  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2930 
// Returns number of bits set to 1 in (v) (population count).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: (v & (v - 1)) clears the lowest set bit, so the
    // loop executes exactly once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2941 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
// NOTE(review): assumes align > 0; (val + align - 1) can wrap for values near
// the top of T's range -- callers presumably stay well below that; confirm.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// (Comment previously said "VmaAlignUp" by copy-paste mistake.)
// Use types like uint32_t, uint64_t as T. Relies on truncating integer
// division; assumes align > 0.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
2956 
// Division with mathematical rounding to nearest number.
// NOTE(review): intended for unsigned/nonnegative operands; for negative x
// the (y / 2) bias does not round toward nearest -- confirm callers.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}
2963 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so clearing the lowest set bit
    // (x & (x - 1)) yields zero. Zero passes by design (see above).
    return (x & (x-1)) == 0;
}
2974 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Smear the highest set bit of (v - 1) into every lower position, then
    // add one. Shift amounts 1, 2, 4, 8, 16 cover all 32 bits.
    --v;
    for(uint32_t shift = 1; shift < 32; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
// 64-bit overload: smallest power of 2 greater or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Same bit-smearing technique as the 32-bit overload; shift amounts
    // 1, 2, 4, 8, 16, 32 cover all 64 bits.
    --v;
    for(uint64_t shift = 1; shift < 64; shift <<= 1)
    {
        v |= v >> shift;
    }
    ++v;
    return v;
}
2999 
3000 static inline bool VmaStrIsEmpty(const char* pStr)
3001 {
3002  return pStr == VMA_NULL || *pStr == '\0';
3003 }
3004 
3005 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3006 {
3007  switch(algorithm)
3008  {
3010  return "Linear";
3012  return "Buddy";
3013  case 0:
3014  return "Default";
3015  default:
3016  VMA_ASSERT(0);
3017  return "";
3018  }
3019 }
3020 
3021 #ifndef VMA_SORT
3022 
// Lomuto-style partition step for VmaQuickSort below: partitions [beg, end)
// around the last element (the pivot) and returns the pivot's final position.
// Precondition: the range is non-empty.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    // Pivot is the last element of the range.
    Iterator centerValue = end; --centerValue;
    // Everything before insertIndex compares less than the pivot.
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted slot.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
3045 
// Fallback quicksort used when the user has not overridden VMA_SORT.
// NOTE(review): the pivot is always the last element, so recursion depth can
// reach O(n) on already-sorted input -- confirm callers only sort small ranges.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
3056 
3057 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3058 
3059 #endif // #ifndef VMA_SORT
3060 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".

NOTE: the (x & ~(pageSize - 1)) masking below assumes pageSize is a power of
two, as Vulkan specifies for bufferImageGranularity.
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // Last byte occupied by resource A, and the page that byte falls on.
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    // First byte of resource B, and its page.
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    // They can interfere only when both touch the same page.
    return resourceAEndPage == resourceBStartPage;
}
3081 
// Category of a suballocation within a memory block. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,              // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,           // Resource kind unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,     // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF  // Forces 32-bit underlying type.
};
3092 
3093 /*
3094 Returns true if given suballocation types could conflict and must respect
3095 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3096 or linear image and another one is optimal image. If type is unknown, behave
3097 conservatively.
3098 */
3099 static inline bool VmaIsBufferImageGranularityConflict(
3100  VmaSuballocationType suballocType1,
3101  VmaSuballocationType suballocType2)
3102 {
3103  if(suballocType1 > suballocType2)
3104  {
3105  VMA_SWAP(suballocType1, suballocType2);
3106  }
3107 
3108  switch(suballocType1)
3109  {
3110  case VMA_SUBALLOCATION_TYPE_FREE:
3111  return false;
3112  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3113  return true;
3114  case VMA_SUBALLOCATION_TYPE_BUFFER:
3115  return
3116  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3117  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3118  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3119  return
3120  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3121  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3122  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3123  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3124  return
3125  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3126  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3127  return false;
3128  default:
3129  VMA_ASSERT(0);
3130  return true;
3131  }
3132 }
3133 
// Fills the VMA_DEBUG_MARGIN bytes at (pData + offset) with the corruption-
// detection magic value, so VmaValidateMagicValue() can later verify that
// nothing wrote past the end of a neighboring allocation.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    // Number of whole uint32_t words that fit in the debug margin.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    // This condition is to silence clang compiler error: "comparison of unsigned expression < 0 is always false"
    if(numberCount > 0)
    {
        for(size_t i = 0; i < numberCount; ++i, ++pDst)
        {
            *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
        }
    }
}
3147 
// Checks that the VMA_DEBUG_MARGIN bytes at (pData + offset) still contain
// the magic value written by VmaWriteMagicValue(). Returns false when any
// word differs, i.e. the margin was overwritten.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    // Number of whole uint32_t words in the debug margin.
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    // This condition is to silence clang compiler error: "comparison of unsigned expression < 0 is always false"
    if(numberCount > 0)
    {
        for(size_t i = 0; i < numberCount; ++i, ++pSrc)
        {
            if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
            {
                return false;
            }
        }
    }
    return true;
}
3165 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, no mutex pointer is stored and both lock and
    // unlock are skipped -- used to disable synchronization at runtime.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // Locked mutex, or VMA_NULL when synchronization is disabled.
    VMA_MUTEX* m_pMutex;
};
3191 
3192 #if VMA_DEBUG_GLOBAL_MUTEX
3193  static VMA_MUTEX gDebugGlobalMutex;
3194  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3195 #else
3196  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3197 #endif
3198 
3199 // Minimum size of a free suballocation to register it in the free suballocation collection.
3200 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3201 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    // Classic lower-bound search. Invariant: elements before (beg + down)
    // are < key; elements at/after (beg + up) are >= key.
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            // *(beg + mid) < key: answer lies strictly after mid.
            down = mid + 1;
        }
        else
        {
            // *(beg + mid) >= key: mid itself remains a candidate.
            up = mid;
        }
    }
    return beg + down;
}
3229 
3231 // Memory allocation
3232 
3233 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3234 {
3235  if((pAllocationCallbacks != VMA_NULL) &&
3236  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3237  {
3238  return (*pAllocationCallbacks->pfnAllocation)(
3239  pAllocationCallbacks->pUserData,
3240  size,
3241  alignment,
3242  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3243  }
3244  else
3245  {
3246  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3247  }
3248 }
3249 
3250 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3251 {
3252  if((pAllocationCallbacks != VMA_NULL) &&
3253  (pAllocationCallbacks->pfnFree != VMA_NULL))
3254  {
3255  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3256  }
3257  else
3258  {
3259  VMA_SYSTEM_FREE(ptr);
3260  }
3261 }
3262 
// Allocates a single uninitialized T through the VkAllocationCallbacks
// mechanism (no constructor is run -- see the vma_new macro).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3268 
// Allocates an uninitialized array of (count) T's through the callbacks.
// NOTE(review): sizeof(T) * count is not checked for overflow -- callers
// presumably keep counts small; confirm.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3274 
3275 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3276 
3277 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3278 
3279 template<typename T>
3280 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3281 {
3282  ptr->~T();
3283  VmaFree(pAllocationCallbacks, ptr);
3284 }
3285 
// Destroys (count) elements of (ptr) and releases the array through the
// given callbacks. Safe to call with ptr == VMA_NULL (no-op).
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy in reverse construction order, as the compiler would.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
3298 
// STL-compatible allocator.
// Minimal Allocator implementation that routes container allocations through
// the VkAllocationCallbacks mechanism (VmaAllocateArray / VmaFree).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal when they share the same callbacks,
    // i.e. either could free memory allocated by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
3326 
3327 #if VMA_USE_STL_VECTOR
3328 
3329 #define VmaVector std::vector
3330 
// Inserts (item) at position (index) of (vec). STL-backed configuration
// (VMA_USE_STL_CONTAINERS); mirrors the custom VmaVector interface below.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
3336 
// Removes the element at position (index) from (vec). STL-backed
// configuration counterpart of the custom VmaVector interface below.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
3342 
3343 #else // #if VMA_USE_STL_VECTOR
3344 
3345 /* Class with interface compatible with subset of std::vector.
3346 T must be POD because constructors and destructors are not called and memcpy is
3347 used for these objects. */
3348 template<typename T, typename AllocatorT>
3349 class VmaVector
3350 {
3351 public:
3352  typedef T value_type;
3353 
3354  VmaVector(const AllocatorT& allocator) :
3355  m_Allocator(allocator),
3356  m_pArray(VMA_NULL),
3357  m_Count(0),
3358  m_Capacity(0)
3359  {
3360  }
3361 
3362  VmaVector(size_t count, const AllocatorT& allocator) :
3363  m_Allocator(allocator),
3364  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3365  m_Count(count),
3366  m_Capacity(count)
3367  {
3368  }
3369 
3370  VmaVector(const VmaVector<T, AllocatorT>& src) :
3371  m_Allocator(src.m_Allocator),
3372  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3373  m_Count(src.m_Count),
3374  m_Capacity(src.m_Count)
3375  {
3376  if(m_Count != 0)
3377  {
3378  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3379  }
3380  }
3381 
3382  ~VmaVector()
3383  {
3384  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3385  }
3386 
3387  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3388  {
3389  if(&rhs != this)
3390  {
3391  resize(rhs.m_Count);
3392  if(m_Count != 0)
3393  {
3394  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3395  }
3396  }
3397  return *this;
3398  }
3399 
3400  bool empty() const { return m_Count == 0; }
3401  size_t size() const { return m_Count; }
3402  T* data() { return m_pArray; }
3403  const T* data() const { return m_pArray; }
3404 
3405  T& operator[](size_t index)
3406  {
3407  VMA_HEAVY_ASSERT(index < m_Count);
3408  return m_pArray[index];
3409  }
3410  const T& operator[](size_t index) const
3411  {
3412  VMA_HEAVY_ASSERT(index < m_Count);
3413  return m_pArray[index];
3414  }
3415 
3416  T& front()
3417  {
3418  VMA_HEAVY_ASSERT(m_Count > 0);
3419  return m_pArray[0];
3420  }
3421  const T& front() const
3422  {
3423  VMA_HEAVY_ASSERT(m_Count > 0);
3424  return m_pArray[0];
3425  }
3426  T& back()
3427  {
3428  VMA_HEAVY_ASSERT(m_Count > 0);
3429  return m_pArray[m_Count - 1];
3430  }
3431  const T& back() const
3432  {
3433  VMA_HEAVY_ASSERT(m_Count > 0);
3434  return m_pArray[m_Count - 1];
3435  }
3436 
3437  void reserve(size_t newCapacity, bool freeMemory = false)
3438  {
3439  newCapacity = VMA_MAX(newCapacity, m_Count);
3440 
3441  if((newCapacity < m_Capacity) && !freeMemory)
3442  {
3443  newCapacity = m_Capacity;
3444  }
3445 
3446  if(newCapacity != m_Capacity)
3447  {
3448  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3449  if(m_Count != 0)
3450  {
3451  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3452  }
3453  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3454  m_Capacity = newCapacity;
3455  m_pArray = newArray;
3456  }
3457  }
3458 
3459  void resize(size_t newCount, bool freeMemory = false)
3460  {
3461  size_t newCapacity = m_Capacity;
3462  if(newCount > m_Capacity)
3463  {
3464  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3465  }
3466  else if(freeMemory)
3467  {
3468  newCapacity = newCount;
3469  }
3470 
3471  if(newCapacity != m_Capacity)
3472  {
3473  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3474  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3475  if(elementsToCopy != 0)
3476  {
3477  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3478  }
3479  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3480  m_Capacity = newCapacity;
3481  m_pArray = newArray;
3482  }
3483 
3484  m_Count = newCount;
3485  }
3486 
3487  void clear(bool freeMemory = false)
3488  {
3489  resize(0, freeMemory);
3490  }
3491 
3492  void insert(size_t index, const T& src)
3493  {
3494  VMA_HEAVY_ASSERT(index <= m_Count);
3495  const size_t oldCount = size();
3496  resize(oldCount + 1);
3497  if(index < oldCount)
3498  {
3499  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3500  }
3501  m_pArray[index] = src;
3502  }
3503 
3504  void remove(size_t index)
3505  {
3506  VMA_HEAVY_ASSERT(index < m_Count);
3507  const size_t oldCount = size();
3508  if(index < oldCount - 1)
3509  {
3510  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3511  }
3512  resize(oldCount - 1);
3513  }
3514 
3515  void push_back(const T& src)
3516  {
3517  const size_t newIndex = size();
3518  resize(newIndex + 1);
3519  m_pArray[newIndex] = src;
3520  }
3521 
3522  void pop_back()
3523  {
3524  VMA_HEAVY_ASSERT(m_Count > 0);
3525  resize(size() - 1);
3526  }
3527 
3528  void push_front(const T& src)
3529  {
3530  insert(0, src);
3531  }
3532 
3533  void pop_front()
3534  {
3535  VMA_HEAVY_ASSERT(m_Count > 0);
3536  remove(0);
3537  }
3538 
3539  typedef T* iterator;
3540 
3541  iterator begin() { return m_pArray; }
3542  iterator end() { return m_pArray + m_Count; }
3543 
3544 private:
3545  AllocatorT m_Allocator;
3546  T* m_pArray;
3547  size_t m_Count;
3548  size_t m_Capacity;
3549 };
3550 
// Inserts item into vec at position index (VmaVector counterpart of the
// std::vector overload used when VMA_USE_STL_VECTOR is enabled).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
3556 
// Removes the element at position index from vec (VmaVector counterpart of
// the std::vector overload).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
3562 
3563 #endif // #if VMA_USE_STL_VECTOR
3564 
// Inserts value into a vector kept sorted according to CmpLess and returns
// the index at which the new element was placed (binary search, O(log n)
// to find the spot, O(n) to shift).
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const beg = vector.data();
    typename VectorT::value_type* const end = beg + vector.size();
    const size_t insertIndex = (size_t)(VmaBinaryFindFirstNotLess(beg, end, value, CmpLess()) - beg);
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3576 
// Removes from a CmpLess-sorted vector the first element equivalent to value.
// Returns true if an element was removed, false if no equivalent element
// exists (equivalence: neither compares less than the other).
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    if(comparator(*it, value) || comparator(value, *it))
    {
        // Lower-bound position holds a different element - nothing to remove.
        return false;
    }
    const size_t indexToRemove = (size_t)(it - vector.begin());
    VmaVectorRemove(vector, indexToRemove);
    return true;
}
3594 
3595 template<typename CmpLess, typename IterT, typename KeyT>
3596 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3597 {
3598  CmpLess comparator;
3599  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3600  beg, end, value, comparator);
3601  if(it == end ||
3602  (!comparator(*it, value) && !comparator(value, *it)))
3603  {
3604  return it;
3605  }
3606  return end;
3607 }
3608 
3610 // class VmaPoolAllocator
3611 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Destroys all blocks. Outstanding pointers returned by Alloc() become invalid.
    void Clear();
    // Returns uninitialized storage for one T. T's constructor is NOT called.
    T* Alloc();
    // Returns ptr (previously obtained from Alloc) to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either live storage for a T, or a link in the block's
    // singly-linked free list (NextFreeIndex).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of m_ItemsPerBlock slots plus the head index of
    // its free list (UINT32_MAX when the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
3647 
// Creates an empty pool allocator; no block is allocated until first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3656 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
3662 
3663 template<typename T>
3664 void VmaPoolAllocator<T>::Clear()
3665 {
3666  for(size_t i = m_ItemBlocks.size(); i--; )
3667  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3668  m_ItemBlocks.clear();
3669 }
3670 
// Returns storage for one T. Searches existing blocks (newest first) for a
// free slot; creates a new block only when all are full. The returned memory
// is uninitialized - the caller is expected to placement-construct into it.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
3692 
// Returns ptr to the free list of the block that owns it. Linear scan over
// blocks; asserts if ptr was not allocated from this pool.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a pointer cast avoids strict-aliasing issues:
        // ptr points at Item::Value, which shares the address of its Item.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
3716 
// Allocates one new block of m_ItemsPerBlock slots, registers it and
// initializes its free list to cover every slot. Returns the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: pItems points at the same
    // heap array as the copy stored in m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
3731 
3733 // class VmaRawList, VmaList
3734 
3735 #if VMA_USE_STL_LIST
3736 
3737 #define VmaList std::list
3738 
3739 #else // #if VMA_USE_STL_LIST
3740 
// Node of the doubly linked list VmaRawList. pPrev/pNext are null at the
// front/back of the list respectively.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
3748 
// Doubly linked list. Nodes come from an embedded VmaPoolAllocator, so
// individual insert/remove never touches the system heap directly.
// "Raw" because it exposes nodes (ItemType*) instead of iterators; VmaList
// wraps it with an std::list-like interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert overloads without a value leave the node's Value
    // member uninitialized for the caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
3793 
// Creates an empty list. 128 is the node-pool block size: nodes are pooled
// in arrays of 128 to amortize heap allocations.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3803 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor releases all node storage at once.
}
3810 
3811 template<typename T>
3812 void VmaRawList<T>::Clear()
3813 {
3814  if(IsEmpty() == false)
3815  {
3816  ItemType* pItem = m_pBack;
3817  while(pItem != VMA_NULL)
3818  {
3819  ItemType* const pPrevItem = pItem->pPrev;
3820  m_ItemAllocator.Free(pItem);
3821  pItem = pPrevItem;
3822  }
3823  m_pFront = VMA_NULL;
3824  m_pBack = VMA_NULL;
3825  m_Count = 0;
3826  }
3827 }
3828 
3829 template<typename T>
3830 VmaListItem<T>* VmaRawList<T>::PushBack()
3831 {
3832  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3833  pNewItem->pNext = VMA_NULL;
3834  if(IsEmpty())
3835  {
3836  pNewItem->pPrev = VMA_NULL;
3837  m_pFront = pNewItem;
3838  m_pBack = pNewItem;
3839  m_Count = 1;
3840  }
3841  else
3842  {
3843  pNewItem->pPrev = m_pBack;
3844  m_pBack->pNext = pNewItem;
3845  m_pBack = pNewItem;
3846  ++m_Count;
3847  }
3848  return pNewItem;
3849 }
3850 
3851 template<typename T>
3852 VmaListItem<T>* VmaRawList<T>::PushFront()
3853 {
3854  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3855  pNewItem->pPrev = VMA_NULL;
3856  if(IsEmpty())
3857  {
3858  pNewItem->pNext = VMA_NULL;
3859  m_pFront = pNewItem;
3860  m_pBack = pNewItem;
3861  m_Count = 1;
3862  }
3863  else
3864  {
3865  pNewItem->pNext = m_pFront;
3866  m_pFront->pPrev = pNewItem;
3867  m_pFront = pNewItem;
3868  ++m_Count;
3869  }
3870  return pNewItem;
3871 }
3872 
3873 template<typename T>
3874 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3875 {
3876  ItemType* const pNewItem = PushBack();
3877  pNewItem->Value = value;
3878  return pNewItem;
3879 }
3880 
3881 template<typename T>
3882 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3883 {
3884  ItemType* const pNewItem = PushFront();
3885  pNewItem->Value = value;
3886  return pNewItem;
3887 }
3888 
3889 template<typename T>
3890 void VmaRawList<T>::PopBack()
3891 {
3892  VMA_HEAVY_ASSERT(m_Count > 0);
3893  ItemType* const pBackItem = m_pBack;
3894  ItemType* const pPrevItem = pBackItem->pPrev;
3895  if(pPrevItem != VMA_NULL)
3896  {
3897  pPrevItem->pNext = VMA_NULL;
3898  }
3899  m_pBack = pPrevItem;
3900  m_ItemAllocator.Free(pBackItem);
3901  --m_Count;
3902 }
3903 
3904 template<typename T>
3905 void VmaRawList<T>::PopFront()
3906 {
3907  VMA_HEAVY_ASSERT(m_Count > 0);
3908  ItemType* const pFrontItem = m_pFront;
3909  ItemType* const pNextItem = pFrontItem->pNext;
3910  if(pNextItem != VMA_NULL)
3911  {
3912  pNextItem->pPrev = VMA_NULL;
3913  }
3914  m_pFront = pNextItem;
3915  m_ItemAllocator.Free(pFrontItem);
3916  --m_Count;
3917 }
3918 
// Unlinks and frees the given node. pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix up the previous neighbor, or the list head if pItem was first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix up the next neighbor, or the list tail if pItem was last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
3948 
// Inserts a new node (uninitialized Value) immediately before pItem.
// A null pItem means "before end", i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front - the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
3974 
// Inserts a new node (uninitialized Value) immediately after pItem.
// A null pItem means "after begin-1", i.e. PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back - the new node becomes the new back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4000 
4001 template<typename T>
4002 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4003 {
4004  ItemType* const newItem = InsertBefore(pItem);
4005  newItem->Value = value;
4006  return newItem;
4007 }
4008 
4009 template<typename T>
4010 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4011 {
4012  ItemType* const newItem = InsertAfter(pItem);
4013  newItem->Value = value;
4014  return newItem;
4015 }
4016 
// std::list-like wrapper over VmaRawList, used when VMA_USE_STL_LIST is 0.
// AllocatorT is expected to be VmaStlAllocator; its m_pCallbacks drive all
// node allocations.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator. A null m_pItem denotes the past-the-end
    // position; decrementing end() yields the last element.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList itself creates non-default iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Const counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4201 
4202 #endif // #if VMA_USE_STL_LIST
4203 
4205 // class VmaMap
4206 
4207 // Unused in this version.
4208 #if 0
4209 
4210 #if VMA_USE_STL_UNORDERED_MAP
4211 
4212 #define VmaPair std::pair
4213 
4214 #define VMA_MAP_TYPE(KeyT, ValueT) \
4215  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4216 
4217 #else // #if VMA_USE_STL_UNORDERED_MAP
4218 
// POD counterpart of std::pair for the non-STL code path (dead code: this
// whole VmaMap section is compiled out with #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4228 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key, so lookup is a binary
search rather than hashing. */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4251 
4252 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4253 
// Orders VmaPairs by their first member; the second overload allows
// comparing a pair directly against a bare key in binary searches.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4266 
// Inserts pair at its sorted position (by key). Duplicate keys are not
// rejected - the new pair lands before existing equal keys.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4277 
// Binary-searches for key. Returns iterator to the matching pair or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4295 
// Erases the pair the iterator points to; it must be a valid position.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4301 
4302 #endif // #if VMA_USE_STL_UNORDERED_MAP
4303 
4304 #endif // #if 0
4305 
4307 
4308 class VmaDeviceMemoryBlock;
4309 
// Operation applied to a range of mapped memory: FLUSH makes host writes
// visible to the device, INVALIDATE makes device writes visible to the host.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4311 
/* Internal representation of a single allocation (the object behind a
VmaAllocation handle). It lives either inside a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or owns its own VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED). An instance starts as ALLOCATION_TYPE_NONE and
is initialized exactly once via InitBlockAllocation(), InitLost() or
InitDedicatedAllocation(). */
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit in m_MapCount marking an allocation created with the MAPPED flag
    // (persistently mapped); the low 7 bits count vmaMapMemory() calls.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // userDataString: when true, SetUserData treats pUserData as a string to
    // be copied and owned by this object (FLAG_USER_DATA_STRING).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation inside given block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is lost from the start
    // (m_LastUseFrameIndex must already equal VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4528 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;      // Byte offset of the region within the block.
    VkDeviceSize size;        // Byte size of the region.
    VmaAllocation hAllocation; // Owning allocation; null for a FREE region.
    VmaSuballocationType type;
};
4540 
// Comparator for offsets: ascending order.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
4549 struct VmaSuballocationOffsetGreater
4550 {
4551  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
4552  {
4553  return lhs.offset > rhs.offset;
4554  }
4555 };
4556 
// List of suballocations describing the contents of a whole memory block
// (used e.g. as VmaBlockMetadata_Generic::m_Suballocations).
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost() to weigh making allocations lost
// against wasted free space.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4561 
4562 /*
4563 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4564 
4565 If canMakeOtherLost was false:
4566 - item points to a FREE suballocation.
4567 - itemsToMakeLostCount is 0.
4568 
4569 If canMakeOtherLost was true:
4570 - item points to first of sequence of suballocations, which are either FREE,
4571  or point to VmaAllocations that can become lost.
4572 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4573  the requested allocation to succeed.
4574 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    // See the comment block above this struct: points to a FREE suballocation,
    // or (when canMakeOtherLost) to the first of a sequence to make lost.
    VmaSuballocationList::iterator item;
    // Number of allocations that must be made lost for this request to succeed.
    size_t itemsToMakeLostCount;
    // Opaque per-algorithm data — presumably consumed by the VmaBlockMetadata
    // implementation that produced the request; confirm in implementations.
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // sacrificed, plus a fixed penalty (VMA_LOST_ALLOCATION_COST) per
    // allocation made lost. Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4589 
4590 /*
4591 Data structure used for bookkeeping of allocations and unused ranges of memory
4592 in a single VkDeviceMemory block.
4593 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata() : m_Size(0) { }
    virtual ~VmaBlockMetadata() { }
    // Records the total block size. Derived classes override to build their
    // internal structures for a block of this size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    // Total size of the underlying VkDeviceMemory block, in bytes.
    VkDeviceSize GetSize() const { return m_Size; }
    // Number of live (non-free) suballocations.
    virtual size_t GetAllocationCount() const = 0;
    // Sum of sizes of all free regions, in bytes.
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Size of the largest contiguous free region.
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    // Accumulates this block's allocation statistics into outInfo.
    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    // Dumps the full suballocation map as JSON, for debugging/statistics.
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations identified in pAllocationRequest
    // (itemsToMakeLostCount of them). Returns false on failure.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation eligible under the frame-in-use rule;
    // returns the number made lost (assumption from the name — confirm in
    // the implementations).
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks margins around allocations for corruption; pBlockData is the
    // mapped pointer to the block's memory.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    // Frees the suballocation that starts at the given offset.
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
#if VMA_STATS_STRING_ENABLED
    // Helpers shared by derived PrintDetailedMap implementations:
    // opening header, one allocation entry, one unused-range entry, closing.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total block size, set by Init().
};
4672 
// General-purpose metadata algorithm: free list of suballocations with a
// size-sorted index for best-fit searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Live allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;       // Number of FREE suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes covered by FREE suballocations.
    // All suballocations, free and taken, covering the whole block
    // (presumably kept ordered by offset — confirm in implementation).
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Sanity-checks m_FreeSuballocationsBySize against invariants.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4763 
4764 /*
4765 Allocations and their references in internal data structure look like this:
4766 
4767 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4768 
4769  0 +-------+
4770  | |
4771  | |
4772  | |
4773  +-------+
4774  | Alloc | 1st[m_1stNullItemsBeginCount]
4775  +-------+
4776  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4777  +-------+
4778  | ... |
4779  +-------+
4780  | Alloc | 1st[1st.size() - 1]
4781  +-------+
4782  | |
4783  | |
4784  | |
4785 GetSize() +-------+
4786 
4787 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4788 
4789  0 +-------+
4790  | Alloc | 2nd[0]
4791  +-------+
4792  | Alloc | 2nd[1]
4793  +-------+
4794  | ... |
4795  +-------+
4796  | Alloc | 2nd[2nd.size() - 1]
4797  +-------+
4798  | |
4799  | |
4800  | |
4801  +-------+
4802  | Alloc | 1st[m_1stNullItemsBeginCount]
4803  +-------+
4804  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4805  +-------+
4806  | ... |
4807  +-------+
4808  | Alloc | 1st[1st.size() - 1]
4809  +-------+
4810  | |
4811 GetSize() +-------+
4812 
4813 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4814 
4815  0 +-------+
4816  | |
4817  | |
4818  | |
4819  +-------+
4820  | Alloc | 1st[m_1stNullItemsBeginCount]
4821  +-------+
4822  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4823  +-------+
4824  | ... |
4825  +-------+
4826  | Alloc | 1st[1st.size() - 1]
4827  +-------+
4828  | |
4829  | |
4830  | |
4831  +-------+
4832  | Alloc | 2nd[2nd.size() - 1]
4833  +-------+
4834  | ... |
4835  +-------+
4836  | Alloc | 2nd[1]
4837  +-------+
4838  | Alloc | 2nd[0]
4839 GetSize() +-------+
4840 
4841 */
// Linear (ring-buffer / stack / double-stack) metadata algorithm.
// See the ASCII diagrams above for the three layouts selected by
// m_2ndVectorMode.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize; // Total free bytes in the block.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex; // 0 or 1 - which vector currently plays the "1st" role.
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Decides whether 1st vector accumulated enough null items to be worth
    // compacting — see implementation for the exact heuristic.
    bool ShouldCompact1st() const;
    // Post-free maintenance: compaction / vector-role swap as needed.
    void CleanupAfterFree();
};
4940 
4941 /*
4942 Level 0 has block size = GetSize(). Level 1 has block size = GetSize() / 2 and so on...
4943 */
// Buddy-allocator metadata: a binary tree where level 0 spans the whole
// block and every split halves the node size (see comment above the class).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root is one undivided FREE node.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public Free variants forward to the private offset-based FreeAtOffset;
    // the allocation handle, when available, is passed for validation only.
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const size_t MAX_LEVELS = 30; // TODO

    // Node of the buddy tree. Interpretation of the union depends on `type`.
    struct Node
    {
        VkDeviceSize offset; // Offset of this node's range within the block.
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent; // Null for the root.
        Node* buddy;  // Sibling node covering the other half of the parent's range.

        union
        {
            // TYPE_FREE: links in m_FreeList at this node's level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: only the left child is stored (the right child is
            // presumably reachable as leftChild->buddy — confirm in implementation).
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    Node* m_Root;
    // Per-level intrusive doubly-linked list of FREE nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];

    // Recursively destroys a subtree.
    void DeleteNode(Node* node);
    bool ValidateNode(const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps a requested size to the deepest level whose node size still fits it.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size at given level: GetSize() >> level.
    VkDeviceSize LevelToNodeSize(uint32_t level) const;
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5056 
5057 /*
5058 Represents a single block of device memory (`VkDeviceMemory`) with all the
5059 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5060 
5061 Thread-safety: This class must be externally synchronized.
5062 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block (algorithm chosen by
    // the `algorithm` argument of Init() — see out-of-view implementation).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Reference-counted mapping: `count` is how many references to add/remove.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Corruption-detection margins written/checked around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id; // Unique id within the owning block vector.
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;   // Reference count of outstanding Map() calls.
    void* m_pMappedData;   // Host pointer while mapped; meaningful only when m_MapCount > 0.
};
5125 
/*
Comparator yielding a strict total order on arbitrary raw pointers, used for
sorted vectors of handles (e.g. dedicated allocations sorted by handle value).

Fix: the previous implementation compared the pointers directly with `<`,
which the C++ standard leaves unspecified for pointers into different
complete objects. Converting to uintptr_t first turns it into an ordinary
integer comparison, a well-defined total order on every platform.
(uintptr_t comes from <stdint.h>, already pulled in via <vulkan/vulkan.h>.)
*/
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
5133 
5134 class VmaDefragmentator;
5135 
5136 /*
5137 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5138 Vulkan memory type.
5139 
5140 Synchronized internally with a mutex.
5141 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates the configured minimum number of blocks up front.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block, or creates a new one when allowed.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId; // Source of ids passed to VmaDeviceMemoryBlock::Init.

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    // Allocates a new VkDeviceMemory block; on success *pNewBlockIndex is its
    // index in m_Blocks.
    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5254 
// Implementation behind the public VmaPool handle: a custom pool is simply
// one VmaBlockVector plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once; the assert enforces it is still the
    // initial 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id; // Unique pool identifier; 0 until SetId() is called.
};
5277 
// Moves allocations between the blocks of a single VmaBlockVector to reduce
// fragmentation. Usage: register candidates with AddAllocation(), then call
// Defragment() with the desired budget.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector; // The one vector being defragmented.
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;   // Running total, exposed via GetBytesMoved().
    uint32_t m_AllocationsMoved; // Running total, exposed via GetAllocationsMoved().

    // One move candidate plus an optional out-flag (presumably set when the
    // allocation actually moved — confirm in implementation).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by descending allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state gathered for one defragmentation pass.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // The block has non-movable allocations when not every allocation in
        // it was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name keeps the historical "Descecnding" misspelling;
        // renaming would break the out-of-line callers elsewhere in this file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Maps the block if it isn't mapped yet; Unmap undoes only mappings
        // this object created (see m_pMappedDataForDefragmentation).
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator allowing lookup of BlockInfo* by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one round of moves, bounded by the given budget.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Decides whether moving from src to dst is worthwhile — see implementation.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a move candidate; pChanged may be null.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5407 
5408 #if VMA_RECORDING_ENABLED
5409 
// Records VMA API calls (one Record* method per traced entry point) into a
// file, for offline analysis — presumably replay; see VmaRecordSettings
// documentation. Compiled only when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Stores device/memory properties in the recording so it can be
    // interpreted independently of the machine it was captured on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Thread id + timestamp attached to every recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders pUserData for the log: either a user-provided string or the
    // pointer value printed into m_PtrStr (presumably depending on
    // allocFlags — see implementation).
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];  // Buffer for a pointer rendered as text (16 hex digits + NUL).
        const char* m_Str;  // Presumably points to m_PtrStr or to the user string — confirm.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex; // Serializes writes to m_File when m_UseMutex is set.
    // Timer state — assumed frequency + start value used to compute
    // CallParams::time; confirm in GetBasicParams().
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5505 
5506 #endif // #if VMA_RECORDING_ENABLED
5507 
5508 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    // When false, internal mutexes are bypassed (externally-synchronized mode).
    bool m_UseMutex;
    // VK_KHR_dedicated_allocation path is available and may be used.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // True if the user supplied custom VkAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    // User callbacks invoked around vkAllocateMemory / vkFreeMemory.
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools, one per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    // Dedicated allocations, one list and one mutex per memory type.
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    // Second-stage initialization, separate from the constructor so it can fail.
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user callbacks, or null when defaults should be used.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity: device limit, raised to the debug minimum if larger.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory / vkFreeMemory that also apply
    // heap size limits and invoke the user's device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    // Dispatch table of Vulkan entry points used by this allocator.
    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
5705 
5707 // Memory allocation #2 after VmaAllocator_T definition
5708 
// Allocates raw memory via the allocator's VkAllocationCallbacks
// (forwards to the callbacks-based VmaMalloc overload).
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
5713 
// Frees memory previously obtained from VmaMalloc(hAllocator, ...).
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
5718 
// Allocates uninitialized, properly aligned storage for a single T.
// The caller is responsible for constructing the object and for freeing
// it with vma_delete.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
5724 
// Allocates uninitialized, properly aligned storage for `count` objects of T.
// Free with vma_delete_array using the same count.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
5730 
5731 template<typename T>
5732 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5733 {
5734  if(ptr != VMA_NULL)
5735  {
5736  ptr->~T();
5737  VmaFree(hAllocator, ptr);
5738  }
5739 }
5740 
5741 template<typename T>
5742 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5743 {
5744  if(ptr != VMA_NULL)
5745  {
5746  for(size_t i = count; i--; )
5747  ptr[i].~T();
5748  VmaFree(hAllocator, ptr);
5749  }
5750 }
5751 
5753 // VmaStringBuilder
5754 
5755 #if VMA_STATS_STRING_ENABLED
5756 
// Simple in-memory string builder used to produce statistics/JSON dumps.
class VmaStringBuilder
{
public:
    // The allocator is used only as a source of VkAllocationCallbacks for the buffer.
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // NOTE: the returned buffer is not null-terminated; use GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Raw character storage (no terminating NUL).
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5774 
5775 void VmaStringBuilder::Add(const char* pStr)
5776 {
5777  const size_t strLen = strlen(pStr);
5778  if(strLen > 0)
5779  {
5780  const size_t oldCount = m_Data.size();
5781  m_Data.resize(oldCount + strLen);
5782  memcpy(m_Data.data() + oldCount, pStr, strLen);
5783  }
5784 }
5785 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // 10 decimal digits max for uint32_t, plus terminating NUL.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
5792 
// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // 20 decimal digits max for uint64_t, plus terminating NUL.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
5799 
// Appends the textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
5806 
5807 #endif // #if VMA_STATS_STRING_ENABLED
5808 
5810 // VmaJsonWriter
5811 
5812 #if VMA_STATS_STRING_ENABLED
5813 
// Minimal streaming JSON writer that appends to a VmaStringBuilder.
// Inside an object, keys and values are written as alternating calls:
// WriteString(key) followed by one value-writing call.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    // Asserts that every string and collection has been closed.
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (equivalent to BeginString + EndString).
    void WriteString(const char* pStr);
    // A string value may be built incrementally between BeginString and EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of keys+values (object) or elements (array) written so far.
        uint32_t valueCount;
        bool singleLineMode;
    };

    // Output destination.
    VmaStringBuilder& m_SB;
    // Stack of currently open collections.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    // Emits separators/indentation before a new value and asserts that
    // object keys are strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
5862 
// Indentation unit appended once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
5864 
// Writes into the given string builder; the allocation callbacks are used
// only for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
5871 
VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must be closed before destruction,
    // otherwise the emitted JSON would be malformed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5877 
5878 void VmaJsonWriter::BeginObject(bool singleLine)
5879 {
5880  VMA_ASSERT(!m_InsideString);
5881 
5882  BeginValue(false);
5883  m_SB.Add('{');
5884 
5885  StackItem item;
5886  item.type = COLLECTION_TYPE_OBJECT;
5887  item.valueCount = 0;
5888  item.singleLineMode = singleLine;
5889  m_Stack.push_back(item);
5890 }
5891 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // Closing brace is indented one level less than the object's contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
5902 
5903 void VmaJsonWriter::BeginArray(bool singleLine)
5904 {
5905  VMA_ASSERT(!m_InsideString);
5906 
5907  BeginValue(false);
5908  m_SB.Add('[');
5909 
5910  StackItem item;
5911  item.type = COLLECTION_TYPE_ARRAY;
5912  item.valueCount = 0;
5913  item.singleLineMode = singleLine;
5914  m_Stack.push_back(item);
5915 }
5916 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // Closing bracket is indented one level less than the array's contents.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
5927 
// Writes a complete, quoted and escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
5933 
5934 void VmaJsonWriter::BeginString(const char* pStr)
5935 {
5936  VMA_ASSERT(!m_InsideString);
5937 
5938  BeginValue(true);
5939  m_SB.Add('"');
5940  m_InsideString = true;
5941  if(pStr != VMA_NULL && pStr[0] != '\0')
5942  {
5943  ContinueString(pStr);
5944  }
5945 }
5946 
5947 void VmaJsonWriter::ContinueString(const char* pStr)
5948 {
5949  VMA_ASSERT(m_InsideString);
5950 
5951  const size_t strLen = strlen(pStr);
5952  for(size_t i = 0; i < strLen; ++i)
5953  {
5954  char ch = pStr[i];
5955  if(ch == '\\')
5956  {
5957  m_SB.Add("\\\\");
5958  }
5959  else if(ch == '"')
5960  {
5961  m_SB.Add("\\\"");
5962  }
5963  else if(ch >= 32)
5964  {
5965  m_SB.Add(ch);
5966  }
5967  else switch(ch)
5968  {
5969  case '\b':
5970  m_SB.Add("\\b");
5971  break;
5972  case '\f':
5973  m_SB.Add("\\f");
5974  break;
5975  case '\n':
5976  m_SB.Add("\\n");
5977  break;
5978  case '\r':
5979  m_SB.Add("\\r");
5980  break;
5981  case '\t':
5982  m_SB.Add("\\t");
5983  break;
5984  default:
5985  VMA_ASSERT(0 && "Character not currently supported.");
5986  break;
5987  }
5988  }
5989 }
5990 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
5996 
// Appends a decimal number to the currently open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
6002 
// Appends a pointer's textual representation to the currently open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
6008 
6009 void VmaJsonWriter::EndString(const char* pStr)
6010 {
6011  VMA_ASSERT(m_InsideString);
6012  if(pStr != VMA_NULL && pStr[0] != '\0')
6013  {
6014  ContinueString(pStr);
6015  }
6016  m_SB.Add('"');
6017  m_InsideString = false;
6018 }
6019 
// Writes a numeric value (object value or array element).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6026 
// Writes a numeric value (object value or array element).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6033 
// Writes a JSON boolean literal.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}
6040 
// Writes the JSON null literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
6047 
6048 void VmaJsonWriter::BeginValue(bool isString)
6049 {
6050  if(!m_Stack.empty())
6051  {
6052  StackItem& currItem = m_Stack.back();
6053  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6054  currItem.valueCount % 2 == 0)
6055  {
6056  VMA_ASSERT(isString);
6057  }
6058 
6059  if(currItem.type == COLLECTION_TYPE_OBJECT &&
6060  currItem.valueCount % 2 != 0)
6061  {
6062  m_SB.Add(": ");
6063  }
6064  else if(currItem.valueCount > 0)
6065  {
6066  m_SB.Add(", ");
6067  WriteIndent();
6068  }
6069  else
6070  {
6071  WriteIndent();
6072  }
6073  ++currItem.valueCount;
6074  }
6075 }
6076 
6077 void VmaJsonWriter::WriteIndent(bool oneLess)
6078 {
6079  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
6080  {
6081  m_SB.AddNewLine();
6082 
6083  size_t count = m_Stack.size();
6084  if(count > 0 && oneLess)
6085  {
6086  --count;
6087  }
6088  for(size_t i = 0; i < count; ++i)
6089  {
6090  m_SB.Add(INDENT);
6091  }
6092  }
6093 }
6094 
6095 #endif // #if VMA_STATS_STRING_ENABLED
6096 
6098 
6099 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
6100 {
6101  if(IsUserDataString())
6102  {
6103  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
6104 
6105  FreeUserDataString(hAllocator);
6106 
6107  if(pUserData != VMA_NULL)
6108  {
6109  const char* const newStrSrc = (char*)pUserData;
6110  const size_t newStrLen = strlen(newStrSrc);
6111  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
6112  memcpy(newStrDst, newStrSrc, newStrLen + 1);
6113  m_pUserData = newStrDst;
6114  }
6115  }
6116  else
6117  {
6118  m_pUserData = pUserData;
6119  }
6120 }
6121 
6122 void VmaAllocation_T::ChangeBlockAllocation(
6123  VmaAllocator hAllocator,
6124  VmaDeviceMemoryBlock* block,
6125  VkDeviceSize offset)
6126 {
6127  VMA_ASSERT(block != VMA_NULL);
6128  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6129 
6130  // Move mapping reference counter from old block to new block.
6131  if(block != m_BlockAllocation.m_Block)
6132  {
6133  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
6134  if(IsPersistentMap())
6135  ++mapRefCount;
6136  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
6137  block->Map(hAllocator, mapRefCount, VMA_NULL);
6138  }
6139 
6140  m_BlockAllocation.m_Block = block;
6141  m_BlockAllocation.m_Offset = offset;
6142 }
6143 
6144 VkDeviceSize VmaAllocation_T::GetOffset() const
6145 {
6146  switch(m_Type)
6147  {
6148  case ALLOCATION_TYPE_BLOCK:
6149  return m_BlockAllocation.m_Offset;
6150  case ALLOCATION_TYPE_DEDICATED:
6151  return 0;
6152  default:
6153  VMA_ASSERT(0);
6154  return 0;
6155  }
6156 }
6157 
6158 VkDeviceMemory VmaAllocation_T::GetMemory() const
6159 {
6160  switch(m_Type)
6161  {
6162  case ALLOCATION_TYPE_BLOCK:
6163  return m_BlockAllocation.m_Block->GetDeviceMemory();
6164  case ALLOCATION_TYPE_DEDICATED:
6165  return m_DedicatedAllocation.m_hMemory;
6166  default:
6167  VMA_ASSERT(0);
6168  return VK_NULL_HANDLE;
6169  }
6170 }
6171 
6172 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
6173 {
6174  switch(m_Type)
6175  {
6176  case ALLOCATION_TYPE_BLOCK:
6177  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
6178  case ALLOCATION_TYPE_DEDICATED:
6179  return m_DedicatedAllocation.m_MemoryTypeIndex;
6180  default:
6181  VMA_ASSERT(0);
6182  return UINT32_MAX;
6183  }
6184 }
6185 
6186 void* VmaAllocation_T::GetMappedData() const
6187 {
6188  switch(m_Type)
6189  {
6190  case ALLOCATION_TYPE_BLOCK:
6191  if(m_MapCount != 0)
6192  {
6193  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
6194  VMA_ASSERT(pBlockData != VMA_NULL);
6195  return (char*)pBlockData + m_BlockAllocation.m_Offset;
6196  }
6197  else
6198  {
6199  return VMA_NULL;
6200  }
6201  break;
6202  case ALLOCATION_TYPE_DEDICATED:
6203  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
6204  return m_DedicatedAllocation.m_pMappedData;
6205  default:
6206  VMA_ASSERT(0);
6207  return VMA_NULL;
6208  }
6209 }
6210 
6211 bool VmaAllocation_T::CanBecomeLost() const
6212 {
6213  switch(m_Type)
6214  {
6215  case ALLOCATION_TYPE_BLOCK:
6216  return m_BlockAllocation.m_CanBecomeLost;
6217  case ALLOCATION_TYPE_DEDICATED:
6218  return false;
6219  default:
6220  VMA_ASSERT(0);
6221  return false;
6222  }
6223 }
6224 
// Returns the pool this allocation belongs to.
// Only valid for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
6230 
// Tries to mark this allocation as lost. Returns true on success; returns
// false if the allocation was used recently enough that it must be kept
// (lastUseFrame + frameInUseCount >= currentFrame).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free compare-and-swap loop on the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost; callers are not expected to ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - may still be in flight, cannot be lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed - presumably localLastUseFrameIndex was refreshed with
            // the current value by CompareExchangeLastUseFrameIndex; loop retries.
        }
    }
}
6262 
6263 #if VMA_STATS_STRING_ENABLED
6264 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing detailed maps.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6274 
// Writes this allocation's properties as key/value pairs into an
// already-open JSON object.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned, null-terminated string - print it verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are written only when non-zero.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6310 
6311 #endif
6312 
6313 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6314 {
6315  VMA_ASSERT(IsUserDataString());
6316  if(m_pUserData != VMA_NULL)
6317  {
6318  char* const oldStr = (char*)m_pUserData;
6319  const size_t oldStrLen = strlen(oldStr);
6320  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6321  m_pUserData = VMA_NULL;
6322  }
6323 }
6324 
6325 void VmaAllocation_T::BlockAllocMap()
6326 {
6327  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6328 
6329  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6330  {
6331  ++m_MapCount;
6332  }
6333  else
6334  {
6335  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6336  }
6337 }
6338 
6339 void VmaAllocation_T::BlockAllocUnmap()
6340 {
6341  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6342 
6343  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6344  {
6345  --m_MapCount;
6346  }
6347  else
6348  {
6349  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6350  }
6351 }
6352 
6353 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
6354 {
6355  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6356 
6357  if(m_MapCount != 0)
6358  {
6359  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6360  {
6361  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
6362  *ppData = m_DedicatedAllocation.m_pMappedData;
6363  ++m_MapCount;
6364  return VK_SUCCESS;
6365  }
6366  else
6367  {
6368  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
6369  return VK_ERROR_MEMORY_MAP_FAILED;
6370  }
6371  }
6372  else
6373  {
6374  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6375  hAllocator->m_hDevice,
6376  m_DedicatedAllocation.m_hMemory,
6377  0, // offset
6378  VK_WHOLE_SIZE,
6379  0, // flags
6380  ppData);
6381  if(result == VK_SUCCESS)
6382  {
6383  m_DedicatedAllocation.m_pMappedData = *ppData;
6384  m_MapCount = 1;
6385  }
6386  return result;
6387  }
6388 }
6389 
6390 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
6391 {
6392  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
6393 
6394  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6395  {
6396  --m_MapCount;
6397  if(m_MapCount == 0)
6398  {
6399  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
6400  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
6401  hAllocator->m_hDevice,
6402  m_DedicatedAllocation.m_hMemory);
6403  }
6404  }
6405  else
6406  {
6407  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
6408  }
6409 }
6410 
6411 #if VMA_STATS_STRING_ENABLED
6412 
6413 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
6414 {
6415  json.BeginObject();
6416 
6417  json.WriteString("Blocks");
6418  json.WriteNumber(stat.blockCount);
6419 
6420  json.WriteString("Allocations");
6421  json.WriteNumber(stat.allocationCount);
6422 
6423  json.WriteString("UnusedRanges");
6424  json.WriteNumber(stat.unusedRangeCount);
6425 
6426  json.WriteString("UsedBytes");
6427  json.WriteNumber(stat.usedBytes);
6428 
6429  json.WriteString("UnusedBytes");
6430  json.WriteNumber(stat.unusedBytes);
6431 
6432  if(stat.allocationCount > 1)
6433  {
6434  json.WriteString("AllocationSize");
6435  json.BeginObject(true);
6436  json.WriteString("Min");
6437  json.WriteNumber(stat.allocationSizeMin);
6438  json.WriteString("Avg");
6439  json.WriteNumber(stat.allocationSizeAvg);
6440  json.WriteString("Max");
6441  json.WriteNumber(stat.allocationSizeMax);
6442  json.EndObject();
6443  }
6444 
6445  if(stat.unusedRangeCount > 1)
6446  {
6447  json.WriteString("UnusedRangeSize");
6448  json.BeginObject(true);
6449  json.WriteString("Min");
6450  json.WriteNumber(stat.unusedRangeSizeMin);
6451  json.WriteString("Avg");
6452  json.WriteNumber(stat.unusedRangeSizeAvg);
6453  json.WriteString("Max");
6454  json.WriteNumber(stat.unusedRangeSizeMax);
6455  json.EndObject();
6456  }
6457 
6458  json.EndObject();
6459 }
6460 
6461 #endif // #if VMA_STATS_STRING_ENABLED
6462 
// Orders VmaSuballocationList iterators by the size of the pointed-to
// suballocation (used for the by-size ordering of free suballocations,
// see m_FreeSuballocationsBySize).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Heterogeneous overload: compare an item directly against a size value.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
6478 
6479 
6481 // class VmaBlockMetadata
6482 
6483 #if VMA_STATS_STRING_ENABLED
6484 
// Opens the JSON object describing one block and begins its "Suballocations"
// array. Must be paired with PrintDetailedMap_End(); individual entries are
// emitted with PrintDetailedMap_Allocation() / PrintDetailedMap_UnusedRange().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
6507 
// Writes one taken suballocation as a single-line object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // The allocation prints its own Type/Size/UserData/... key-value pairs.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
6521 
// Writes one free range as a single-line object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
6539 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6545 
6546 #endif // #if VMA_STATS_STRING_ENABLED
6547 
6549 // class VmaBlockMetadata_Generic
6550 
// Constructs empty metadata; Init() must be called to set up the initial
// free range. The allocator provides callbacks for the internal containers.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
6558 
// Containers clean up after themselves; nothing else to release.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6562 
6563 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6564 {
6565  VmaBlockMetadata::Init(size);
6566  m_FreeCount = 1;
6567  m_SumFreeSize = size;
6568 
6569  VmaSuballocation suballoc = {};
6570  suballoc.offset = 0;
6571  suballoc.size = size;
6572  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6573  suballoc.hAllocation = VK_NULL_HANDLE;
6574 
6575  m_Suballocations.push_back(suballoc);
6576  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6577  --suballocItem;
6578  m_FreeSuballocationsBySize.push_back(suballocItem);
6579 }
6580 
// Checks all internal invariants of the metadata; returns false on the first
// violation. Intended for debug/assert use.
bool VmaBlockMetadata_Generic::Validate() const
{
    // There must always be at least one suballocation - a fully free block
    // still contains a single free suballocation covering the whole size.
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // Free suballocations carry no allocation handle; taken ones must have one.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            if(subAlloc.size < VMA_DEBUG_MARGIN)
            {
                return false;
            }
        }
        else
        {
            // The allocation object must agree with this suballocation's placement.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }

            // Margin required between allocations - previous allocation must be free.
            if(VMA_DEBUG_MARGIN > 0 && !prevFree)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != GetSize()) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
6698 
6699 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6700 {
6701  if(!m_FreeSuballocationsBySize.empty())
6702  {
6703  return m_FreeSuballocationsBySize.back()->size;
6704  }
6705  else
6706  {
6707  return 0;
6708  }
6709 }
6710 
6711 bool VmaBlockMetadata_Generic::IsEmpty() const
6712 {
6713  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6714 }
6715 
6716 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6717 {
6718  outInfo.blockCount = 1;
6719 
6720  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6721  outInfo.allocationCount = rangeCount - m_FreeCount;
6722  outInfo.unusedRangeCount = m_FreeCount;
6723 
6724  outInfo.unusedBytes = m_SumFreeSize;
6725  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6726 
6727  outInfo.allocationSizeMin = UINT64_MAX;
6728  outInfo.allocationSizeMax = 0;
6729  outInfo.unusedRangeSizeMin = UINT64_MAX;
6730  outInfo.unusedRangeSizeMax = 0;
6731 
6732  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6733  suballocItem != m_Suballocations.cend();
6734  ++suballocItem)
6735  {
6736  const VmaSuballocation& suballoc = *suballocItem;
6737  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6738  {
6739  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6740  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6741  }
6742  else
6743  {
6744  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6745  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6746  }
6747  }
6748 }
6749 
6750 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6751 {
6752  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6753 
6754  inoutStats.size += GetSize();
6755  inoutStats.unusedSize += m_SumFreeSize;
6756  inoutStats.allocationCount += rangeCount - m_FreeCount;
6757  inoutStats.unusedRangeCount += m_FreeCount;
6758  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6759 }
6760 
6761 #if VMA_STATS_STRING_ENABLED
6762 
6763 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6764 {
6765  PrintDetailedMap_Begin(json,
6766  m_SumFreeSize, // unusedBytes
6767  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6768  m_FreeCount); // unusedRangeCount
6769 
6770  size_t i = 0;
6771  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6772  suballocItem != m_Suballocations.cend();
6773  ++suballocItem, ++i)
6774  {
6775  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6776  {
6777  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6778  }
6779  else
6780  {
6781  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6782  }
6783  }
6784 
6785  PrintDetailedMap_End(json);
6786 }
6787 
6788 #endif // #if VMA_STATS_STRING_ENABLED
6789 
6790 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6791  uint32_t currentFrameIndex,
6792  uint32_t frameInUseCount,
6793  VkDeviceSize bufferImageGranularity,
6794  VkDeviceSize allocSize,
6795  VkDeviceSize allocAlignment,
6796  bool upperAddress,
6797  VmaSuballocationType allocType,
6798  bool canMakeOtherLost,
6799  uint32_t strategy,
6800  VmaAllocationRequest* pAllocationRequest)
6801 {
6802  VMA_ASSERT(allocSize > 0);
6803  VMA_ASSERT(!upperAddress);
6804  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6805  VMA_ASSERT(pAllocationRequest != VMA_NULL);
6806  VMA_HEAVY_ASSERT(Validate());
6807 
6808  // There is not enough total free space in this block to fullfill the request: Early return.
6809  if(canMakeOtherLost == false &&
6810  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
6811  {
6812  return false;
6813  }
6814 
6815  // New algorithm, efficiently searching freeSuballocationsBySize.
6816  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6817  if(freeSuballocCount > 0)
6818  {
6820  {
6821  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
6822  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6823  m_FreeSuballocationsBySize.data(),
6824  m_FreeSuballocationsBySize.data() + freeSuballocCount,
6825  allocSize + 2 * VMA_DEBUG_MARGIN,
6826  VmaSuballocationItemSizeLess());
6827  size_t index = it - m_FreeSuballocationsBySize.data();
6828  for(; index < freeSuballocCount; ++index)
6829  {
6830  if(CheckAllocation(
6831  currentFrameIndex,
6832  frameInUseCount,
6833  bufferImageGranularity,
6834  allocSize,
6835  allocAlignment,
6836  allocType,
6837  m_FreeSuballocationsBySize[index],
6838  false, // canMakeOtherLost
6839  &pAllocationRequest->offset,
6840  &pAllocationRequest->itemsToMakeLostCount,
6841  &pAllocationRequest->sumFreeSize,
6842  &pAllocationRequest->sumItemSize))
6843  {
6844  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6845  return true;
6846  }
6847  }
6848  }
6849  else // WORST_FIT, FIRST_FIT
6850  {
6851  // Search staring from biggest suballocations.
6852  for(size_t index = freeSuballocCount; index--; )
6853  {
6854  if(CheckAllocation(
6855  currentFrameIndex,
6856  frameInUseCount,
6857  bufferImageGranularity,
6858  allocSize,
6859  allocAlignment,
6860  allocType,
6861  m_FreeSuballocationsBySize[index],
6862  false, // canMakeOtherLost
6863  &pAllocationRequest->offset,
6864  &pAllocationRequest->itemsToMakeLostCount,
6865  &pAllocationRequest->sumFreeSize,
6866  &pAllocationRequest->sumItemSize))
6867  {
6868  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
6869  return true;
6870  }
6871  }
6872  }
6873  }
6874 
6875  if(canMakeOtherLost)
6876  {
6877  // Brute-force algorithm. TODO: Come up with something better.
6878 
6879  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
6880  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
6881 
6882  VmaAllocationRequest tmpAllocRequest = {};
6883  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
6884  suballocIt != m_Suballocations.end();
6885  ++suballocIt)
6886  {
6887  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
6888  suballocIt->hAllocation->CanBecomeLost())
6889  {
6890  if(CheckAllocation(
6891  currentFrameIndex,
6892  frameInUseCount,
6893  bufferImageGranularity,
6894  allocSize,
6895  allocAlignment,
6896  allocType,
6897  suballocIt,
6898  canMakeOtherLost,
6899  &tmpAllocRequest.offset,
6900  &tmpAllocRequest.itemsToMakeLostCount,
6901  &tmpAllocRequest.sumFreeSize,
6902  &tmpAllocRequest.sumItemSize))
6903  {
6904  tmpAllocRequest.item = suballocIt;
6905 
6906  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
6908  {
6909  *pAllocationRequest = tmpAllocRequest;
6910  }
6911  }
6912  }
6913  }
6914 
6915  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
6916  {
6917  return true;
6918  }
6919  }
6920 
6921  return false;
6922 }
6923 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// walking forward from pAllocationRequest->item. Returns false if any of them
// can no longer be made lost. On success, pAllocationRequest->item ends up
// pointing at a free suballocation suitable for the subsequent Alloc() call.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next used allocation to make lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge adjacent free ranges; continue from the
            // iterator it returns, which stays valid.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
6955 
6956 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6957 {
6958  uint32_t lostAllocationCount = 0;
6959  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6960  it != m_Suballocations.end();
6961  ++it)
6962  {
6963  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
6964  it->hAllocation->CanBecomeLost() &&
6965  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
6966  {
6967  it = FreeSuballocation(it);
6968  ++lostAllocationCount;
6969  }
6970  }
6971  return lostAllocationCount;
6972 }
6973 
6974 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
6975 {
6976  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6977  it != m_Suballocations.end();
6978  ++it)
6979  {
6980  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6981  {
6982  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6983  {
6984  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6985  return VK_ERROR_VALIDATION_FAILED_EXT;
6986  }
6987  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6988  {
6989  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6990  return VK_ERROR_VALIDATION_FAILED_EXT;
6991  }
6992  }
6993  }
6994 
6995  return VK_SUCCESS;
6996 }
6997 
// Commits a previously computed allocation request: converts the chosen free
// suballocation into a used one of `allocSize` bytes at `request.offset`, and
// inserts new free suballocations for any leftover space before and after it.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress); // Upper-address allocation is not supported by this algorithm.
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating `suballoc`, because the
    // by-size index is searched by the current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range was consumed, and up to two new ones created.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only allocSize leaves the free pool; the paddings remain free bytes.
    m_SumFreeSize -= allocSize;
}
7063 
7064 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
7065 {
7066  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7067  suballocItem != m_Suballocations.end();
7068  ++suballocItem)
7069  {
7070  VmaSuballocation& suballoc = *suballocItem;
7071  if(suballoc.hAllocation == allocation)
7072  {
7073  FreeSuballocation(suballocItem);
7074  VMA_HEAVY_ASSERT(Validate());
7075  return;
7076  }
7077  }
7078  VMA_ASSERT(0 && "Not found!");
7079 }
7080 
7081 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
7082 {
7083  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
7084  suballocItem != m_Suballocations.end();
7085  ++suballocItem)
7086  {
7087  VmaSuballocation& suballoc = *suballocItem;
7088  if(suballoc.offset == offset)
7089  {
7090  FreeSuballocation(suballocItem);
7091  return;
7092  }
7093  }
7094  VMA_ASSERT(0 && "Not found!");
7095 }
7096 
7097 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7098 {
7099  VkDeviceSize lastSize = 0;
7100  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7101  {
7102  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7103 
7104  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7105  {
7106  VMA_ASSERT(0);
7107  return false;
7108  }
7109  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7110  {
7111  VMA_ASSERT(0);
7112  return false;
7113  }
7114  if(it->size < lastSize)
7115  {
7116  VMA_ASSERT(0);
7117  return false;
7118  }
7119 
7120  lastSize = it->size;
7121  }
7122  return true;
7123 }
7124 
// Checks whether an allocation of allocSize / allocAlignment / allocType can be
// placed starting at the given suballocation. On success fills *pOffset with the
// final aligned offset. When canMakeOtherLost is true, the starting item (and
// following ones spanned by the allocation) may be used-but-lost-capable; in
// that case *itemsToMakeLostCount counts allocations that would have to be made
// lost, and *pSumFreeSize / *pSumItemSize accumulate sizes for cost comparison.
// When canMakeOtherLost is false, suballocItem must be FREE and the allocation
// must fit entirely within it.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting item is acceptable only if it can be made lost
            // (not used within the last frameInUseCount frames).
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Rounding up to the granularity guarantees the conflicting
                // neighbor ends up on a different page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7398 
7399 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7400 {
7401  VMA_ASSERT(item != m_Suballocations.end());
7402  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7403 
7404  VmaSuballocationList::iterator nextItem = item;
7405  ++nextItem;
7406  VMA_ASSERT(nextItem != m_Suballocations.end());
7407  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7408 
7409  item->size += nextItem->size;
7410  --m_FreeCount;
7411  m_Suballocations.erase(nextItem);
7412 }
7413 
// Marks the given suballocation as free, updates the totals, and merges it with
// adjacent free neighbors when present. Returns an iterator to the resulting
// (possibly merged) free suballocation, which is also (re)registered in the
// by-size index.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The absorbed neighbor must leave the by-size index before erase.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous item changes size, so it is unregistered before the merge
        // and re-registered afterwards to keep the by-size index sorted.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7465 
7466 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7467 {
7468  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7469  VMA_ASSERT(item->size > 0);
7470 
7471  // You may want to enable this validation at the beginning or at the end of
7472  // this function, depending on what do you want to check.
7473  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7474 
7475  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7476  {
7477  if(m_FreeSuballocationsBySize.empty())
7478  {
7479  m_FreeSuballocationsBySize.push_back(item);
7480  }
7481  else
7482  {
7483  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7484  }
7485  }
7486 
7487  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7488 }
7489 
7490 
7491 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
7492 {
7493  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7494  VMA_ASSERT(item->size > 0);
7495 
7496  // You may want to enable this validation at the beginning or at the end of
7497  // this function, depending on what do you want to check.
7498  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7499 
7500  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7501  {
7502  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7503  m_FreeSuballocationsBySize.data(),
7504  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
7505  item,
7506  VmaSuballocationItemSizeLess());
7507  for(size_t index = it - m_FreeSuballocationsBySize.data();
7508  index < m_FreeSuballocationsBySize.size();
7509  ++index)
7510  {
7511  if(m_FreeSuballocationsBySize[index] == item)
7512  {
7513  VmaVectorRemove(m_FreeSuballocationsBySize, index);
7514  return;
7515  }
7516  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
7517  }
7518  VMA_ASSERT(0 && "Not found.");
7519  }
7520 
7521  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7522 }
7523 
7525 // class VmaBlockMetadata_Linear
7526 
// Constructs linear metadata with both suballocation vectors empty (allocated
// through the allocator's callbacks) and the second vector in "empty" mode.
// The block size is set later in Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7538 
// Nothing to release explicitly: the suballocation vectors clean up themselves.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7542 
// Initializes metadata for a block of the given size; the whole block starts free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7548 
7549 bool VmaBlockMetadata_Linear::Validate() const
7550 {
7551  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7552  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7553 
7554  if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))
7555  {
7556  return false;
7557  }
7558  if(suballocations1st.empty() && !suballocations2nd.empty() &&
7559  m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7560  {
7561  return false;
7562  }
7563  if(!suballocations1st.empty())
7564  {
7565  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
7566  if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
7567  {
7568  return false;
7569  }
7570  // Null item at the end should be just pop_back().
7571  if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)
7572  {
7573  return false;
7574  }
7575  }
7576  if(!suballocations2nd.empty())
7577  {
7578  // Null item at the end should be just pop_back().
7579  if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
7580  {
7581  return false;
7582  }
7583  }
7584 
7585  if(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount > suballocations1st.size())
7586  {
7587  return false;
7588  }
7589  if(m_2ndNullItemsCount > suballocations2nd.size())
7590  {
7591  return false;
7592  }
7593 
7594  VkDeviceSize sumUsedSize = 0;
7595  const size_t suballoc1stCount = suballocations1st.size();
7596  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7597 
7598  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7599  {
7600  const size_t suballoc2ndCount = suballocations2nd.size();
7601  size_t nullItem2ndCount = 0;
7602  for(size_t i = 0; i < suballoc2ndCount; ++i)
7603  {
7604  const VmaSuballocation& suballoc = suballocations2nd[i];
7605  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7606 
7607  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7608  {
7609  return false;
7610  }
7611  if(suballoc.offset < offset)
7612  {
7613  return false;
7614  }
7615 
7616  if(!currFree)
7617  {
7618  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7619  {
7620  return false;
7621  }
7622  if(suballoc.hAllocation->GetSize() != suballoc.size)
7623  {
7624  return false;
7625  }
7626  sumUsedSize += suballoc.size;
7627  }
7628  else
7629  {
7630  ++nullItem2ndCount;
7631  }
7632 
7633  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7634  }
7635 
7636  if(nullItem2ndCount != m_2ndNullItemsCount)
7637  {
7638  return false;
7639  }
7640  }
7641 
7642  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7643  {
7644  const VmaSuballocation& suballoc = suballocations1st[i];
7645  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||
7646  suballoc.hAllocation != VK_NULL_HANDLE)
7647  {
7648  return false;
7649  }
7650  }
7651 
7652  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7653 
7654  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7655  {
7656  const VmaSuballocation& suballoc = suballocations1st[i];
7657  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7658 
7659  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7660  {
7661  return false;
7662  }
7663  if(suballoc.offset < offset)
7664  {
7665  return false;
7666  }
7667  if(i < m_1stNullItemsBeginCount && !currFree)
7668  {
7669  return false;
7670  }
7671 
7672  if(!currFree)
7673  {
7674  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7675  {
7676  return false;
7677  }
7678  if(suballoc.hAllocation->GetSize() != suballoc.size)
7679  {
7680  return false;
7681  }
7682  sumUsedSize += suballoc.size;
7683  }
7684  else
7685  {
7686  ++nullItem1stCount;
7687  }
7688 
7689  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7690  }
7691  if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)
7692  {
7693  return false;
7694  }
7695 
7696  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7697  {
7698  const size_t suballoc2ndCount = suballocations2nd.size();
7699  size_t nullItem2ndCount = 0;
7700  for(size_t i = suballoc2ndCount; i--; )
7701  {
7702  const VmaSuballocation& suballoc = suballocations2nd[i];
7703  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7704 
7705  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7706  {
7707  return false;
7708  }
7709  if(suballoc.offset < offset)
7710  {
7711  return false;
7712  }
7713 
7714  if(!currFree)
7715  {
7716  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7717  {
7718  return false;
7719  }
7720  if(suballoc.hAllocation->GetSize() != suballoc.size)
7721  {
7722  return false;
7723  }
7724  sumUsedSize += suballoc.size;
7725  }
7726  else
7727  {
7728  ++nullItem2ndCount;
7729  }
7730 
7731  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7732  }
7733 
7734  if(nullItem2ndCount != m_2ndNullItemsCount)
7735  {
7736  return false;
7737  }
7738  }
7739 
7740  if(offset > GetSize())
7741  {
7742  return false;
7743  }
7744  if(m_SumFreeSize != GetSize() - sumUsedSize)
7745  {
7746  return false;
7747  }
7748 
7749  return true;
7750 }
7751 
7752 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7753 {
7754  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7755  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7756 }
7757 
// Returns the size of the largest contiguous region currently usable for a
// NEW allocation, depending on which mode the 2nd suballocation vector is in.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            // Block is not empty here, so 1st must contain at least one live item.
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live suballocation follows the run of freed items at the front.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            // Larger of: gap before the first live item, gap after the last item.
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd is an upper stack growing downward; back() is its lowest element.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
7821 
7822 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7823 {
7824  const VkDeviceSize size = GetSize();
7825  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7826  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7827  const size_t suballoc1stCount = suballocations1st.size();
7828  const size_t suballoc2ndCount = suballocations2nd.size();
7829 
7830  outInfo.blockCount = 1;
7831  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7832  outInfo.unusedRangeCount = 0;
7833  outInfo.usedBytes = 0;
7834  outInfo.allocationSizeMin = UINT64_MAX;
7835  outInfo.allocationSizeMax = 0;
7836  outInfo.unusedRangeSizeMin = UINT64_MAX;
7837  outInfo.unusedRangeSizeMax = 0;
7838 
7839  VkDeviceSize lastOffset = 0;
7840 
7841  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7842  {
7843  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7844  size_t nextAlloc2ndIndex = 0;
7845  while(lastOffset < freeSpace2ndTo1stEnd)
7846  {
7847  // Find next non-null allocation or move nextAllocIndex to the end.
7848  while(nextAlloc2ndIndex < suballoc2ndCount &&
7849  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7850  {
7851  ++nextAlloc2ndIndex;
7852  }
7853 
7854  // Found non-null allocation.
7855  if(nextAlloc2ndIndex < suballoc2ndCount)
7856  {
7857  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7858 
7859  // 1. Process free space before this allocation.
7860  if(lastOffset < suballoc.offset)
7861  {
7862  // There is free space from lastOffset to suballoc.offset.
7863  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7864  ++outInfo.unusedRangeCount;
7865  outInfo.unusedBytes += unusedRangeSize;
7866  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7867  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7868  }
7869 
7870  // 2. Process this allocation.
7871  // There is allocation with suballoc.offset, suballoc.size.
7872  outInfo.usedBytes += suballoc.size;
7873  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7874  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7875 
7876  // 3. Prepare for next iteration.
7877  lastOffset = suballoc.offset + suballoc.size;
7878  ++nextAlloc2ndIndex;
7879  }
7880  // We are at the end.
7881  else
7882  {
7883  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7884  if(lastOffset < freeSpace2ndTo1stEnd)
7885  {
7886  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7887  ++outInfo.unusedRangeCount;
7888  outInfo.unusedBytes += unusedRangeSize;
7889  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7890  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7891  }
7892 
7893  // End of loop.
7894  lastOffset = freeSpace2ndTo1stEnd;
7895  }
7896  }
7897  }
7898 
7899  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7900  const VkDeviceSize freeSpace1stTo2ndEnd =
7901  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7902  while(lastOffset < freeSpace1stTo2ndEnd)
7903  {
7904  // Find next non-null allocation or move nextAllocIndex to the end.
7905  while(nextAlloc1stIndex < suballoc1stCount &&
7906  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7907  {
7908  ++nextAlloc1stIndex;
7909  }
7910 
7911  // Found non-null allocation.
7912  if(nextAlloc1stIndex < suballoc1stCount)
7913  {
7914  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7915 
7916  // 1. Process free space before this allocation.
7917  if(lastOffset < suballoc.offset)
7918  {
7919  // There is free space from lastOffset to suballoc.offset.
7920  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7921  ++outInfo.unusedRangeCount;
7922  outInfo.unusedBytes += unusedRangeSize;
7923  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7924  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7925  }
7926 
7927  // 2. Process this allocation.
7928  // There is allocation with suballoc.offset, suballoc.size.
7929  outInfo.usedBytes += suballoc.size;
7930  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7931  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7932 
7933  // 3. Prepare for next iteration.
7934  lastOffset = suballoc.offset + suballoc.size;
7935  ++nextAlloc1stIndex;
7936  }
7937  // We are at the end.
7938  else
7939  {
7940  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7941  if(lastOffset < freeSpace1stTo2ndEnd)
7942  {
7943  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7944  ++outInfo.unusedRangeCount;
7945  outInfo.unusedBytes += unusedRangeSize;
7946  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7947  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7948  }
7949 
7950  // End of loop.
7951  lastOffset = freeSpace1stTo2ndEnd;
7952  }
7953  }
7954 
7955  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7956  {
7957  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7958  while(lastOffset < size)
7959  {
7960  // Find next non-null allocation or move nextAllocIndex to the end.
7961  while(nextAlloc2ndIndex != SIZE_MAX &&
7962  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7963  {
7964  --nextAlloc2ndIndex;
7965  }
7966 
7967  // Found non-null allocation.
7968  if(nextAlloc2ndIndex != SIZE_MAX)
7969  {
7970  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7971 
7972  // 1. Process free space before this allocation.
7973  if(lastOffset < suballoc.offset)
7974  {
7975  // There is free space from lastOffset to suballoc.offset.
7976  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7977  ++outInfo.unusedRangeCount;
7978  outInfo.unusedBytes += unusedRangeSize;
7979  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7980  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7981  }
7982 
7983  // 2. Process this allocation.
7984  // There is allocation with suballoc.offset, suballoc.size.
7985  outInfo.usedBytes += suballoc.size;
7986  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7987  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7988 
7989  // 3. Prepare for next iteration.
7990  lastOffset = suballoc.offset + suballoc.size;
7991  --nextAlloc2ndIndex;
7992  }
7993  // We are at the end.
7994  else
7995  {
7996  // There is free space from lastOffset to size.
7997  if(lastOffset < size)
7998  {
7999  const VkDeviceSize unusedRangeSize = size - lastOffset;
8000  ++outInfo.unusedRangeCount;
8001  outInfo.unusedBytes += unusedRangeSize;
8002  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8003  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8004  }
8005 
8006  // End of loop.
8007  lastOffset = size;
8008  }
8009  }
8010  }
8011 
8012  outInfo.unusedBytes = size - outInfo.usedBytes;
8013 }
8014 
8015 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
8016 {
8017  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8018  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8019  const VkDeviceSize size = GetSize();
8020  const size_t suballoc1stCount = suballocations1st.size();
8021  const size_t suballoc2ndCount = suballocations2nd.size();
8022 
8023  inoutStats.size += size;
8024 
8025  VkDeviceSize lastOffset = 0;
8026 
8027  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8028  {
8029  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8030  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
8031  while(lastOffset < freeSpace2ndTo1stEnd)
8032  {
8033  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8034  while(nextAlloc2ndIndex < suballoc2ndCount &&
8035  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8036  {
8037  ++nextAlloc2ndIndex;
8038  }
8039 
8040  // Found non-null allocation.
8041  if(nextAlloc2ndIndex < suballoc2ndCount)
8042  {
8043  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8044 
8045  // 1. Process free space before this allocation.
8046  if(lastOffset < suballoc.offset)
8047  {
8048  // There is free space from lastOffset to suballoc.offset.
8049  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8050  inoutStats.unusedSize += unusedRangeSize;
8051  ++inoutStats.unusedRangeCount;
8052  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8053  }
8054 
8055  // 2. Process this allocation.
8056  // There is allocation with suballoc.offset, suballoc.size.
8057  ++inoutStats.allocationCount;
8058 
8059  // 3. Prepare for next iteration.
8060  lastOffset = suballoc.offset + suballoc.size;
8061  ++nextAlloc2ndIndex;
8062  }
8063  // We are at the end.
8064  else
8065  {
8066  if(lastOffset < freeSpace2ndTo1stEnd)
8067  {
8068  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8069  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8070  inoutStats.unusedSize += unusedRangeSize;
8071  ++inoutStats.unusedRangeCount;
8072  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8073  }
8074 
8075  // End of loop.
8076  lastOffset = freeSpace2ndTo1stEnd;
8077  }
8078  }
8079  }
8080 
8081  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8082  const VkDeviceSize freeSpace1stTo2ndEnd =
8083  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8084  while(lastOffset < freeSpace1stTo2ndEnd)
8085  {
8086  // Find next non-null allocation or move nextAllocIndex to the end.
8087  while(nextAlloc1stIndex < suballoc1stCount &&
8088  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8089  {
8090  ++nextAlloc1stIndex;
8091  }
8092 
8093  // Found non-null allocation.
8094  if(nextAlloc1stIndex < suballoc1stCount)
8095  {
8096  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8097 
8098  // 1. Process free space before this allocation.
8099  if(lastOffset < suballoc.offset)
8100  {
8101  // There is free space from lastOffset to suballoc.offset.
8102  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8103  inoutStats.unusedSize += unusedRangeSize;
8104  ++inoutStats.unusedRangeCount;
8105  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8106  }
8107 
8108  // 2. Process this allocation.
8109  // There is allocation with suballoc.offset, suballoc.size.
8110  ++inoutStats.allocationCount;
8111 
8112  // 3. Prepare for next iteration.
8113  lastOffset = suballoc.offset + suballoc.size;
8114  ++nextAlloc1stIndex;
8115  }
8116  // We are at the end.
8117  else
8118  {
8119  if(lastOffset < freeSpace1stTo2ndEnd)
8120  {
8121  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8122  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8123  inoutStats.unusedSize += unusedRangeSize;
8124  ++inoutStats.unusedRangeCount;
8125  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8126  }
8127 
8128  // End of loop.
8129  lastOffset = freeSpace1stTo2ndEnd;
8130  }
8131  }
8132 
8133  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8134  {
8135  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8136  while(lastOffset < size)
8137  {
8138  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8139  while(nextAlloc2ndIndex != SIZE_MAX &&
8140  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8141  {
8142  --nextAlloc2ndIndex;
8143  }
8144 
8145  // Found non-null allocation.
8146  if(nextAlloc2ndIndex != SIZE_MAX)
8147  {
8148  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8149 
8150  // 1. Process free space before this allocation.
8151  if(lastOffset < suballoc.offset)
8152  {
8153  // There is free space from lastOffset to suballoc.offset.
8154  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8155  inoutStats.unusedSize += unusedRangeSize;
8156  ++inoutStats.unusedRangeCount;
8157  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8158  }
8159 
8160  // 2. Process this allocation.
8161  // There is allocation with suballoc.offset, suballoc.size.
8162  ++inoutStats.allocationCount;
8163 
8164  // 3. Prepare for next iteration.
8165  lastOffset = suballoc.offset + suballoc.size;
8166  --nextAlloc2ndIndex;
8167  }
8168  // We are at the end.
8169  else
8170  {
8171  if(lastOffset < size)
8172  {
8173  // There is free space from lastOffset to size.
8174  const VkDeviceSize unusedRangeSize = size - lastOffset;
8175  inoutStats.unusedSize += unusedRangeSize;
8176  ++inoutStats.unusedRangeCount;
8177  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
8178  }
8179 
8180  // End of loop.
8181  lastOffset = size;
8182  }
8183  }
8184  }
8185 }
8186 
8187 #if VMA_STATS_STRING_ENABLED
8188 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8189 {
8190  const VkDeviceSize size = GetSize();
8191  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8192  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8193  const size_t suballoc1stCount = suballocations1st.size();
8194  const size_t suballoc2ndCount = suballocations2nd.size();
8195 
8196  // FIRST PASS
8197 
8198  size_t unusedRangeCount = 0;
8199  VkDeviceSize usedBytes = 0;
8200 
8201  VkDeviceSize lastOffset = 0;
8202 
8203  size_t alloc2ndCount = 0;
8204  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8205  {
8206  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8207  size_t nextAlloc2ndIndex = 0;
8208  while(lastOffset < freeSpace2ndTo1stEnd)
8209  {
8210  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8211  while(nextAlloc2ndIndex < suballoc2ndCount &&
8212  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8213  {
8214  ++nextAlloc2ndIndex;
8215  }
8216 
8217  // Found non-null allocation.
8218  if(nextAlloc2ndIndex < suballoc2ndCount)
8219  {
8220  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8221 
8222  // 1. Process free space before this allocation.
8223  if(lastOffset < suballoc.offset)
8224  {
8225  // There is free space from lastOffset to suballoc.offset.
8226  ++unusedRangeCount;
8227  }
8228 
8229  // 2. Process this allocation.
8230  // There is allocation with suballoc.offset, suballoc.size.
8231  ++alloc2ndCount;
8232  usedBytes += suballoc.size;
8233 
8234  // 3. Prepare for next iteration.
8235  lastOffset = suballoc.offset + suballoc.size;
8236  ++nextAlloc2ndIndex;
8237  }
8238  // We are at the end.
8239  else
8240  {
8241  if(lastOffset < freeSpace2ndTo1stEnd)
8242  {
8243  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8244  ++unusedRangeCount;
8245  }
8246 
8247  // End of loop.
8248  lastOffset = freeSpace2ndTo1stEnd;
8249  }
8250  }
8251  }
8252 
8253  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8254  size_t alloc1stCount = 0;
8255  const VkDeviceSize freeSpace1stTo2ndEnd =
8256  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8257  while(lastOffset < freeSpace1stTo2ndEnd)
8258  {
8259  // Find next non-null allocation or move nextAllocIndex to the end.
8260  while(nextAlloc1stIndex < suballoc1stCount &&
8261  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8262  {
8263  ++nextAlloc1stIndex;
8264  }
8265 
8266  // Found non-null allocation.
8267  if(nextAlloc1stIndex < suballoc1stCount)
8268  {
8269  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8270 
8271  // 1. Process free space before this allocation.
8272  if(lastOffset < suballoc.offset)
8273  {
8274  // There is free space from lastOffset to suballoc.offset.
8275  ++unusedRangeCount;
8276  }
8277 
8278  // 2. Process this allocation.
8279  // There is allocation with suballoc.offset, suballoc.size.
8280  ++alloc1stCount;
8281  usedBytes += suballoc.size;
8282 
8283  // 3. Prepare for next iteration.
8284  lastOffset = suballoc.offset + suballoc.size;
8285  ++nextAlloc1stIndex;
8286  }
8287  // We are at the end.
8288  else
8289  {
8290  if(lastOffset < size)
8291  {
8292  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8293  ++unusedRangeCount;
8294  }
8295 
8296  // End of loop.
8297  lastOffset = freeSpace1stTo2ndEnd;
8298  }
8299  }
8300 
8301  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8302  {
8303  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8304  while(lastOffset < size)
8305  {
8306  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8307  while(nextAlloc2ndIndex != SIZE_MAX &&
8308  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8309  {
8310  --nextAlloc2ndIndex;
8311  }
8312 
8313  // Found non-null allocation.
8314  if(nextAlloc2ndIndex != SIZE_MAX)
8315  {
8316  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8317 
8318  // 1. Process free space before this allocation.
8319  if(lastOffset < suballoc.offset)
8320  {
8321  // There is free space from lastOffset to suballoc.offset.
8322  ++unusedRangeCount;
8323  }
8324 
8325  // 2. Process this allocation.
8326  // There is allocation with suballoc.offset, suballoc.size.
8327  ++alloc2ndCount;
8328  usedBytes += suballoc.size;
8329 
8330  // 3. Prepare for next iteration.
8331  lastOffset = suballoc.offset + suballoc.size;
8332  --nextAlloc2ndIndex;
8333  }
8334  // We are at the end.
8335  else
8336  {
8337  if(lastOffset < size)
8338  {
8339  // There is free space from lastOffset to size.
8340  ++unusedRangeCount;
8341  }
8342 
8343  // End of loop.
8344  lastOffset = size;
8345  }
8346  }
8347  }
8348 
8349  const VkDeviceSize unusedBytes = size - usedBytes;
8350  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8351 
8352  // SECOND PASS
8353  lastOffset = 0;
8354 
8355  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8356  {
8357  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8358  size_t nextAlloc2ndIndex = 0;
8359  while(lastOffset < freeSpace2ndTo1stEnd)
8360  {
8361  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8362  while(nextAlloc2ndIndex < suballoc2ndCount &&
8363  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8364  {
8365  ++nextAlloc2ndIndex;
8366  }
8367 
8368  // Found non-null allocation.
8369  if(nextAlloc2ndIndex < suballoc2ndCount)
8370  {
8371  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8372 
8373  // 1. Process free space before this allocation.
8374  if(lastOffset < suballoc.offset)
8375  {
8376  // There is free space from lastOffset to suballoc.offset.
8377  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8378  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8379  }
8380 
8381  // 2. Process this allocation.
8382  // There is allocation with suballoc.offset, suballoc.size.
8383  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8384 
8385  // 3. Prepare for next iteration.
8386  lastOffset = suballoc.offset + suballoc.size;
8387  ++nextAlloc2ndIndex;
8388  }
8389  // We are at the end.
8390  else
8391  {
8392  if(lastOffset < freeSpace2ndTo1stEnd)
8393  {
8394  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8395  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8396  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8397  }
8398 
8399  // End of loop.
8400  lastOffset = freeSpace2ndTo1stEnd;
8401  }
8402  }
8403  }
8404 
8405  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8406  while(lastOffset < freeSpace1stTo2ndEnd)
8407  {
8408  // Find next non-null allocation or move nextAllocIndex to the end.
8409  while(nextAlloc1stIndex < suballoc1stCount &&
8410  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8411  {
8412  ++nextAlloc1stIndex;
8413  }
8414 
8415  // Found non-null allocation.
8416  if(nextAlloc1stIndex < suballoc1stCount)
8417  {
8418  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8419 
8420  // 1. Process free space before this allocation.
8421  if(lastOffset < suballoc.offset)
8422  {
8423  // There is free space from lastOffset to suballoc.offset.
8424  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8425  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8426  }
8427 
8428  // 2. Process this allocation.
8429  // There is allocation with suballoc.offset, suballoc.size.
8430  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8431 
8432  // 3. Prepare for next iteration.
8433  lastOffset = suballoc.offset + suballoc.size;
8434  ++nextAlloc1stIndex;
8435  }
8436  // We are at the end.
8437  else
8438  {
8439  if(lastOffset < freeSpace1stTo2ndEnd)
8440  {
8441  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8442  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8443  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8444  }
8445 
8446  // End of loop.
8447  lastOffset = freeSpace1stTo2ndEnd;
8448  }
8449  }
8450 
8451  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8452  {
8453  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8454  while(lastOffset < size)
8455  {
8456  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8457  while(nextAlloc2ndIndex != SIZE_MAX &&
8458  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8459  {
8460  --nextAlloc2ndIndex;
8461  }
8462 
8463  // Found non-null allocation.
8464  if(nextAlloc2ndIndex != SIZE_MAX)
8465  {
8466  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8467 
8468  // 1. Process free space before this allocation.
8469  if(lastOffset < suballoc.offset)
8470  {
8471  // There is free space from lastOffset to suballoc.offset.
8472  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8473  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8474  }
8475 
8476  // 2. Process this allocation.
8477  // There is allocation with suballoc.offset, suballoc.size.
8478  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8479 
8480  // 3. Prepare for next iteration.
8481  lastOffset = suballoc.offset + suballoc.size;
8482  --nextAlloc2ndIndex;
8483  }
8484  // We are at the end.
8485  else
8486  {
8487  if(lastOffset < size)
8488  {
8489  // There is free space from lastOffset to size.
8490  const VkDeviceSize unusedRangeSize = size - lastOffset;
8491  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8492  }
8493 
8494  // End of loop.
8495  lastOffset = size;
8496  }
8497  }
8498  }
8499 
8500  PrintDetailedMap_End(json);
8501 }
8502 #endif // #if VMA_STATS_STRING_ENABLED
8503 
8504 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8505  uint32_t currentFrameIndex,
8506  uint32_t frameInUseCount,
8507  VkDeviceSize bufferImageGranularity,
8508  VkDeviceSize allocSize,
8509  VkDeviceSize allocAlignment,
8510  bool upperAddress,
8511  VmaSuballocationType allocType,
8512  bool canMakeOtherLost,
8513  uint32_t strategy,
8514  VmaAllocationRequest* pAllocationRequest)
8515 {
8516  VMA_ASSERT(allocSize > 0);
8517  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8518  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8519  VMA_HEAVY_ASSERT(Validate());
8520 
8521  const VkDeviceSize size = GetSize();
8522  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8523  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8524 
8525  if(upperAddress)
8526  {
8527  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8528  {
8529  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8530  return false;
8531  }
8532 
8533  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8534  if(allocSize > size)
8535  {
8536  return false;
8537  }
8538  VkDeviceSize resultBaseOffset = size - allocSize;
8539  if(!suballocations2nd.empty())
8540  {
8541  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8542  resultBaseOffset = lastSuballoc.offset - allocSize;
8543  if(allocSize > lastSuballoc.offset)
8544  {
8545  return false;
8546  }
8547  }
8548 
8549  // Start from offset equal to end of free space.
8550  VkDeviceSize resultOffset = resultBaseOffset;
8551 
8552  // Apply VMA_DEBUG_MARGIN at the end.
8553  if(VMA_DEBUG_MARGIN > 0)
8554  {
8555  if(resultOffset < VMA_DEBUG_MARGIN)
8556  {
8557  return false;
8558  }
8559  resultOffset -= VMA_DEBUG_MARGIN;
8560  }
8561 
8562  // Apply alignment.
8563  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8564 
8565  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8566  // Make bigger alignment if necessary.
8567  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8568  {
8569  bool bufferImageGranularityConflict = false;
8570  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8571  {
8572  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8573  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8574  {
8575  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8576  {
8577  bufferImageGranularityConflict = true;
8578  break;
8579  }
8580  }
8581  else
8582  // Already on previous page.
8583  break;
8584  }
8585  if(bufferImageGranularityConflict)
8586  {
8587  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8588  }
8589  }
8590 
8591  // There is enough free space.
8592  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8593  suballocations1st.back().offset + suballocations1st.back().size :
8594  0;
8595  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8596  {
8597  // Check previous suballocations for BufferImageGranularity conflicts.
8598  // If conflict exists, allocation cannot be made here.
8599  if(bufferImageGranularity > 1)
8600  {
8601  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8602  {
8603  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8604  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8605  {
8606  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8607  {
8608  return false;
8609  }
8610  }
8611  else
8612  {
8613  // Already on next page.
8614  break;
8615  }
8616  }
8617  }
8618 
8619  // All tests passed: Success.
8620  pAllocationRequest->offset = resultOffset;
8621  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8622  pAllocationRequest->sumItemSize = 0;
8623  // pAllocationRequest->item unused.
8624  pAllocationRequest->itemsToMakeLostCount = 0;
8625  return true;
8626  }
8627  }
8628  else // !upperAddress
8629  {
8630  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8631  {
8632  // Try to allocate at the end of 1st vector.
8633 
8634  VkDeviceSize resultBaseOffset = 0;
8635  if(!suballocations1st.empty())
8636  {
8637  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8638  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8639  }
8640 
8641  // Start from offset equal to beginning of free space.
8642  VkDeviceSize resultOffset = resultBaseOffset;
8643 
8644  // Apply VMA_DEBUG_MARGIN at the beginning.
8645  if(VMA_DEBUG_MARGIN > 0)
8646  {
8647  resultOffset += VMA_DEBUG_MARGIN;
8648  }
8649 
8650  // Apply alignment.
8651  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8652 
8653  // Check previous suballocations for BufferImageGranularity conflicts.
8654  // Make bigger alignment if necessary.
8655  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8656  {
8657  bool bufferImageGranularityConflict = false;
8658  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8659  {
8660  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8661  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8662  {
8663  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8664  {
8665  bufferImageGranularityConflict = true;
8666  break;
8667  }
8668  }
8669  else
8670  // Already on previous page.
8671  break;
8672  }
8673  if(bufferImageGranularityConflict)
8674  {
8675  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8676  }
8677  }
8678 
8679  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8680  suballocations2nd.back().offset : size;
8681 
8682  // There is enough free space at the end after alignment.
8683  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8684  {
8685  // Check next suballocations for BufferImageGranularity conflicts.
8686  // If conflict exists, allocation cannot be made here.
8687  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8688  {
8689  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8690  {
8691  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8692  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8693  {
8694  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8695  {
8696  return false;
8697  }
8698  }
8699  else
8700  {
8701  // Already on previous page.
8702  break;
8703  }
8704  }
8705  }
8706 
8707  // All tests passed: Success.
8708  pAllocationRequest->offset = resultOffset;
8709  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8710  pAllocationRequest->sumItemSize = 0;
8711  // pAllocationRequest->item unused.
8712  pAllocationRequest->itemsToMakeLostCount = 0;
8713  return true;
8714  }
8715  }
8716 
8717  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8718  // beginning of 1st vector as the end of free space.
8719  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8720  {
8721  VMA_ASSERT(!suballocations1st.empty());
8722 
8723  VkDeviceSize resultBaseOffset = 0;
8724  if(!suballocations2nd.empty())
8725  {
8726  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8727  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8728  }
8729 
8730  // Start from offset equal to beginning of free space.
8731  VkDeviceSize resultOffset = resultBaseOffset;
8732 
8733  // Apply VMA_DEBUG_MARGIN at the beginning.
8734  if(VMA_DEBUG_MARGIN > 0)
8735  {
8736  resultOffset += VMA_DEBUG_MARGIN;
8737  }
8738 
8739  // Apply alignment.
8740  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8741 
8742  // Check previous suballocations for BufferImageGranularity conflicts.
8743  // Make bigger alignment if necessary.
8744  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8745  {
8746  bool bufferImageGranularityConflict = false;
8747  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8748  {
8749  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8750  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8751  {
8752  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8753  {
8754  bufferImageGranularityConflict = true;
8755  break;
8756  }
8757  }
8758  else
8759  // Already on previous page.
8760  break;
8761  }
8762  if(bufferImageGranularityConflict)
8763  {
8764  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8765  }
8766  }
8767 
8768  pAllocationRequest->itemsToMakeLostCount = 0;
8769  pAllocationRequest->sumItemSize = 0;
8770  size_t index1st = m_1stNullItemsBeginCount;
8771 
8772  if(canMakeOtherLost)
8773  {
8774  while(index1st < suballocations1st.size() &&
8775  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8776  {
8777  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8778  const VmaSuballocation& suballoc = suballocations1st[index1st];
8779  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8780  {
8781  // No problem.
8782  }
8783  else
8784  {
8785  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8786  if(suballoc.hAllocation->CanBecomeLost() &&
8787  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8788  {
8789  ++pAllocationRequest->itemsToMakeLostCount;
8790  pAllocationRequest->sumItemSize += suballoc.size;
8791  }
8792  else
8793  {
8794  return false;
8795  }
8796  }
8797  ++index1st;
8798  }
8799 
8800  // Check next suballocations for BufferImageGranularity conflicts.
8801  // If conflict exists, we must mark more allocations lost or fail.
8802  if(bufferImageGranularity > 1)
8803  {
8804  while(index1st < suballocations1st.size())
8805  {
8806  const VmaSuballocation& suballoc = suballocations1st[index1st];
8807  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8808  {
8809  if(suballoc.hAllocation != VK_NULL_HANDLE)
8810  {
8811  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8812  if(suballoc.hAllocation->CanBecomeLost() &&
8813  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8814  {
8815  ++pAllocationRequest->itemsToMakeLostCount;
8816  pAllocationRequest->sumItemSize += suballoc.size;
8817  }
8818  else
8819  {
8820  return false;
8821  }
8822  }
8823  }
8824  else
8825  {
8826  // Already on next page.
8827  break;
8828  }
8829  ++index1st;
8830  }
8831  }
8832  }
8833 
8834  // There is enough free space at the end after alignment.
8835  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
8836  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
8837  {
8838  // Check next suballocations for BufferImageGranularity conflicts.
8839  // If conflict exists, allocation cannot be made here.
8840  if(bufferImageGranularity > 1)
8841  {
8842  for(size_t nextSuballocIndex = index1st;
8843  nextSuballocIndex < suballocations1st.size();
8844  nextSuballocIndex++)
8845  {
8846  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8847  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8848  {
8849  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8850  {
8851  return false;
8852  }
8853  }
8854  else
8855  {
8856  // Already on next page.
8857  break;
8858  }
8859  }
8860  }
8861 
8862  // All tests passed: Success.
8863  pAllocationRequest->offset = resultOffset;
8864  pAllocationRequest->sumFreeSize =
8865  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8866  - resultBaseOffset
8867  - pAllocationRequest->sumItemSize;
8868  // pAllocationRequest->item unused.
8869  return true;
8870  }
8871  }
8872  }
8873 
8874  return false;
8875 }
8876 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// scanning the 1st vector from the first live item onward.
// Returns true on success, false if any required allocation refuses to become lost
// (in that case some earlier items may already have been marked lost).
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing to make lost - trivially succeed.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Items to make lost are only ever counted at the beginning of the 1st vector
    // (see CreateAllocationRequest), which excludes the double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    // Start right after the run of already-null items at the front.
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Free items are skipped; they don't count toward itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Convert the item into a null (free) middle item and account its size.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                // MakeLost failed (e.g. allocation still in use) - abort.
                return false;
            }
        }
        ++index1st;
    }

    // Re-establish invariants (trim/compact null items, possibly swap vectors).
    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8921 
8922 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8923 {
8924  uint32_t lostAllocationCount = 0;
8925 
8926  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8927  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8928  {
8929  VmaSuballocation& suballoc = suballocations1st[i];
8930  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8931  suballoc.hAllocation->CanBecomeLost() &&
8932  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8933  {
8934  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8935  suballoc.hAllocation = VK_NULL_HANDLE;
8936  ++m_1stNullItemsMiddleCount;
8937  m_SumFreeSize += suballoc.size;
8938  ++lostAllocationCount;
8939  }
8940  }
8941 
8942  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8943  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8944  {
8945  VmaSuballocation& suballoc = suballocations2nd[i];
8946  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8947  suballoc.hAllocation->CanBecomeLost() &&
8948  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8949  {
8950  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8951  suballoc.hAllocation = VK_NULL_HANDLE;
8952  ++m_2ndNullItemsCount;
8953  ++lostAllocationCount;
8954  }
8955  }
8956 
8957  if(lostAllocationCount)
8958  {
8959  CleanupAfterFree();
8960  }
8961 
8962  return lostAllocationCount;
8963 }
8964 
8965 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8966 {
8967  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8968  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8969  {
8970  const VmaSuballocation& suballoc = suballocations1st[i];
8971  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8972  {
8973  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8974  {
8975  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8976  return VK_ERROR_VALIDATION_FAILED_EXT;
8977  }
8978  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8979  {
8980  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8981  return VK_ERROR_VALIDATION_FAILED_EXT;
8982  }
8983  }
8984  }
8985 
8986  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8987  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8988  {
8989  const VmaSuballocation& suballoc = suballocations2nd[i];
8990  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8991  {
8992  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8993  {
8994  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8995  return VK_ERROR_VALIDATION_FAILED_EXT;
8996  }
8997  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8998  {
8999  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9000  return VK_ERROR_VALIDATION_FAILED_EXT;
9001  }
9002  }
9003  }
9004 
9005  return VK_SUCCESS;
9006 }
9007 
// Commits an allocation previously prepared by CreateAllocationRequest.
// upperAddress selects the double-stack mode (push onto 2nd vector growing
// downward); otherwise the item goes to the end of the 1st vector or, when the
// offset wraps before the 1st vector's first item, to the 2nd vector (ring buffer).
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Double-stack and ring-buffer usage are mutually exclusive for one block.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Entering (or continuing) ring-buffer mode; double stack is illegal here.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request doesn't match either placement - CreateAllocationRequest
                // and Alloc disagree, which should be impossible.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
9077 
9078 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
9079 {
9080  FreeAtOffset(allocation->GetOffset());
9081 }
9082 
// Frees the suballocation that starts at the given offset.
// Fast paths are tried first (first item of 1st vector, last item of 2nd or 1st
// vector), then a binary search of the middle of each vector. Asserts if the
// offset does not match any live suballocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): assumes m_1stNullItemsBeginCount < size() whenever the
        // vector is non-empty - presumably maintained by CleanupAfterFree(); confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by ascending offset, so a sorted search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Turn it into a null middle item; CleanupAfterFree restores invariants.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd vector ascending by offset; double stack keeps it
        // descending, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
9171 
9172 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
9173 {
9174  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
9175  const size_t suballocCount = AccessSuballocations1st().size();
9176  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
9177 }
9178 
// Restores metadata invariants after items were freed: extends the null runs at
// the beginning/end of both vectors, optionally compacts the 1st vector, resets
// the 2nd-vector mode when it drains, and swaps the vectors when the 1st one drains
// while a ring buffer is active.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations remain: reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Each one found is reclassified from "middle" to "beginning".
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        // These can simply be dropped from the vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items to the front of the vector, skipping null ones,
            // then shrink it. Stable: relative order of live items is preserved.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Reclassify the null run now at the front of the (new) 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // The actual swap - presumably AccessSuballocations1st/2nd select
                // the underlying vector by this index; consistent with the comment above.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9275 
9276 
9278 // class VmaBlockMetadata_Buddy
9279 
// Constructs empty buddy-allocator metadata. The root node is created later in
// Init(); until then m_Root stays VMA_NULL.
// NOTE(review): hAllocator is unused in this body - presumably consumed by the
// base class or reserved for future use; confirm against VmaBlockMetadata.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    m_Root(VMA_NULL)
{
    // All per-level free lists start empty (front/back pointers zeroed).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
9285 
9286 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9287 {
9288  DeleteNode(m_Root);
9289 }
9290 
9291 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
9292 {
9293  VmaBlockMetadata::Init(size);
9294 
9295  Node* rootNode = new Node();
9296  rootNode->offset = 0;
9297  rootNode->type = Node::TYPE_FREE;
9298  rootNode->parent = VMA_NULL;
9299  rootNode->buddy = VMA_NULL;
9300 
9301  m_Root = rootNode;
9302  AddToFreeListFront(0, rootNode);
9303 }
9304 
9305 bool VmaBlockMetadata_Buddy::Validate() const
9306 {
9307  // Validate tree.
9308  if(!ValidateNode(VMA_NULL, m_Root, 0, GetSize()))
9309  {
9310  return false;
9311  }
9312 
9313  // Validate free node lists.
9314  for(uint32_t level = 0; level < MAX_LEVELS; ++level)
9315  {
9316  if(m_FreeList[level].front != VMA_NULL &&
9317  m_FreeList[level].front->free.prev != VMA_NULL)
9318  {
9319  return false;
9320  }
9321 
9322  for(Node* node = m_FreeList[level].front;
9323  node != VMA_NULL;
9324  node = node->free.next)
9325  {
9326  if(node->type != Node::TYPE_FREE)
9327  {
9328  return false;
9329  }
9330 
9331  if(node->free.next == VMA_NULL)
9332  {
9333  if(m_FreeList[level].back != node)
9334  {
9335  return false;
9336  }
9337  }
9338  else
9339  {
9340  if(node->free.next->free.prev != node)
9341  {
9342  return false;
9343  }
9344  }
9345  }
9346  }
9347 
9348  return true;
9349 }
9350 
// Buddy algorithm: allocation counting is not implemented yet - always reports 0.
size_t VmaBlockMetadata_Buddy::GetAllocationCount() const
{
    return 0; // TODO
}
9355 
// Buddy algorithm: free-size tracking is not implemented yet - always reports 0.
VkDeviceSize VmaBlockMetadata_Buddy::GetSumFreeSize() const
{
    return 0; // TODO
}
9360 
9361 VkDeviceSize VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
9362 {
9363  return 0; // TODO
9364 }
9365 
9366 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9367 {
9368  outInfo.blockCount = 1;
9369 
9370  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
9371  outInfo.unusedBytes = outInfo.unusedBytes = 0;
9372 
9373  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
9374  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
9375  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
9376 
9377  CalcAllocationStatInfoNode(outInfo, m_Root, GetSize());
9378 }
9379 
// Buddy algorithm: pool-statistics accumulation is not implemented yet;
// inoutStats is intentionally left unchanged.
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    // TODO
}
9384 
9385 #if VMA_STATS_STRING_ENABLED
9386 
9387 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
9388 {
9389  // TODO optimize
9390  VmaStatInfo stat;
9391  CalcAllocationStatInfo(stat);
9392 
9393  PrintDetailedMap_Begin(
9394  json,
9395  stat.unusedBytes,
9396  stat.allocationCount,
9397  stat.unusedRangeCount);
9398 
9399  PrintDetailedMapNode(json, m_Root, GetSize());
9400 
9401  PrintDetailedMap_End(json);
9402 }
9403 
9404 #endif // #if VMA_STATS_STRING_ENABLED
9405 
// Tries to find a free node that can hold allocSize bytes.
// Searches from the best-fit level upward toward larger nodes (lower level
// numbers mean larger nodes; level 0 is the whole block) and takes the first
// free node found. The found level is stashed in customData for Alloc().
// NOTE(review): currentFrameIndex, frameInUseCount, bufferImageGranularity,
// allocAlignment, allocType, canMakeOtherLost and strategy are unused here -
// presumably lost-allocation support, granularity conflicts and alignment are
// not implemented for the buddy algorithm yet (buddy nodes may be naturally
// aligned to their size); confirm before relying on alignment guarantees.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Larger than the whole block - can never fit.
    const VkDeviceSize size = GetSize();
    if(allocSize > size)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Iterates level = targetLevel, targetLevel-1, ..., 0 (the "level--" test
    // runs before the body, so targetLevel itself is checked first).
    for(uint32_t level = targetLevel + 1; level--; )
    {
        if(m_FreeList[level].front != VMA_NULL)
        {
            pAllocationRequest->offset = m_FreeList[level].front->offset;
            pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
            pAllocationRequest->sumItemSize = 0;
            pAllocationRequest->itemsToMakeLostCount = 0;
            // Remember which level the node was found at for Alloc().
            pAllocationRequest->customData = (void*)(uintptr_t)level;
            return true;
        }
    }

    return false;
}
9442 
// Buddy algorithm: lost-allocation support is not implemented yet - always fails.
bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    return false; // TODO
}
9450 
// Buddy algorithm: lost-allocation support is not implemented yet - nothing is made lost.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    return 0; // TODO
}
9455 
// Buddy algorithm: margin validation is not implemented yet - always reports success.
VkResult VmaBlockMetadata_Buddy::CheckCorruption(const void* pBlockData)
{
    return VK_SUCCESS; // TODO
}
9460 
// Commits an allocation prepared by CreateAllocationRequest: takes the first
// free node at the level recorded in request.customData and, if that node is
// larger than needed, repeatedly splits it in half until reaching the level
// that matches allocSize. The final node becomes TYPE_ALLOCATION.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest found the free node.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    VMA_ASSERT(m_FreeList[currLevel].front != VMA_NULL);
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode->type == Node::TYPE_FREE);
    VMA_ASSERT(currNode->offset == request.offset);

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = new Node();
        Node* rightChild = new Node();

        // Left child covers the first half of the parent's range.
        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        // Right child covers the second half.
        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // leftChild is pushed last so it ends up at the front, and the
        // free-list front at the next level is the left (lowest-offset) child.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++currLevel;
        currNode = m_FreeList[currLevel].front;
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel && currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;
}
9518 
9519 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
9520 {
9521  if(node->type == Node::TYPE_SPLIT)
9522  {
9523  DeleteNode(node->split.leftChild->buddy);
9524  DeleteNode(node->split.leftChild);
9525  }
9526 
9527  delete node;
9528 }
9529 
// Recursively validates one node of the buddy tree against its expected parent,
// level, and node size. Returns false on the first violated invariant.
bool VmaBlockMetadata_Buddy::ValidateNode(const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    // Parent back-pointer must match the actual parent.
    if(curr->parent != parent)
    {
        return false;
    }
    // Exactly the root (parent == VMA_NULL) has no buddy.
    if((curr->buddy == VMA_NULL) != (parent == VMA_NULL))
    {
        return false;
    }
    // Buddy links must be mutual.
    if(curr->buddy != VMA_NULL && curr->buddy->buddy != curr)
    {
        return false;
    }
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        break;
    case Node::TYPE_ALLOCATION:
        // An allocation node must reference a live allocation.
        if(curr->allocation.alloc == VK_NULL_HANDLE)
        {
            return false;
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            if(leftChild == VMA_NULL)
            {
                return false;
            }
            // Left child starts where the parent starts.
            if(leftChild->offset != curr->offset)
            {
                return false;
            }
            if(!ValidateNode(curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                return false;
            }
            const Node* const rightChild = leftChild->buddy;
            // NOTE(review): this compares against curr->offset + levelNodeSize,
            // i.e. the END of the parent's range, while Alloc() places the right
            // child at curr->offset + LevelToNodeSize(childrenLevel) (half the
            // parent size) - looks inconsistent; confirm which is intended.
            if(rightChild->offset != curr->offset + levelNodeSize)
            {
                return false;
            }
            if(!ValidateNode(curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                return false;
            }
        }
        break;
    default:
        // Corrupted type tag.
        return false;
    }

    return true;
}
9589 
9590 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9591 {
9592  // TODO optimize
9593  uint32_t level = 0;
9594  VkDeviceSize currLevelNodeSize = GetSize();
9595  VkDeviceSize nextLevelNodeSize = currLevelNodeSize / 2;
9596  while(allocSize <= nextLevelNodeSize && level + 1 < MAX_LEVELS)
9597  {
9598  ++level;
9599  currLevelNodeSize = nextLevelNodeSize;
9600  nextLevelNodeSize = currLevelNodeSize / 2;
9601  }
9602  return level;
9603 }
9604 
9605 VkDeviceSize VmaBlockMetadata_Buddy::LevelToNodeSize(uint32_t level) const
9606 {
9607  // TODO optimize
9608  VkDeviceSize result = GetSize();
9609  for(uint32_t i = 0; i < level; ++i)
9610  {
9611  result /= 2;
9612  }
9613  return result;
9614 }
9615 
// Frees the allocation at the given offset: walks the tree from the root down to
// the leaf allocation node covering the offset, marks it free, then merges it
// with its buddy repeatedly while the buddy is also free, moving up the tree.
// alloc == VK_NULL_HANDLE skips the ownership check (free purely by offset).
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelSize = GetSize();
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelSize / 2;
        // Descend into whichever half contains the offset.
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // The root (level 0) has no buddy, hence the level > 0 guard.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        // The buddy leaves its free list; the merged parent is re-added below.
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        delete node->buddy;
        delete node;
        // Parent goes from TYPE_SPLIT back to TYPE_FREE (its split.leftChild
        // pointer is no longer meaningful after this).
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
    }

    AddToFreeListFront(level, node);
}
9660 
9661 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
9662 {
9663  switch(node->type)
9664  {
9665  case Node::TYPE_FREE:
9666  ++outInfo.unusedRangeCount;
9667  outInfo.unusedBytes += levelNodeSize;
9668  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
9669  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
9670  break;
9671  case Node::TYPE_ALLOCATION:
9672  ++outInfo.allocationCount;
9673  outInfo.usedBytes += levelNodeSize;
9674  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, levelNodeSize);
9675  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, levelNodeSize);
9676  break;
9677  case Node::TYPE_SPLIT:
9678  {
9679  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9680  const Node* const leftChild = node->split.leftChild;
9681  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
9682  const Node* const rightChild = leftChild->buddy;
9683  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
9684  }
9685  break;
9686  default:
9687  VMA_ASSERT(0);
9688  }
9689 }
9690 
9691 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
9692 {
9693  VMA_ASSERT(node->type == Node::TYPE_FREE);
9694 
9695  // List is empty.
9696  Node* const frontNode = m_FreeList[level].front;
9697  if(frontNode == VMA_NULL)
9698  {
9699  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
9700  node->free.prev = node->free.next = VMA_NULL;
9701  m_FreeList[level].front = m_FreeList[level].back = node;
9702  }
9703  else
9704  {
9705  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
9706  node->free.prev = VMA_NULL;
9707  node->free.next = frontNode;
9708  frontNode->free.prev = node;
9709  m_FreeList[level].front = node;
9710  }
9711 }
9712 
9713 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
9714 {
9715  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
9716 
9717  // It is at the front.
9718  if(node->free.prev == VMA_NULL)
9719  {
9720  VMA_ASSERT(m_FreeList[level].front == node);
9721  m_FreeList[level].front = node->free.next;
9722  }
9723  else
9724  {
9725  Node* const prevFreeNode = node->free.prev;
9726  VMA_ASSERT(prevFreeNode->free.next == node);
9727  prevFreeNode->free.next = node->free.next;
9728  }
9729 
9730  // It is at the back.
9731  if(node->free.next == VMA_NULL)
9732  {
9733  VMA_ASSERT(m_FreeList[level].back == node);
9734  m_FreeList[level].back = node->free.prev;
9735  }
9736  else
9737  {
9738  Node* const nextFreeNode = node->free.next;
9739  VMA_ASSERT(nextFreeNode->free.prev == node);
9740  nextFreeNode->free.prev = node->free.prev;
9741  }
9742 }
9743 
9744 #if VMA_STATS_STRING_ENABLED
9745 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
9746 {
9747  switch(node->type)
9748  {
9749  case Node::TYPE_FREE:
9750  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
9751  break;
9752  case Node::TYPE_ALLOCATION:
9753  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
9754  break;
9755  case Node::TYPE_SPLIT:
9756  {
9757  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9758  const Node* const leftChild = node->split.leftChild;
9759  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
9760  const Node* const rightChild = leftChild->buddy;
9761  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
9762  }
9763  break;
9764  default:
9765  VMA_ASSERT(0);
9766  }
9767 }
9768 #endif // #if VMA_STATS_STRING_ENABLED
9769 
9770 
9772 // class VmaDeviceMemoryBlock
9773 
// Constructs an uninitialized block; actual setup happens later in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),          // reference count of outstanding Map() calls
    m_pMappedData(VMA_NULL) // host pointer valid while m_MapCount > 0
{
}
9783 
// Binds this block to an already-allocated VkDeviceMemory and creates the
// metadata object implementing the requested suballocation algorithm.
void VmaDeviceMemoryBlock::Init(
    VmaAllocator hAllocator,
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize,
    uint32_t id,
    uint32_t algorithm)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_Id = id;
    m_hMemory = newMemory;

    switch(algorithm)
    {
    // NOTE(review): the case labels for the linear and buddy algorithm bits
    // appear to have been dropped during documentation extraction here —
    // verify this switch against the upstream source.
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
        break;
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
        break;
    default:
        VMA_ASSERT(0);
        // Fall-through.
    case 0:
        m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    }
    m_pMetadata->Init(newSize);
}
9814 
// Releases the block's VkDeviceMemory back to the allocator and destroys the
// metadata. The block must be empty (all suballocations freed) at this point.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
9828 
9829 bool VmaDeviceMemoryBlock::Validate() const
9830 {
9831  if((m_hMemory == VK_NULL_HANDLE) ||
9832  (m_pMetadata->GetSize() == 0))
9833  {
9834  return false;
9835  }
9836 
9837  return m_pMetadata->Validate();
9838 }
9839 
9840 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9841 {
9842  void* pData = nullptr;
9843  VkResult res = Map(hAllocator, 1, &pData);
9844  if(res != VK_SUCCESS)
9845  {
9846  return res;
9847  }
9848 
9849  res = m_pMetadata->CheckCorruption(pData);
9850 
9851  Unmap(hAllocator, 1);
9852 
9853  return res;
9854 }
9855 
9856 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9857 {
9858  if(count == 0)
9859  {
9860  return VK_SUCCESS;
9861  }
9862 
9863  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9864  if(m_MapCount != 0)
9865  {
9866  m_MapCount += count;
9867  VMA_ASSERT(m_pMappedData != VMA_NULL);
9868  if(ppData != VMA_NULL)
9869  {
9870  *ppData = m_pMappedData;
9871  }
9872  return VK_SUCCESS;
9873  }
9874  else
9875  {
9876  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9877  hAllocator->m_hDevice,
9878  m_hMemory,
9879  0, // offset
9880  VK_WHOLE_SIZE,
9881  0, // flags
9882  &m_pMappedData);
9883  if(result == VK_SUCCESS)
9884  {
9885  if(ppData != VMA_NULL)
9886  {
9887  *ppData = m_pMappedData;
9888  }
9889  m_MapCount = count;
9890  }
9891  return result;
9892  }
9893 }
9894 
9895 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9896 {
9897  if(count == 0)
9898  {
9899  return;
9900  }
9901 
9902  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9903  if(m_MapCount >= count)
9904  {
9905  m_MapCount -= count;
9906  if(m_MapCount == 0)
9907  {
9908  m_pMappedData = VMA_NULL;
9909  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9910  }
9911  }
9912  else
9913  {
9914  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
9915  }
9916 }
9917 
9918 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9919 {
9920  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9921  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9922 
9923  void* pData;
9924  VkResult res = Map(hAllocator, 1, &pData);
9925  if(res != VK_SUCCESS)
9926  {
9927  return res;
9928  }
9929 
9930  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9931  VmaWriteMagicValue(pData, allocOffset + allocSize);
9932 
9933  Unmap(hAllocator, 1);
9934 
9935  return VK_SUCCESS;
9936 }
9937 
9938 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9939 {
9940  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9941  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9942 
9943  void* pData;
9944  VkResult res = Map(hAllocator, 1, &pData);
9945  if(res != VK_SUCCESS)
9946  {
9947  return res;
9948  }
9949 
9950  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
9951  {
9952  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
9953  }
9954  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
9955  {
9956  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
9957  }
9958 
9959  Unmap(hAllocator, 1);
9960 
9961  return VK_SUCCESS;
9962 }
9963 
9964 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
9965  const VmaAllocator hAllocator,
9966  const VmaAllocation hAllocation,
9967  VkBuffer hBuffer)
9968 {
9969  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9970  hAllocation->GetBlock() == this);
9971  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9972  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9973  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
9974  hAllocator->m_hDevice,
9975  hBuffer,
9976  m_hMemory,
9977  hAllocation->GetOffset());
9978 }
9979 
9980 VkResult VmaDeviceMemoryBlock::BindImageMemory(
9981  const VmaAllocator hAllocator,
9982  const VmaAllocation hAllocation,
9983  VkImage hImage)
9984 {
9985  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9986  hAllocation->GetBlock() == this);
9987  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9988  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9989  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
9990  hAllocator->m_hDevice,
9991  hImage,
9992  m_hMemory,
9993  hAllocation->GetOffset());
9994 }
9995 
9996 static void InitStatInfo(VmaStatInfo& outInfo)
9997 {
9998  memset(&outInfo, 0, sizeof(outInfo));
9999  outInfo.allocationSizeMin = UINT64_MAX;
10000  outInfo.unusedRangeSizeMin = UINT64_MAX;
10001 }
10002 
10003 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
10004 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
10005 {
10006  inoutInfo.blockCount += srcInfo.blockCount;
10007  inoutInfo.allocationCount += srcInfo.allocationCount;
10008  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
10009  inoutInfo.usedBytes += srcInfo.usedBytes;
10010  inoutInfo.unusedBytes += srcInfo.unusedBytes;
10011  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
10012  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
10013  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
10014  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
10015 }
10016 
10017 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
10018 {
10019  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
10020  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
10021  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
10022  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
10023 }
10024 
// Constructs a custom pool: forwards the creation parameters to the embedded
// block vector. A zero blockSize in createInfo means "use the allocator's
// preferred block size" and marks the size as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
10043 
// Nothing to do: the embedded m_BlockVector cleans itself up in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
10047 
10048 #if VMA_STATS_STRING_ENABLED
10049 
10050 #endif // #if VMA_STATS_STRING_ENABLED
10051 
// Constructs an empty block vector; blocks are created lazily in Allocate()
// or eagerly via CreateMinBlocks().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
10079 
10080 VmaBlockVector::~VmaBlockVector()
10081 {
10082  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
10083 
10084  for(size_t i = m_Blocks.size(); i--; )
10085  {
10086  m_Blocks[i]->Destroy(m_hAllocator);
10087  vma_delete(m_hAllocator, m_Blocks[i]);
10088  }
10089 }
10090 
10091 VkResult VmaBlockVector::CreateMinBlocks()
10092 {
10093  for(size_t i = 0; i < m_MinBlockCount; ++i)
10094  {
10095  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
10096  if(res != VK_SUCCESS)
10097  {
10098  return res;
10099  }
10100  }
10101  return VK_SUCCESS;
10102 }
10103 
10104 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
10105 {
10106  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10107 
10108  const size_t blockCount = m_Blocks.size();
10109 
10110  pStats->size = 0;
10111  pStats->unusedSize = 0;
10112  pStats->allocationCount = 0;
10113  pStats->unusedRangeCount = 0;
10114  pStats->unusedRangeSizeMax = 0;
10115  pStats->blockCount = blockCount;
10116 
10117  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
10118  {
10119  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10120  VMA_ASSERT(pBlock);
10121  VMA_HEAVY_ASSERT(pBlock->Validate());
10122  pBlock->m_pMetadata->AddPoolStats(*pStats);
10123  }
10124 }
10125 
10126 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
10127 {
10128  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
10129  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
10130  (VMA_DEBUG_MARGIN > 0) &&
10131  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
10132 }
10133 
// Maximum number of retries in Allocate() when making other allocations lost.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
10135 
// Allocates `size` bytes with `alignment` from this block vector:
// 1) tries existing blocks without disturbing other allocations,
// 2) tries to create a new block (with shrinking fallback sizes),
// 3) optionally makes other lost-able allocations lost and retries.
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    switch(strategy)
    {
    case 0:
    // NOTE(review): lines appear to have been dropped during documentation
    // extraction inside this switch (likely the default-strategy assignment
    // and the case labels for the explicit strategy bits) — verify against
    // the upstream source.
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): a line adjusting allocFlagsCopy appears to have been
        // dropped during extraction here — verify against upstream.

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): a condition line (likely a best-fit strategy
            // check) appears to have been dropped during extraction before
            // this brace — verify against upstream.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): a condition line (likely a best-fit strategy
            // check) appears to have been dropped during extraction before
            // this brace — verify against upstream.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        // NOTE(review): an additional condition line appears
                        // to have been dropped during extraction here.
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0 ||
                            // NOTE(review): an additional condition line
                            // appears to have been dropped during extraction
                            // here.
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    // NOTE(review): `blockIndex` is out of scope at this point
                    // (it is declared inside the search loops above) — verify
                    // this log statement against upstream.
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
10510 
10511 void VmaBlockVector::Free(
10512  VmaAllocation hAllocation)
10513 {
10514  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
10515 
10516  // Scope for lock.
10517  {
10518  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10519 
10520  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
10521 
10522  if(IsCorruptionDetectionEnabled())
10523  {
10524  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
10525  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
10526  }
10527 
10528  if(hAllocation->IsPersistentMap())
10529  {
10530  pBlock->Unmap(m_hAllocator, 1);
10531  }
10532 
10533  pBlock->m_pMetadata->Free(hAllocation);
10534  VMA_HEAVY_ASSERT(pBlock->Validate());
10535 
10536  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
10537 
10538  // pBlock became empty after this deallocation.
10539  if(pBlock->m_pMetadata->IsEmpty())
10540  {
10541  // Already has empty Allocation. We don't want to have two, so delete this one.
10542  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
10543  {
10544  pBlockToDelete = pBlock;
10545  Remove(pBlock);
10546  }
10547  // We now have first empty block.
10548  else
10549  {
10550  m_HasEmptyBlock = true;
10551  }
10552  }
10553  // pBlock didn't become empty, but we have another empty block - find and free that one.
10554  // (This is optional, heuristics.)
10555  else if(m_HasEmptyBlock)
10556  {
10557  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
10558  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
10559  {
10560  pBlockToDelete = pLastBlock;
10561  m_Blocks.pop_back();
10562  m_HasEmptyBlock = false;
10563  }
10564  }
10565 
10566  IncrementallySortBlocks();
10567  }
10568 
10569  // Destruction of a free Allocation. Deferred until this point, outside of mutex
10570  // lock, for performance reason.
10571  if(pBlockToDelete != VMA_NULL)
10572  {
10573  VMA_DEBUG_LOG(" Deleted empty allocation");
10574  pBlockToDelete->Destroy(m_hAllocator);
10575  vma_delete(m_hAllocator, pBlockToDelete);
10576  }
10577 }
10578 
10579 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
10580 {
10581  VkDeviceSize result = 0;
10582  for(size_t i = m_Blocks.size(); i--; )
10583  {
10584  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
10585  if(result >= m_PreferredBlockSize)
10586  {
10587  break;
10588  }
10589  }
10590  return result;
10591 }
10592 
10593 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
10594 {
10595  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10596  {
10597  if(m_Blocks[blockIndex] == pBlock)
10598  {
10599  VmaVectorRemove(m_Blocks, blockIndex);
10600  return;
10601  }
10602  }
10603  VMA_ASSERT(0);
10604 }
10605 
10606 void VmaBlockVector::IncrementallySortBlocks()
10607 {
10608  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
10609  {
10610  // Bubble sort only until first swap.
10611  for(size_t i = 1; i < m_Blocks.size(); ++i)
10612  {
10613  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
10614  {
10615  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
10616  return;
10617  }
10618  }
10619  }
10620 }
10621 
// Tries to carve a suballocation of given size/alignment out of pBlock.
// On success fills *pAllocation and returns VK_SUCCESS; if the block has no
// suitable free space, returns VK_ERROR_OUT_OF_DEVICE_MEMORY so the caller
// can try another block or create a new one.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Making other allocations lost is handled on a different code path.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Map first: if mapping fails, the block's metadata is still untouched.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit the request: create the allocation object and register it
        // in the block's metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Fill the new allocation's memory with a recognizable debug pattern.
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Write guard values into the debug margins around the allocation.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
10696 
10697 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
10698 {
10699  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
10700  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
10701  allocInfo.allocationSize = blockSize;
10702  VkDeviceMemory mem = VK_NULL_HANDLE;
10703  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
10704  if(res < 0)
10705  {
10706  return res;
10707  }
10708 
10709  // New VkDeviceMemory successfully created.
10710 
10711  // Create new Allocation for it.
10712  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
10713  pBlock->Init(
10714  m_hAllocator,
10715  m_MemoryTypeIndex,
10716  mem,
10717  allocInfo.allocationSize,
10718  m_NextBlockId++,
10719  m_Algorithm);
10720 
10721  m_Blocks.push_back(pBlock);
10722  if(pNewBlockIndex != VMA_NULL)
10723  {
10724  *pNewBlockIndex = m_Blocks.size() - 1;
10725  }
10726 
10727  return VK_SUCCESS;
10728 }
10729 
10730 #if VMA_STATS_STRING_ENABLED
10731 
// Serializes this block vector's state as JSON into the given writer.
// Custom pools emit all their creation parameters; default per-memory-type
// vectors emit only the preferred block size.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // "Blocks": map keyed by block id, value is each block's detailed metadata.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
10794 
10795 #endif // #if VMA_STATS_STRING_ENABLED
10796 
10797 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10798  VmaAllocator hAllocator,
10799  uint32_t currentFrameIndex)
10800 {
10801  if(m_pDefragmentator == VMA_NULL)
10802  {
10803  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10804  hAllocator,
10805  this,
10806  currentFrameIndex);
10807  }
10808 
10809  return m_pDefragmentator;
10810 }
10811 
// Runs defragmentation on this vector using the previously created
// defragmentator (see EnsureDefragmentator) and then frees blocks that became
// empty, keeping at least m_MinBlockCount of them.
// maxBytesToMove / maxAllocationsToMove are in-out budgets, decremented by the
// amounts actually consumed. Statistics are accumulated into
// *pDefragmentationStats when provided.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Nothing to do if defragmentation was never requested for this vector.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the caller's remaining budgets.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // indices we still have to visit.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Free the block only while staying above the configured minimum;
            // otherwise just remember that an empty block remains.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
10868 
10869 void VmaBlockVector::DestroyDefragmentator()
10870 {
10871  if(m_pDefragmentator != VMA_NULL)
10872  {
10873  vma_delete(m_hAllocator, m_pDefragmentator);
10874  m_pDefragmentator = VMA_NULL;
10875  }
10876 }
10877 
10878 void VmaBlockVector::MakePoolAllocationsLost(
10879  uint32_t currentFrameIndex,
10880  size_t* pLostAllocationCount)
10881 {
10882  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10883  size_t lostAllocationCount = 0;
10884  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10885  {
10886  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10887  VMA_ASSERT(pBlock);
10888  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10889  }
10890  if(pLostAllocationCount != VMA_NULL)
10891  {
10892  *pLostAllocationCount = lostAllocationCount;
10893  }
10894 }
10895 
10896 VkResult VmaBlockVector::CheckCorruption()
10897 {
10898  if(!IsCorruptionDetectionEnabled())
10899  {
10900  return VK_ERROR_FEATURE_NOT_PRESENT;
10901  }
10902 
10903  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10904  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10905  {
10906  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10907  VMA_ASSERT(pBlock);
10908  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10909  if(res != VK_SUCCESS)
10910  {
10911  return res;
10912  }
10913  }
10914  return VK_SUCCESS;
10915 }
10916 
10917 void VmaBlockVector::AddStats(VmaStats* pStats)
10918 {
10919  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10920  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
10921 
10922  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10923 
10924  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10925  {
10926  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10927  VMA_ASSERT(pBlock);
10928  VMA_HEAVY_ASSERT(pBlock->Validate());
10929  VmaStatInfo allocationStatInfo;
10930  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
10931  VmaAddStatInfo(pStats->total, allocationStatInfo);
10932  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
10933  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
10934  }
10935 }
10936 
10938 // VmaDefragmentator members definition
10939 
// Constructs a defragmentator bound to one block vector. Containers use the
// allocator's custom allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Only the default algorithm (0) supports defragmentation.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
10954 
10955 VmaDefragmentator::~VmaDefragmentator()
10956 {
10957  for(size_t i = m_Blocks.size(); i--; )
10958  {
10959  vma_delete(m_hAllocator, m_Blocks[i]);
10960  }
10961 }
10962 
10963 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
10964 {
10965  AllocationInfo allocInfo;
10966  allocInfo.m_hAllocation = hAlloc;
10967  allocInfo.m_pChanged = pChanged;
10968  m_Allocations.push_back(allocInfo);
10969 }
10970 
10971 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
10972 {
10973  // It has already been mapped for defragmentation.
10974  if(m_pMappedDataForDefragmentation)
10975  {
10976  *ppMappedData = m_pMappedDataForDefragmentation;
10977  return VK_SUCCESS;
10978  }
10979 
10980  // It is originally mapped.
10981  if(m_pBlock->GetMappedData())
10982  {
10983  *ppMappedData = m_pBlock->GetMappedData();
10984  return VK_SUCCESS;
10985  }
10986 
10987  // Map on first usage.
10988  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
10989  *ppMappedData = m_pMappedDataForDefragmentation;
10990  return res;
10991 }
10992 
10993 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
10994 {
10995  if(m_pMappedDataForDefragmentation != VMA_NULL)
10996  {
10997  m_pBlock->Unmap(hAllocator, 1);
10998  }
10999 }
11000 
11001 VkResult VmaDefragmentator::DefragmentRound(
11002  VkDeviceSize maxBytesToMove,
11003  uint32_t maxAllocationsToMove)
11004 {
11005  if(m_Blocks.empty())
11006  {
11007  return VK_SUCCESS;
11008  }
11009 
11010  size_t srcBlockIndex = m_Blocks.size() - 1;
11011  size_t srcAllocIndex = SIZE_MAX;
11012  for(;;)
11013  {
11014  // 1. Find next allocation to move.
11015  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
11016  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
11017  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
11018  {
11019  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
11020  {
11021  // Finished: no more allocations to process.
11022  if(srcBlockIndex == 0)
11023  {
11024  return VK_SUCCESS;
11025  }
11026  else
11027  {
11028  --srcBlockIndex;
11029  srcAllocIndex = SIZE_MAX;
11030  }
11031  }
11032  else
11033  {
11034  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
11035  }
11036  }
11037 
11038  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
11039  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
11040 
11041  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
11042  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
11043  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
11044  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
11045 
11046  // 2. Try to find new place for this allocation in preceding or current block.
11047  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
11048  {
11049  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
11050  VmaAllocationRequest dstAllocRequest;
11051  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
11052  m_CurrentFrameIndex,
11053  m_pBlockVector->GetFrameInUseCount(),
11054  m_pBlockVector->GetBufferImageGranularity(),
11055  size,
11056  alignment,
11057  false, // upperAddress
11058  suballocType,
11059  false, // canMakeOtherLost
11061  &dstAllocRequest) &&
11062  MoveMakesSense(
11063  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
11064  {
11065  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
11066 
11067  // Reached limit on number of allocations or bytes to move.
11068  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
11069  (m_BytesMoved + size > maxBytesToMove))
11070  {
11071  return VK_INCOMPLETE;
11072  }
11073 
11074  void* pDstMappedData = VMA_NULL;
11075  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
11076  if(res != VK_SUCCESS)
11077  {
11078  return res;
11079  }
11080 
11081  void* pSrcMappedData = VMA_NULL;
11082  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
11083  if(res != VK_SUCCESS)
11084  {
11085  return res;
11086  }
11087 
11088  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
11089  memcpy(
11090  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
11091  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
11092  static_cast<size_t>(size));
11093 
11094  if(VMA_DEBUG_MARGIN > 0)
11095  {
11096  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
11097  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
11098  }
11099 
11100  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
11101  dstAllocRequest,
11102  suballocType,
11103  size,
11104  false, // upperAddress
11105  allocInfo.m_hAllocation);
11106  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
11107 
11108  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
11109 
11110  if(allocInfo.m_pChanged != VMA_NULL)
11111  {
11112  *allocInfo.m_pChanged = VK_TRUE;
11113  }
11114 
11115  ++m_AllocationsMoved;
11116  m_BytesMoved += size;
11117 
11118  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
11119 
11120  break;
11121  }
11122  }
11123 
11124  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
11125 
11126  if(srcAllocIndex > 0)
11127  {
11128  --srcAllocIndex;
11129  }
11130  else
11131  {
11132  if(srcBlockIndex > 0)
11133  {
11134  --srcBlockIndex;
11135  srcAllocIndex = SIZE_MAX;
11136  }
11137  else
11138  {
11139  return VK_SUCCESS;
11140  }
11141  }
11142  }
11143 }
11144 
// Entry point of the defragmentation algorithm:
// 1. Builds a BlockInfo for every block of the vector.
// 2. Distributes registered allocations into their owning BlockInfo.
// 3. Sorts blocks from most "destination" to most "source".
// 4. Runs up to two DefragmentRound passes within the given budgets.
// 5. Unmaps any blocks that were mapped only for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered via AddAllocation().
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value. Enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering data used by the sort below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
11212 
11213 bool VmaDefragmentator::MoveMakesSense(
11214  size_t dstBlockIndex, VkDeviceSize dstOffset,
11215  size_t srcBlockIndex, VkDeviceSize srcOffset)
11216 {
11217  if(dstBlockIndex < srcBlockIndex)
11218  {
11219  return true;
11220  }
11221  if(dstBlockIndex > srcBlockIndex)
11222  {
11223  return false;
11224  }
11225  if(dstOffset < srcOffset)
11226  {
11227  return true;
11228  }
11229  return false;
11230 }
11231 
11233 // VmaRecorder
11234 
11235 #if VMA_RECORDING_ENABLED
11236 
// Constructs an inactive recorder; Init() must be called before any
// Record* function is used.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
11245 
// Opens the recording file and writes the CSV header lines.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
// NOTE(review): uses Win32 QueryPerformance* and MSVC fopen_s - presumably
// VMA_RECORDING_ENABLED is supported only on Windows; confirm.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and start time so records carry relative timestamps.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file signature and format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
11267 
VmaRecorder::~VmaRecorder()
{
    // Close the recording file if Init() successfully opened one.
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
11275 
11276 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
11277 {
11278  CallParams callParams;
11279  GetBasicParams(callParams);
11280 
11281  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11282  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
11283  Flush();
11284 }
11285 
11286 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
11287 {
11288  CallParams callParams;
11289  GetBasicParams(callParams);
11290 
11291  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11292  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
11293  Flush();
11294 }
11295 
// Records a vmaCreatePool call: one CSV line with all pool creation
// parameters followed by the resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
11312 
11313 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
11314 {
11315  CallParams callParams;
11316  GetBasicParams(callParams);
11317 
11318  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11319  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
11320  pool);
11321  Flush();
11322 }
11323 
// Records a vmaAllocateMemory call: memory requirements, creation parameters,
// resulting allocation handle and user data (string or pointer).
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11348 
// Records a vmaAllocateMemoryForBuffer call, including the dedicated
// allocation hints (as 0/1 flags) in addition to the common parameters.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11377 
// Records a vmaAllocateMemoryForImage call, including the dedicated
// allocation hints (as 0/1 flags) in addition to the common parameters.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11406 
11407 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
11408  VmaAllocation allocation)
11409 {
11410  CallParams callParams;
11411  GetBasicParams(callParams);
11412 
11413  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11414  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11415  allocation);
11416  Flush();
11417 }
11418 
11419 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
11420  VmaAllocation allocation,
11421  const void* pUserData)
11422 {
11423  CallParams callParams;
11424  GetBasicParams(callParams);
11425 
11426  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11427  UserDataString userDataStr(
11428  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
11429  pUserData);
11430  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
11431  allocation,
11432  userDataStr.GetString());
11433  Flush();
11434 }
11435 
11436 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
11437  VmaAllocation allocation)
11438 {
11439  CallParams callParams;
11440  GetBasicParams(callParams);
11441 
11442  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11443  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11444  allocation);
11445  Flush();
11446 }
11447 
11448 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
11449  VmaAllocation allocation)
11450 {
11451  CallParams callParams;
11452  GetBasicParams(callParams);
11453 
11454  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11455  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11456  allocation);
11457  Flush();
11458 }
11459 
11460 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
11461  VmaAllocation allocation)
11462 {
11463  CallParams callParams;
11464  GetBasicParams(callParams);
11465 
11466  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11467  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
11468  allocation);
11469  Flush();
11470 }
11471 
11472 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
11473  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11474 {
11475  CallParams callParams;
11476  GetBasicParams(callParams);
11477 
11478  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11479  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11480  allocation,
11481  offset,
11482  size);
11483  Flush();
11484 }
11485 
11486 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
11487  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
11488 {
11489  CallParams callParams;
11490  GetBasicParams(callParams);
11491 
11492  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11493  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
11494  allocation,
11495  offset,
11496  size);
11497  Flush();
11498 }
11499 
// Records a vmaCreateBuffer call: buffer creation parameters, allocation
// creation parameters, resulting allocation handle and user data.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11525 
// Records a vmaCreateImage call: full image creation parameters, allocation
// creation parameters, resulting allocation handle and user data.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
11560 
11561 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
11562  VmaAllocation allocation)
11563 {
11564  CallParams callParams;
11565  GetBasicParams(callParams);
11566 
11567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11568  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
11569  allocation);
11570  Flush();
11571 }
11572 
11573 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
11574  VmaAllocation allocation)
11575 {
11576  CallParams callParams;
11577  GetBasicParams(callParams);
11578 
11579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11580  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
11581  allocation);
11582  Flush();
11583 }
11584 
11585 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
11586  VmaAllocation allocation)
11587 {
11588  CallParams callParams;
11589  GetBasicParams(callParams);
11590 
11591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11592  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
11593  allocation);
11594  Flush();
11595 }
11596 
11597 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
11598  VmaAllocation allocation)
11599 {
11600  CallParams callParams;
11601  GetBasicParams(callParams);
11602 
11603  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11604  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
11605  allocation);
11606  Flush();
11607 }
11608 
11609 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
11610  VmaPool pool)
11611 {
11612  CallParams callParams;
11613  GetBasicParams(callParams);
11614 
11615  VmaMutexLock lock(m_FileMutex, m_UseMutex);
11616  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
11617  pool);
11618  Flush();
11619 }
11620 
11621 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
11622 {
11623  if(pUserData != VMA_NULL)
11624  {
11625  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
11626  {
11627  m_Str = (const char*)pUserData;
11628  }
11629  else
11630  {
11631  sprintf_s(m_PtrStr, "%p", pUserData);
11632  m_Str = m_PtrStr;
11633  }
11634  }
11635  else
11636  {
11637  m_Str = "";
11638  }
11639 }
11640 
// Writes the "Config" section at the start of the recording file: physical
// device identification and limits, memory heaps/types, enabled extensions,
// and the values of the compile-time VMA_* configuration macros, so a
// playback tool can reproduce the environment the recording was made in.
// NOTE(review): unlike the Record* functions this takes no m_FileMutex;
// it is called from VmaAllocator_T::Init before any recording starts.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Physical device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration the library was built with.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
11686 
11687 void VmaRecorder::GetBasicParams(CallParams& outParams)
11688 {
11689  outParams.threadId = GetCurrentThreadId();
11690 
11691  LARGE_INTEGER counter;
11692  QueryPerformanceCounter(&counter);
11693  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
11694 }
11695 
11696 void VmaRecorder::Flush()
11697 {
11698  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
11699  {
11700  fflush(m_File);
11701  }
11702 }
11703 
11704 #endif // #if VMA_RECORDING_ENABLED
11705 
11707 // VmaAllocator_T
11708 
11709 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
11710  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
11711  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
11712  m_hDevice(pCreateInfo->device),
11713  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
11714  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
11715  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
11716  m_PreferredLargeHeapBlockSize(0),
11717  m_PhysicalDevice(pCreateInfo->physicalDevice),
11718  m_CurrentFrameIndex(0),
11719  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
11720  m_NextPoolId(0)
11722  ,m_pRecorder(VMA_NULL)
11723 #endif
11724 {
11725  if(VMA_DEBUG_DETECT_CORRUPTION)
11726  {
11727  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
11728  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
11729  }
11730 
11731  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
11732 
11733 #if !(VMA_DEDICATED_ALLOCATION)
11735  {
11736  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
11737  }
11738 #endif
11739 
11740  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
11741  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
11742  memset(&m_MemProps, 0, sizeof(m_MemProps));
11743 
11744  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
11745  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
11746 
11747  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11748  {
11749  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
11750  }
11751 
11752  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
11753  {
11754  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
11755  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
11756  }
11757 
11758  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
11759 
11760  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
11761  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
11762 
11763  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
11764  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
11765  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
11766  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
11767 
11768  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
11769  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
11770 
11771  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
11772  {
11773  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
11774  {
11775  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
11776  if(limit != VK_WHOLE_SIZE)
11777  {
11778  m_HeapSizeLimit[heapIndex] = limit;
11779  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
11780  {
11781  m_MemProps.memoryHeaps[heapIndex].size = limit;
11782  }
11783  }
11784  }
11785  }
11786 
11787  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11788  {
11789  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
11790 
11791  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
11792  this,
11793  memTypeIndex,
11794  preferredBlockSize,
11795  0,
11796  SIZE_MAX,
11797  GetBufferImageGranularity(),
11798  pCreateInfo->frameInUseCount,
11799  false, // isCustomPool
11800  false, // explicitBlockSize
11801  false); // linearAlgorithm
11802  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
11803  // becase minBlockCount is 0.
11804  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11805 
11806  }
11807 }
11808 
11809 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
11810 {
11811  VkResult res = VK_SUCCESS;
11812 
11813  if(pCreateInfo->pRecordSettings != VMA_NULL &&
11814  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
11815  {
11816 #if VMA_RECORDING_ENABLED
11817  m_pRecorder = vma_new(this, VmaRecorder)();
11818  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
11819  if(res != VK_SUCCESS)
11820  {
11821  return res;
11822  }
11823  m_pRecorder->WriteConfiguration(
11824  m_PhysicalDeviceProperties,
11825  m_MemProps,
11826  m_UseKhrDedicatedAllocation);
11827  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
11828 #else
11829  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
11830  return VK_ERROR_FEATURE_NOT_PRESENT;
11831 #endif
11832  }
11833 
11834  return res;
11835 }
11836 
11837 VmaAllocator_T::~VmaAllocator_T()
11838 {
11839 #if VMA_RECORDING_ENABLED
11840  if(m_pRecorder != VMA_NULL)
11841  {
11842  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
11843  vma_delete(this, m_pRecorder);
11844  }
11845 #endif
11846 
11847  VMA_ASSERT(m_Pools.empty());
11848 
11849  for(size_t i = GetMemoryTypeCount(); i--; )
11850  {
11851  vma_delete(this, m_pDedicatedAllocations[i]);
11852  vma_delete(this, m_pBlockVectors[i]);
11853  }
11854 }
11855 
// Fills m_VulkanFunctions with the Vulkan entry points the allocator needs.
// Resolution order:
//   1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the
//      statically linked functions (and fetch the KHR dedicated-allocation
//      pair via vkGetDeviceProcAddr when that feature is enabled).
//   2. Any non-null pointer in the user-provided pVulkanFunctions overrides
//      the static default.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Defaults from the statically linked Vulkan loader.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are not exported statically; query them per-device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    // User-provided pointers override the defaults, field by field.
    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
11941 
11942 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
11943 {
11944  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11945  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
11946  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
11947  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
11948 }
11949 
11950 VkResult VmaAllocator_T::AllocateMemoryOfType(
11951  VkDeviceSize size,
11952  VkDeviceSize alignment,
11953  bool dedicatedAllocation,
11954  VkBuffer dedicatedBuffer,
11955  VkImage dedicatedImage,
11956  const VmaAllocationCreateInfo& createInfo,
11957  uint32_t memTypeIndex,
11958  VmaSuballocationType suballocType,
11959  VmaAllocation* pAllocation)
11960 {
11961  VMA_ASSERT(pAllocation != VMA_NULL);
11962  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
11963 
11964  VmaAllocationCreateInfo finalCreateInfo = createInfo;
11965 
11966  // If memory type is not HOST_VISIBLE, disable MAPPED.
11967  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11968  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
11969  {
11970  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
11971  }
11972 
11973  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
11974  VMA_ASSERT(blockVector);
11975 
11976  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
11977  bool preferDedicatedMemory =
11978  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
11979  dedicatedAllocation ||
11980  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
11981  size > preferredBlockSize / 2;
11982 
11983  if(preferDedicatedMemory &&
11984  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
11985  finalCreateInfo.pool == VK_NULL_HANDLE)
11986  {
11988  }
11989 
11990  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
11991  {
11992  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11993  {
11994  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11995  }
11996  else
11997  {
11998  return AllocateDedicatedMemory(
11999  size,
12000  suballocType,
12001  memTypeIndex,
12002  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12003  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12004  finalCreateInfo.pUserData,
12005  dedicatedBuffer,
12006  dedicatedImage,
12007  pAllocation);
12008  }
12009  }
12010  else
12011  {
12012  VkResult res = blockVector->Allocate(
12013  VK_NULL_HANDLE, // hCurrentPool
12014  m_CurrentFrameIndex.load(),
12015  size,
12016  alignment,
12017  finalCreateInfo,
12018  suballocType,
12019  pAllocation);
12020  if(res == VK_SUCCESS)
12021  {
12022  return res;
12023  }
12024 
12025  // 5. Try dedicated memory.
12026  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12027  {
12028  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12029  }
12030  else
12031  {
12032  res = AllocateDedicatedMemory(
12033  size,
12034  suballocType,
12035  memTypeIndex,
12036  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
12037  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
12038  finalCreateInfo.pUserData,
12039  dedicatedBuffer,
12040  dedicatedImage,
12041  pAllocation);
12042  if(res == VK_SUCCESS)
12043  {
12044  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
12045  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
12046  return VK_SUCCESS;
12047  }
12048  else
12049  {
12050  // Everything failed: Return error code.
12051  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
12052  return res;
12053  }
12054  }
12055  }
12056 }
12057 
// Allocates a whole VkDeviceMemory object for a single allocation
// (ALLOCATION_TYPE_DEDICATED), optionally chaining
// VkMemoryDedicatedAllocateInfoKHR for the given buffer/image and
// optionally persistently mapping it.
//
// On success fills *pAllocation and registers it (sorted by pointer) in
// m_pDedicatedAllocations[memTypeIndex]. On failure nothing is leaked:
// a failed vkMapMemory frees the just-allocated memory before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info only when the extension is in use
    // and exactly one of buffer/image is provided.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block when requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Mapping failed: release the memory allocated above.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Fill new memory with a recognizable pattern for debugging.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
12140 
// Queries memory requirements for a buffer, using the
// VK_KHR_get_memory_requirements2 / VK_KHR_dedicated_allocation path when
// enabled so the driver can also report whether a dedicated allocation is
// required or preferred; otherwise falls back to the core function and
// reports false for both dedicated-allocation hints.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
// NOTE: the `else` above pairs with the braced block below; when
// VMA_DEDICATED_ALLOCATION is 0 the block compiles as a plain scope.
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
12172 
// Queries memory requirements for an image; mirrors
// GetBufferMemoryRequirements: uses the KHR "2" query (with chained
// VkMemoryDedicatedRequirementsKHR) when dedicated-allocation support is
// enabled, else the core function with both dedicated hints set to false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements query onto the main query.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
// NOTE: the `else` above pairs with the braced block below; when
// VMA_DEDICATED_ALLOCATION is 0 the block compiles as a plain scope.
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
12204 
12205 VkResult VmaAllocator_T::AllocateMemory(
12206  const VkMemoryRequirements& vkMemReq,
12207  bool requiresDedicatedAllocation,
12208  bool prefersDedicatedAllocation,
12209  VkBuffer dedicatedBuffer,
12210  VkImage dedicatedImage,
12211  const VmaAllocationCreateInfo& createInfo,
12212  VmaSuballocationType suballocType,
12213  VmaAllocation* pAllocation)
12214 {
12215  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
12216 
12217  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
12218  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12219  {
12220  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
12221  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12222  }
12223  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
12225  {
12226  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
12227  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12228  }
12229  if(requiresDedicatedAllocation)
12230  {
12231  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
12232  {
12233  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
12234  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12235  }
12236  if(createInfo.pool != VK_NULL_HANDLE)
12237  {
12238  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
12239  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12240  }
12241  }
12242  if((createInfo.pool != VK_NULL_HANDLE) &&
12243  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
12244  {
12245  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
12246  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12247  }
12248 
12249  if(createInfo.pool != VK_NULL_HANDLE)
12250  {
12251  const VkDeviceSize alignmentForPool = VMA_MAX(
12252  vkMemReq.alignment,
12253  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
12254  return createInfo.pool->m_BlockVector.Allocate(
12255  createInfo.pool,
12256  m_CurrentFrameIndex.load(),
12257  vkMemReq.size,
12258  alignmentForPool,
12259  createInfo,
12260  suballocType,
12261  pAllocation);
12262  }
12263  else
12264  {
12265  // Bit mask of memory Vulkan types acceptable for this allocation.
12266  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
12267  uint32_t memTypeIndex = UINT32_MAX;
12268  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12269  if(res == VK_SUCCESS)
12270  {
12271  VkDeviceSize alignmentForMemType = VMA_MAX(
12272  vkMemReq.alignment,
12273  GetMemoryTypeMinAlignment(memTypeIndex));
12274 
12275  res = AllocateMemoryOfType(
12276  vkMemReq.size,
12277  alignmentForMemType,
12278  requiresDedicatedAllocation || prefersDedicatedAllocation,
12279  dedicatedBuffer,
12280  dedicatedImage,
12281  createInfo,
12282  memTypeIndex,
12283  suballocType,
12284  pAllocation);
12285  // Succeeded on first try.
12286  if(res == VK_SUCCESS)
12287  {
12288  return res;
12289  }
12290  // Allocation from this memory type failed. Try other compatible memory types.
12291  else
12292  {
12293  for(;;)
12294  {
12295  // Remove old memTypeIndex from list of possibilities.
12296  memoryTypeBits &= ~(1u << memTypeIndex);
12297  // Find alternative memTypeIndex.
12298  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
12299  if(res == VK_SUCCESS)
12300  {
12301  alignmentForMemType = VMA_MAX(
12302  vkMemReq.alignment,
12303  GetMemoryTypeMinAlignment(memTypeIndex));
12304 
12305  res = AllocateMemoryOfType(
12306  vkMemReq.size,
12307  alignmentForMemType,
12308  requiresDedicatedAllocation || prefersDedicatedAllocation,
12309  dedicatedBuffer,
12310  dedicatedImage,
12311  createInfo,
12312  memTypeIndex,
12313  suballocType,
12314  pAllocation);
12315  // Allocation from this alternative memory type succeeded.
12316  if(res == VK_SUCCESS)
12317  {
12318  return res;
12319  }
12320  // else: Allocation from this memory type failed. Try next one - next loop iteration.
12321  }
12322  // No other matching memory type index could be found.
12323  else
12324  {
12325  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
12326  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12327  }
12328  }
12329  }
12330  }
12331  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
12332  else
12333  return res;
12334  }
12335 }
12336 
12337 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
12338 {
12339  VMA_ASSERT(allocation);
12340 
12341  if(TouchAllocation(allocation))
12342  {
12343  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12344  {
12345  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
12346  }
12347 
12348  switch(allocation->GetType())
12349  {
12350  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12351  {
12352  VmaBlockVector* pBlockVector = VMA_NULL;
12353  VmaPool hPool = allocation->GetPool();
12354  if(hPool != VK_NULL_HANDLE)
12355  {
12356  pBlockVector = &hPool->m_BlockVector;
12357  }
12358  else
12359  {
12360  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12361  pBlockVector = m_pBlockVectors[memTypeIndex];
12362  }
12363  pBlockVector->Free(allocation);
12364  }
12365  break;
12366  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12367  FreeDedicatedMemory(allocation);
12368  break;
12369  default:
12370  VMA_ASSERT(0);
12371  }
12372  }
12373 
12374  allocation->SetUserData(this, VMA_NULL);
12375  vma_delete(this, allocation);
12376 }
12377 
12378 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
12379 {
12380  // Initialize.
12381  InitStatInfo(pStats->total);
12382  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
12383  InitStatInfo(pStats->memoryType[i]);
12384  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
12385  InitStatInfo(pStats->memoryHeap[i]);
12386 
12387  // Process default pools.
12388  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12389  {
12390  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12391  VMA_ASSERT(pBlockVector);
12392  pBlockVector->AddStats(pStats);
12393  }
12394 
12395  // Process custom pools.
12396  {
12397  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12398  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12399  {
12400  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
12401  }
12402  }
12403 
12404  // Process dedicated allocations.
12405  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12406  {
12407  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
12408  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12409  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
12410  VMA_ASSERT(pDedicatedAllocVector);
12411  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
12412  {
12413  VmaStatInfo allocationStatInfo;
12414  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
12415  VmaAddStatInfo(pStats->total, allocationStatInfo);
12416  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
12417  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
12418  }
12419  }
12420 
12421  // Postprocess.
12422  VmaPostprocessCalcStatInfo(pStats->total);
12423  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
12424  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
12425  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
12426  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
12427 }
12428 
// PCI vendor ID of AMD (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
12430 
12431 VkResult VmaAllocator_T::Defragment(
12432  VmaAllocation* pAllocations,
12433  size_t allocationCount,
12434  VkBool32* pAllocationsChanged,
12435  const VmaDefragmentationInfo* pDefragmentationInfo,
12436  VmaDefragmentationStats* pDefragmentationStats)
12437 {
12438  if(pAllocationsChanged != VMA_NULL)
12439  {
12440  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
12441  }
12442  if(pDefragmentationStats != VMA_NULL)
12443  {
12444  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
12445  }
12446 
12447  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
12448 
12449  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
12450 
12451  const size_t poolCount = m_Pools.size();
12452 
12453  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
12454  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12455  {
12456  VmaAllocation hAlloc = pAllocations[allocIndex];
12457  VMA_ASSERT(hAlloc);
12458  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
12459  // DedicatedAlloc cannot be defragmented.
12460  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12461  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
12462  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
12463  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
12464  // Lost allocation cannot be defragmented.
12465  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
12466  {
12467  VmaBlockVector* pAllocBlockVector = VMA_NULL;
12468 
12469  const VmaPool hAllocPool = hAlloc->GetPool();
12470  // This allocation belongs to custom pool.
12471  if(hAllocPool != VK_NULL_HANDLE)
12472  {
12473  // Pools with linear algorithm are not defragmented.
12474  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
12475  {
12476  pAllocBlockVector = &hAllocPool->m_BlockVector;
12477  }
12478  }
12479  // This allocation belongs to general pool.
12480  else
12481  {
12482  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
12483  }
12484 
12485  if(pAllocBlockVector != VMA_NULL)
12486  {
12487  VmaDefragmentator* const pDefragmentator =
12488  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
12489  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
12490  &pAllocationsChanged[allocIndex] : VMA_NULL;
12491  pDefragmentator->AddAllocation(hAlloc, pChanged);
12492  }
12493  }
12494  }
12495 
12496  VkResult result = VK_SUCCESS;
12497 
12498  // ======== Main processing.
12499 
12500  VkDeviceSize maxBytesToMove = SIZE_MAX;
12501  uint32_t maxAllocationsToMove = UINT32_MAX;
12502  if(pDefragmentationInfo != VMA_NULL)
12503  {
12504  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
12505  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
12506  }
12507 
12508  // Process standard memory.
12509  for(uint32_t memTypeIndex = 0;
12510  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
12511  ++memTypeIndex)
12512  {
12513  // Only HOST_VISIBLE memory types can be defragmented.
12514  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12515  {
12516  result = m_pBlockVectors[memTypeIndex]->Defragment(
12517  pDefragmentationStats,
12518  maxBytesToMove,
12519  maxAllocationsToMove);
12520  }
12521  }
12522 
12523  // Process custom pools.
12524  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
12525  {
12526  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
12527  pDefragmentationStats,
12528  maxBytesToMove,
12529  maxAllocationsToMove);
12530  }
12531 
12532  // ======== Destroy defragmentators.
12533 
12534  // Process custom pools.
12535  for(size_t poolIndex = poolCount; poolIndex--; )
12536  {
12537  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
12538  }
12539 
12540  // Process standard memory.
12541  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
12542  {
12543  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12544  {
12545  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
12546  }
12547  }
12548 
12549  return result;
12550 }
12551 
// Fills *pAllocationInfo with the allocation's current parameters.
// Side effect for lost-capable allocations: atomically advances the
// allocation's last-use frame index to the current frame (a "touch").
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: terminates once the allocation is observed either lost or
        // touched in the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Lost allocation: no backing memory; only size and pUserData
                // remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Alive and touched this frame. pMappedData is VMA_NULL here:
                // lost-capable allocations cannot be mapped (Map() rejects
                // them with VK_ERROR_MEMORY_MAP_FAILED).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to claim the touch; on success the next iteration takes
                // the current-frame branch above.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds record the touch even for non-lost-capable
        // allocations; such allocations can never be in the LOST state.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
12623 
// "Touches" the allocation: advances its last-use frame index to the current
// frame. Returns false if the allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: retries until the allocation is seen lost, or its
        // last-use frame index equals the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still update the last-use frame index for
        // non-lost-capable allocations; these can never be in the LOST state.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // A non-lost-capable allocation is always considered alive.
        return true;
    }
}
12675 
12676 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
12677 {
12678  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
12679 
12680  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
12681 
12682  if(newCreateInfo.maxBlockCount == 0)
12683  {
12684  newCreateInfo.maxBlockCount = SIZE_MAX;
12685  }
12686  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
12687  {
12688  return VK_ERROR_INITIALIZATION_FAILED;
12689  }
12690 
12691  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
12692 
12693  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
12694 
12695  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
12696  if(res != VK_SUCCESS)
12697  {
12698  vma_delete(this, *pPool);
12699  *pPool = VMA_NULL;
12700  return res;
12701  }
12702 
12703  // Add to m_Pools.
12704  {
12705  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12706  (*pPool)->SetId(m_NextPoolId++);
12707  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
12708  }
12709 
12710  return VK_SUCCESS;
12711 }
12712 
12713 void VmaAllocator_T::DestroyPool(VmaPool pool)
12714 {
12715  // Remove from m_Pools.
12716  {
12717  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12718  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
12719  VMA_ASSERT(success && "Pool not found in Allocator.");
12720  }
12721 
12722  vma_delete(this, pool);
12723 }
12724 
12725 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
12726 {
12727  pool->m_BlockVector.GetPoolStats(pPoolStats);
12728 }
12729 
12730 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
12731 {
12732  m_CurrentFrameIndex.store(frameIndex);
12733 }
12734 
12735 void VmaAllocator_T::MakePoolAllocationsLost(
12736  VmaPool hPool,
12737  size_t* pLostAllocationCount)
12738 {
12739  hPool->m_BlockVector.MakePoolAllocationsLost(
12740  m_CurrentFrameIndex.load(),
12741  pLostAllocationCount);
12742 }
12743 
12744 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
12745 {
12746  return hPool->m_BlockVector.CheckCorruption();
12747 }
12748 
12749 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
12750 {
12751  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
12752 
12753  // Process default pools.
12754  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
12755  {
12756  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
12757  {
12758  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
12759  VMA_ASSERT(pBlockVector);
12760  VkResult localRes = pBlockVector->CheckCorruption();
12761  switch(localRes)
12762  {
12763  case VK_ERROR_FEATURE_NOT_PRESENT:
12764  break;
12765  case VK_SUCCESS:
12766  finalRes = VK_SUCCESS;
12767  break;
12768  default:
12769  return localRes;
12770  }
12771  }
12772  }
12773 
12774  // Process custom pools.
12775  {
12776  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
12777  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
12778  {
12779  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
12780  {
12781  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
12782  switch(localRes)
12783  {
12784  case VK_ERROR_FEATURE_NOT_PRESENT:
12785  break;
12786  case VK_SUCCESS:
12787  finalRes = VK_SUCCESS;
12788  break;
12789  default:
12790  return localRes;
12791  }
12792  }
12793  }
12794  }
12795 
12796  return finalRes;
12797 }
12798 
12799 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
12800 {
12801  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
12802  (*pAllocation)->InitLost();
12803 }
12804 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit
// (m_HeapSizeLimit) and invoking the user's pfnAllocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // The mutex guards the whole check-allocate-subtract sequence so the
        // remaining budget cannot be oversubscribed by concurrent callers.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Budget is only reduced once the driver call succeeded.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: fail without calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback only for successful allocations.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
12838 
// Releases device memory obtained through AllocateVulkanMemory: notifies the
// user's pfnFree callback, frees the memory, and returns the bytes to the
// heap budget when a per-heap size limit is active.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // The callback fires before the handle becomes invalid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
12855 
12856 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
12857 {
12858  if(hAllocation->CanBecomeLost())
12859  {
12860  return VK_ERROR_MEMORY_MAP_FAILED;
12861  }
12862 
12863  switch(hAllocation->GetType())
12864  {
12865  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12866  {
12867  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12868  char *pBytes = VMA_NULL;
12869  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
12870  if(res == VK_SUCCESS)
12871  {
12872  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
12873  hAllocation->BlockAllocMap();
12874  }
12875  return res;
12876  }
12877  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12878  return hAllocation->DedicatedAllocMap(this, ppData);
12879  default:
12880  VMA_ASSERT(0);
12881  return VK_ERROR_MEMORY_MAP_FAILED;
12882  }
12883 }
12884 
12885 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12886 {
12887  switch(hAllocation->GetType())
12888  {
12889  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12890  {
12891  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12892  hAllocation->BlockAllocUnmap();
12893  pBlock->Unmap(this, 1);
12894  }
12895  break;
12896  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12897  hAllocation->DedicatedAllocUnmap(this);
12898  break;
12899  default:
12900  VMA_ASSERT(0);
12901  }
12902 }
12903 
12904 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12905 {
12906  VkResult res = VK_SUCCESS;
12907  switch(hAllocation->GetType())
12908  {
12909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12910  res = GetVulkanFunctions().vkBindBufferMemory(
12911  m_hDevice,
12912  hBuffer,
12913  hAllocation->GetMemory(),
12914  0); //memoryOffset
12915  break;
12916  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12917  {
12918  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12919  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12920  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12921  break;
12922  }
12923  default:
12924  VMA_ASSERT(0);
12925  }
12926  return res;
12927 }
12928 
12929 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12930 {
12931  VkResult res = VK_SUCCESS;
12932  switch(hAllocation->GetType())
12933  {
12934  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12935  res = GetVulkanFunctions().vkBindImageMemory(
12936  m_hDevice,
12937  hImage,
12938  hAllocation->GetMemory(),
12939  0); //memoryOffset
12940  break;
12941  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12942  {
12943  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12944  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
12945  res = pBlock->BindImageMemory(this, hAllocation, hImage);
12946  break;
12947  }
12948  default:
12949  VMA_ASSERT(0);
12950  }
12951  return res;
12952 }
12953 
// Flushes (VMA_CACHE_FLUSH) or invalidates (VMA_CACHE_INVALIDATE) the host
// cache for [offset, offset+size) within hAllocation.
// The Vulkan spec requires VkMappedMemoryRange offset/size to be multiples of
// nonCoherentAtomSize, so the range is expanded outward to that alignment and
// clamped to the allocation (dedicated) or the owning block (sub-allocation).
// No-op for size == 0 or for HOST_COHERENT memory types.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align start downward; size grows by the distance the start moved.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align end upward, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: convert to a block-relative offset and
            // clamp so the aligned end does not spill past the block's end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
13029 
13030 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
13031 {
13032  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
13033 
13034  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
13035  {
13036  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
13037  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
13038  VMA_ASSERT(pDedicatedAllocations);
13039  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
13040  VMA_ASSERT(success);
13041  }
13042 
13043  VkDeviceMemory hMemory = allocation->GetMemory();
13044 
13045  if(allocation->GetMappedData() != VMA_NULL)
13046  {
13047  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
13048  }
13049 
13050  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
13051 
13052  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
13053 }
13054 
13055 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
13056 {
13057  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
13058  !hAllocation->CanBecomeLost() &&
13059  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
13060  {
13061  void* pData = VMA_NULL;
13062  VkResult res = Map(hAllocation, &pData);
13063  if(res == VK_SUCCESS)
13064  {
13065  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
13066  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
13067  Unmap(hAllocation);
13068  }
13069  else
13070  {
13071  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
13072  }
13073  }
13074 }
13075 
13076 #if VMA_STATS_STRING_ENABLED
13077 
// Writes the detailed JSON map of all memory into `json`:
// "DedicatedAllocations" per memory type, "DefaultPools" per memory type,
// and custom "Pools" keyed by pool ID. Each section's heading is emitted
// lazily - only once a first non-empty entry is found.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                // Open the "DedicatedAllocations" object on first hit only.
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools: one block vector per memory type; skip empty ones.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    // Open the "DefaultPools" object on first hit only.
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools, keyed by pool ID.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
13163 
13164 #endif // #if VMA_STATS_STRING_ENABLED
13165 
13167 // Public interface
13168 
13169 VkResult vmaCreateAllocator(
13170  const VmaAllocatorCreateInfo* pCreateInfo,
13171  VmaAllocator* pAllocator)
13172 {
13173  VMA_ASSERT(pCreateInfo && pAllocator);
13174  VMA_DEBUG_LOG("vmaCreateAllocator");
13175  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
13176  return (*pAllocator)->Init(pCreateInfo);
13177 }
13178 
13179 void vmaDestroyAllocator(
13180  VmaAllocator allocator)
13181 {
13182  if(allocator != VK_NULL_HANDLE)
13183  {
13184  VMA_DEBUG_LOG("vmaDestroyAllocator");
13185  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
13186  vma_delete(&allocationCallbacks, allocator);
13187  }
13188 }
13189 
13191  VmaAllocator allocator,
13192  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
13193 {
13194  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
13195  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
13196 }
13197 
13199  VmaAllocator allocator,
13200  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
13201 {
13202  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
13203  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
13204 }
13205 
13207  VmaAllocator allocator,
13208  uint32_t memoryTypeIndex,
13209  VkMemoryPropertyFlags* pFlags)
13210 {
13211  VMA_ASSERT(allocator && pFlags);
13212  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
13213  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
13214 }
13215 
13217  VmaAllocator allocator,
13218  uint32_t frameIndex)
13219 {
13220  VMA_ASSERT(allocator);
13221  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
13222 
13223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13224 
13225  allocator->SetCurrentFrameIndex(frameIndex);
13226 }
13227 
13228 void vmaCalculateStats(
13229  VmaAllocator allocator,
13230  VmaStats* pStats)
13231 {
13232  VMA_ASSERT(allocator && pStats);
13233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13234  allocator->CalculateStats(pStats);
13235 }
13236 
13237 #if VMA_STATS_STRING_ENABLED
13238 
// Builds a JSON string describing the allocator's current state: total stats,
// then per-heap sections ("Heap N") with size, flags, stats, and the memory
// types belonging to that heap; optionally the detailed memory map.
// The returned string is heap-allocated and must be released with
// vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under the heap it belongs to.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a NUL-terminated heap string owned by
    // the caller (released with vmaFreeStatsString).
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
13346 
13347 void vmaFreeStatsString(
13348  VmaAllocator allocator,
13349  char* pStatsString)
13350 {
13351  if(pStatsString != VMA_NULL)
13352  {
13353  VMA_ASSERT(allocator);
13354  size_t len = strlen(pStatsString);
13355  vma_delete_array(allocator, pStatsString, len + 1);
13356  }
13357 }
13358 
13359 #endif // #if VMA_STATS_STRING_ENABLED
13360 
13361 /*
13362 This function is not protected by any mutex because it just reads immutable data.
13363 */
13364 VkResult vmaFindMemoryTypeIndex(
13365  VmaAllocator allocator,
13366  uint32_t memoryTypeBits,
13367  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13368  uint32_t* pMemoryTypeIndex)
13369 {
13370  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13371  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13372  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13373 
13374  if(pAllocationCreateInfo->memoryTypeBits != 0)
13375  {
13376  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
13377  }
13378 
13379  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
13380  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
13381 
13382  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13383  if(mapped)
13384  {
13385  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13386  }
13387 
13388  // Convert usage to requiredFlags and preferredFlags.
13389  switch(pAllocationCreateInfo->usage)
13390  {
13392  break;
13394  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13395  {
13396  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13397  }
13398  break;
13400  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13401  break;
13403  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13404  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
13405  {
13406  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
13407  }
13408  break;
13410  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
13411  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
13412  break;
13413  default:
13414  break;
13415  }
13416 
13417  *pMemoryTypeIndex = UINT32_MAX;
13418  uint32_t minCost = UINT32_MAX;
13419  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
13420  memTypeIndex < allocator->GetMemoryTypeCount();
13421  ++memTypeIndex, memTypeBit <<= 1)
13422  {
13423  // This memory type is acceptable according to memoryTypeBits bitmask.
13424  if((memTypeBit & memoryTypeBits) != 0)
13425  {
13426  const VkMemoryPropertyFlags currFlags =
13427  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
13428  // This memory type contains requiredFlags.
13429  if((requiredFlags & ~currFlags) == 0)
13430  {
13431  // Calculate cost as number of bits from preferredFlags not present in this memory type.
13432  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
13433  // Remember memory type with lowest cost.
13434  if(currCost < minCost)
13435  {
13436  *pMemoryTypeIndex = memTypeIndex;
13437  if(currCost == 0)
13438  {
13439  return VK_SUCCESS;
13440  }
13441  minCost = currCost;
13442  }
13443  }
13444  }
13445  }
13446  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
13447 }
13448 
13450  VmaAllocator allocator,
13451  const VkBufferCreateInfo* pBufferCreateInfo,
13452  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13453  uint32_t* pMemoryTypeIndex)
13454 {
13455  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13456  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
13457  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13458  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13459 
13460  const VkDevice hDev = allocator->m_hDevice;
13461  VkBuffer hBuffer = VK_NULL_HANDLE;
13462  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
13463  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
13464  if(res == VK_SUCCESS)
13465  {
13466  VkMemoryRequirements memReq = {};
13467  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
13468  hDev, hBuffer, &memReq);
13469 
13470  res = vmaFindMemoryTypeIndex(
13471  allocator,
13472  memReq.memoryTypeBits,
13473  pAllocationCreateInfo,
13474  pMemoryTypeIndex);
13475 
13476  allocator->GetVulkanFunctions().vkDestroyBuffer(
13477  hDev, hBuffer, allocator->GetAllocationCallbacks());
13478  }
13479  return res;
13480 }
13481 
13483  VmaAllocator allocator,
13484  const VkImageCreateInfo* pImageCreateInfo,
13485  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13486  uint32_t* pMemoryTypeIndex)
13487 {
13488  VMA_ASSERT(allocator != VK_NULL_HANDLE);
13489  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
13490  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
13491  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
13492 
13493  const VkDevice hDev = allocator->m_hDevice;
13494  VkImage hImage = VK_NULL_HANDLE;
13495  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
13496  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
13497  if(res == VK_SUCCESS)
13498  {
13499  VkMemoryRequirements memReq = {};
13500  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
13501  hDev, hImage, &memReq);
13502 
13503  res = vmaFindMemoryTypeIndex(
13504  allocator,
13505  memReq.memoryTypeBits,
13506  pAllocationCreateInfo,
13507  pMemoryTypeIndex);
13508 
13509  allocator->GetVulkanFunctions().vkDestroyImage(
13510  hDev, hImage, allocator->GetAllocationCallbacks());
13511  }
13512  return res;
13513 }
13514 
13515 VkResult vmaCreatePool(
13516  VmaAllocator allocator,
13517  const VmaPoolCreateInfo* pCreateInfo,
13518  VmaPool* pPool)
13519 {
13520  VMA_ASSERT(allocator && pCreateInfo && pPool);
13521 
13522  VMA_DEBUG_LOG("vmaCreatePool");
13523 
13524  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13525 
13526  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
13527 
13528 #if VMA_RECORDING_ENABLED
13529  if(allocator->GetRecorder() != VMA_NULL)
13530  {
13531  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
13532  }
13533 #endif
13534 
13535  return res;
13536 }
13537 
13538 void vmaDestroyPool(
13539  VmaAllocator allocator,
13540  VmaPool pool)
13541 {
13542  VMA_ASSERT(allocator);
13543 
13544  if(pool == VK_NULL_HANDLE)
13545  {
13546  return;
13547  }
13548 
13549  VMA_DEBUG_LOG("vmaDestroyPool");
13550 
13551  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13552 
13553 #if VMA_RECORDING_ENABLED
13554  if(allocator->GetRecorder() != VMA_NULL)
13555  {
13556  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
13557  }
13558 #endif
13559 
13560  allocator->DestroyPool(pool);
13561 }
13562 
13563 void vmaGetPoolStats(
13564  VmaAllocator allocator,
13565  VmaPool pool,
13566  VmaPoolStats* pPoolStats)
13567 {
13568  VMA_ASSERT(allocator && pool && pPoolStats);
13569 
13570  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13571 
13572  allocator->GetPoolStats(pool, pPoolStats);
13573 }
13574 
13576  VmaAllocator allocator,
13577  VmaPool pool,
13578  size_t* pLostAllocationCount)
13579 {
13580  VMA_ASSERT(allocator && pool);
13581 
13582  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13583 
13584 #if VMA_RECORDING_ENABLED
13585  if(allocator->GetRecorder() != VMA_NULL)
13586  {
13587  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
13588  }
13589 #endif
13590 
13591  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
13592 }
13593 
13594 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
13595 {
13596  VMA_ASSERT(allocator && pool);
13597 
13598  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13599 
13600  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
13601 
13602  return allocator->CheckPoolCorruption(pool);
13603 }
13604 
13605 VkResult vmaAllocateMemory(
13606  VmaAllocator allocator,
13607  const VkMemoryRequirements* pVkMemoryRequirements,
13608  const VmaAllocationCreateInfo* pCreateInfo,
13609  VmaAllocation* pAllocation,
13610  VmaAllocationInfo* pAllocationInfo)
13611 {
13612  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
13613 
13614  VMA_DEBUG_LOG("vmaAllocateMemory");
13615 
13616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13617 
13618  VkResult result = allocator->AllocateMemory(
13619  *pVkMemoryRequirements,
13620  false, // requiresDedicatedAllocation
13621  false, // prefersDedicatedAllocation
13622  VK_NULL_HANDLE, // dedicatedBuffer
13623  VK_NULL_HANDLE, // dedicatedImage
13624  *pCreateInfo,
13625  VMA_SUBALLOCATION_TYPE_UNKNOWN,
13626  pAllocation);
13627 
13628 #if VMA_RECORDING_ENABLED
13629  if(allocator->GetRecorder() != VMA_NULL)
13630  {
13631  allocator->GetRecorder()->RecordAllocateMemory(
13632  allocator->GetCurrentFrameIndex(),
13633  *pVkMemoryRequirements,
13634  *pCreateInfo,
13635  *pAllocation);
13636  }
13637 #endif
13638 
13639  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
13640  {
13641  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13642  }
13643 
13644  return result;
13645 }
13646 
13648  VmaAllocator allocator,
13649  VkBuffer buffer,
13650  const VmaAllocationCreateInfo* pCreateInfo,
13651  VmaAllocation* pAllocation,
13652  VmaAllocationInfo* pAllocationInfo)
13653 {
13654  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13655 
13656  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
13657 
13658  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13659 
13660  VkMemoryRequirements vkMemReq = {};
13661  bool requiresDedicatedAllocation = false;
13662  bool prefersDedicatedAllocation = false;
13663  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
13664  requiresDedicatedAllocation,
13665  prefersDedicatedAllocation);
13666 
13667  VkResult result = allocator->AllocateMemory(
13668  vkMemReq,
13669  requiresDedicatedAllocation,
13670  prefersDedicatedAllocation,
13671  buffer, // dedicatedBuffer
13672  VK_NULL_HANDLE, // dedicatedImage
13673  *pCreateInfo,
13674  VMA_SUBALLOCATION_TYPE_BUFFER,
13675  pAllocation);
13676 
13677 #if VMA_RECORDING_ENABLED
13678  if(allocator->GetRecorder() != VMA_NULL)
13679  {
13680  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
13681  allocator->GetCurrentFrameIndex(),
13682  vkMemReq,
13683  requiresDedicatedAllocation,
13684  prefersDedicatedAllocation,
13685  *pCreateInfo,
13686  *pAllocation);
13687  }
13688 #endif
13689 
13690  if(pAllocationInfo && result == VK_SUCCESS)
13691  {
13692  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13693  }
13694 
13695  return result;
13696 }
13697 
13698 VkResult vmaAllocateMemoryForImage(
13699  VmaAllocator allocator,
13700  VkImage image,
13701  const VmaAllocationCreateInfo* pCreateInfo,
13702  VmaAllocation* pAllocation,
13703  VmaAllocationInfo* pAllocationInfo)
13704 {
13705  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
13706 
13707  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
13708 
13709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13710 
13711  VkMemoryRequirements vkMemReq = {};
13712  bool requiresDedicatedAllocation = false;
13713  bool prefersDedicatedAllocation = false;
13714  allocator->GetImageMemoryRequirements(image, vkMemReq,
13715  requiresDedicatedAllocation, prefersDedicatedAllocation);
13716 
13717  VkResult result = allocator->AllocateMemory(
13718  vkMemReq,
13719  requiresDedicatedAllocation,
13720  prefersDedicatedAllocation,
13721  VK_NULL_HANDLE, // dedicatedBuffer
13722  image, // dedicatedImage
13723  *pCreateInfo,
13724  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
13725  pAllocation);
13726 
13727 #if VMA_RECORDING_ENABLED
13728  if(allocator->GetRecorder() != VMA_NULL)
13729  {
13730  allocator->GetRecorder()->RecordAllocateMemoryForImage(
13731  allocator->GetCurrentFrameIndex(),
13732  vkMemReq,
13733  requiresDedicatedAllocation,
13734  prefersDedicatedAllocation,
13735  *pCreateInfo,
13736  *pAllocation);
13737  }
13738 #endif
13739 
13740  if(pAllocationInfo && result == VK_SUCCESS)
13741  {
13742  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13743  }
13744 
13745  return result;
13746 }
13747 
13748 void vmaFreeMemory(
13749  VmaAllocator allocator,
13750  VmaAllocation allocation)
13751 {
13752  VMA_ASSERT(allocator);
13753 
13754  if(allocation == VK_NULL_HANDLE)
13755  {
13756  return;
13757  }
13758 
13759  VMA_DEBUG_LOG("vmaFreeMemory");
13760 
13761  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13762 
13763 #if VMA_RECORDING_ENABLED
13764  if(allocator->GetRecorder() != VMA_NULL)
13765  {
13766  allocator->GetRecorder()->RecordFreeMemory(
13767  allocator->GetCurrentFrameIndex(),
13768  allocation);
13769  }
13770 #endif
13771 
13772  allocator->FreeMemory(allocation);
13773 }
13774 
13776  VmaAllocator allocator,
13777  VmaAllocation allocation,
13778  VmaAllocationInfo* pAllocationInfo)
13779 {
13780  VMA_ASSERT(allocator && allocation && pAllocationInfo);
13781 
13782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13783 
13784 #if VMA_RECORDING_ENABLED
13785  if(allocator->GetRecorder() != VMA_NULL)
13786  {
13787  allocator->GetRecorder()->RecordGetAllocationInfo(
13788  allocator->GetCurrentFrameIndex(),
13789  allocation);
13790  }
13791 #endif
13792 
13793  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13794 }
13795 
13796 VkBool32 vmaTouchAllocation(
13797  VmaAllocator allocator,
13798  VmaAllocation allocation)
13799 {
13800  VMA_ASSERT(allocator && allocation);
13801 
13802  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13803 
13804 #if VMA_RECORDING_ENABLED
13805  if(allocator->GetRecorder() != VMA_NULL)
13806  {
13807  allocator->GetRecorder()->RecordTouchAllocation(
13808  allocator->GetCurrentFrameIndex(),
13809  allocation);
13810  }
13811 #endif
13812 
13813  return allocator->TouchAllocation(allocation);
13814 }
13815 
13817  VmaAllocator allocator,
13818  VmaAllocation allocation,
13819  void* pUserData)
13820 {
13821  VMA_ASSERT(allocator && allocation);
13822 
13823  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13824 
13825  allocation->SetUserData(allocator, pUserData);
13826 
13827 #if VMA_RECORDING_ENABLED
13828  if(allocator->GetRecorder() != VMA_NULL)
13829  {
13830  allocator->GetRecorder()->RecordSetAllocationUserData(
13831  allocator->GetCurrentFrameIndex(),
13832  allocation,
13833  pUserData);
13834  }
13835 #endif
13836 }
13837 
13839  VmaAllocator allocator,
13840  VmaAllocation* pAllocation)
13841 {
13842  VMA_ASSERT(allocator && pAllocation);
13843 
13844  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13845 
13846  allocator->CreateLostAllocation(pAllocation);
13847 
13848 #if VMA_RECORDING_ENABLED
13849  if(allocator->GetRecorder() != VMA_NULL)
13850  {
13851  allocator->GetRecorder()->RecordCreateLostAllocation(
13852  allocator->GetCurrentFrameIndex(),
13853  *pAllocation);
13854  }
13855 #endif
13856 }
13857 
13858 VkResult vmaMapMemory(
13859  VmaAllocator allocator,
13860  VmaAllocation allocation,
13861  void** ppData)
13862 {
13863  VMA_ASSERT(allocator && allocation && ppData);
13864 
13865  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13866 
13867  VkResult res = allocator->Map(allocation, ppData);
13868 
13869 #if VMA_RECORDING_ENABLED
13870  if(allocator->GetRecorder() != VMA_NULL)
13871  {
13872  allocator->GetRecorder()->RecordMapMemory(
13873  allocator->GetCurrentFrameIndex(),
13874  allocation);
13875  }
13876 #endif
13877 
13878  return res;
13879 }
13880 
13881 void vmaUnmapMemory(
13882  VmaAllocator allocator,
13883  VmaAllocation allocation)
13884 {
13885  VMA_ASSERT(allocator && allocation);
13886 
13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13888 
13889 #if VMA_RECORDING_ENABLED
13890  if(allocator->GetRecorder() != VMA_NULL)
13891  {
13892  allocator->GetRecorder()->RecordUnmapMemory(
13893  allocator->GetCurrentFrameIndex(),
13894  allocation);
13895  }
13896 #endif
13897 
13898  allocator->Unmap(allocation);
13899 }
13900 
13901 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13902 {
13903  VMA_ASSERT(allocator && allocation);
13904 
13905  VMA_DEBUG_LOG("vmaFlushAllocation");
13906 
13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13908 
13909  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13910 
13911 #if VMA_RECORDING_ENABLED
13912  if(allocator->GetRecorder() != VMA_NULL)
13913  {
13914  allocator->GetRecorder()->RecordFlushAllocation(
13915  allocator->GetCurrentFrameIndex(),
13916  allocation, offset, size);
13917  }
13918 #endif
13919 }
13920 
13921 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13922 {
13923  VMA_ASSERT(allocator && allocation);
13924 
13925  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13926 
13927  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13928 
13929  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13930 
13931 #if VMA_RECORDING_ENABLED
13932  if(allocator->GetRecorder() != VMA_NULL)
13933  {
13934  allocator->GetRecorder()->RecordInvalidateAllocation(
13935  allocator->GetCurrentFrameIndex(),
13936  allocation, offset, size);
13937  }
13938 #endif
13939 }
13940 
13941 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
13942 {
13943  VMA_ASSERT(allocator);
13944 
13945  VMA_DEBUG_LOG("vmaCheckCorruption");
13946 
13947  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13948 
13949  return allocator->CheckCorruption(memoryTypeBits);
13950 }
13951 
13952 VkResult vmaDefragment(
13953  VmaAllocator allocator,
13954  VmaAllocation* pAllocations,
13955  size_t allocationCount,
13956  VkBool32* pAllocationsChanged,
13957  const VmaDefragmentationInfo *pDefragmentationInfo,
13958  VmaDefragmentationStats* pDefragmentationStats)
13959 {
13960  VMA_ASSERT(allocator && pAllocations);
13961 
13962  VMA_DEBUG_LOG("vmaDefragment");
13963 
13964  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13965 
13966  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
13967 }
13968 
13969 VkResult vmaBindBufferMemory(
13970  VmaAllocator allocator,
13971  VmaAllocation allocation,
13972  VkBuffer buffer)
13973 {
13974  VMA_ASSERT(allocator && allocation && buffer);
13975 
13976  VMA_DEBUG_LOG("vmaBindBufferMemory");
13977 
13978  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13979 
13980  return allocator->BindBufferMemory(allocation, buffer);
13981 }
13982 
13983 VkResult vmaBindImageMemory(
13984  VmaAllocator allocator,
13985  VmaAllocation allocation,
13986  VkImage image)
13987 {
13988  VMA_ASSERT(allocator && allocation && image);
13989 
13990  VMA_DEBUG_LOG("vmaBindImageMemory");
13991 
13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13993 
13994  return allocator->BindImageMemory(allocation, image);
13995 }
13996 
13997 VkResult vmaCreateBuffer(
13998  VmaAllocator allocator,
13999  const VkBufferCreateInfo* pBufferCreateInfo,
14000  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14001  VkBuffer* pBuffer,
14002  VmaAllocation* pAllocation,
14003  VmaAllocationInfo* pAllocationInfo)
14004 {
14005  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
14006 
14007  VMA_DEBUG_LOG("vmaCreateBuffer");
14008 
14009  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14010 
14011  *pBuffer = VK_NULL_HANDLE;
14012  *pAllocation = VK_NULL_HANDLE;
14013 
14014  // 1. Create VkBuffer.
14015  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
14016  allocator->m_hDevice,
14017  pBufferCreateInfo,
14018  allocator->GetAllocationCallbacks(),
14019  pBuffer);
14020  if(res >= 0)
14021  {
14022  // 2. vkGetBufferMemoryRequirements.
14023  VkMemoryRequirements vkMemReq = {};
14024  bool requiresDedicatedAllocation = false;
14025  bool prefersDedicatedAllocation = false;
14026  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
14027  requiresDedicatedAllocation, prefersDedicatedAllocation);
14028 
14029  // Make sure alignment requirements for specific buffer usages reported
14030  // in Physical Device Properties are included in alignment reported by memory requirements.
14031  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
14032  {
14033  VMA_ASSERT(vkMemReq.alignment %
14034  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
14035  }
14036  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
14037  {
14038  VMA_ASSERT(vkMemReq.alignment %
14039  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
14040  }
14041  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
14042  {
14043  VMA_ASSERT(vkMemReq.alignment %
14044  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
14045  }
14046 
14047  // 3. Allocate memory using allocator.
14048  res = allocator->AllocateMemory(
14049  vkMemReq,
14050  requiresDedicatedAllocation,
14051  prefersDedicatedAllocation,
14052  *pBuffer, // dedicatedBuffer
14053  VK_NULL_HANDLE, // dedicatedImage
14054  *pAllocationCreateInfo,
14055  VMA_SUBALLOCATION_TYPE_BUFFER,
14056  pAllocation);
14057 
14058 #if VMA_RECORDING_ENABLED
14059  if(allocator->GetRecorder() != VMA_NULL)
14060  {
14061  allocator->GetRecorder()->RecordCreateBuffer(
14062  allocator->GetCurrentFrameIndex(),
14063  *pBufferCreateInfo,
14064  *pAllocationCreateInfo,
14065  *pAllocation);
14066  }
14067 #endif
14068 
14069  if(res >= 0)
14070  {
14071  // 3. Bind buffer with memory.
14072  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
14073  if(res >= 0)
14074  {
14075  // All steps succeeded.
14076  #if VMA_STATS_STRING_ENABLED
14077  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
14078  #endif
14079  if(pAllocationInfo != VMA_NULL)
14080  {
14081  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14082  }
14083 
14084  return VK_SUCCESS;
14085  }
14086  allocator->FreeMemory(*pAllocation);
14087  *pAllocation = VK_NULL_HANDLE;
14088  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14089  *pBuffer = VK_NULL_HANDLE;
14090  return res;
14091  }
14092  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
14093  *pBuffer = VK_NULL_HANDLE;
14094  return res;
14095  }
14096  return res;
14097 }
14098 
14099 void vmaDestroyBuffer(
14100  VmaAllocator allocator,
14101  VkBuffer buffer,
14102  VmaAllocation allocation)
14103 {
14104  VMA_ASSERT(allocator);
14105 
14106  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14107  {
14108  return;
14109  }
14110 
14111  VMA_DEBUG_LOG("vmaDestroyBuffer");
14112 
14113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14114 
14115 #if VMA_RECORDING_ENABLED
14116  if(allocator->GetRecorder() != VMA_NULL)
14117  {
14118  allocator->GetRecorder()->RecordDestroyBuffer(
14119  allocator->GetCurrentFrameIndex(),
14120  allocation);
14121  }
14122 #endif
14123 
14124  if(buffer != VK_NULL_HANDLE)
14125  {
14126  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
14127  }
14128 
14129  if(allocation != VK_NULL_HANDLE)
14130  {
14131  allocator->FreeMemory(allocation);
14132  }
14133 }
14134 
14135 VkResult vmaCreateImage(
14136  VmaAllocator allocator,
14137  const VkImageCreateInfo* pImageCreateInfo,
14138  const VmaAllocationCreateInfo* pAllocationCreateInfo,
14139  VkImage* pImage,
14140  VmaAllocation* pAllocation,
14141  VmaAllocationInfo* pAllocationInfo)
14142 {
14143  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
14144 
14145  VMA_DEBUG_LOG("vmaCreateImage");
14146 
14147  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14148 
14149  *pImage = VK_NULL_HANDLE;
14150  *pAllocation = VK_NULL_HANDLE;
14151 
14152  // 1. Create VkImage.
14153  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
14154  allocator->m_hDevice,
14155  pImageCreateInfo,
14156  allocator->GetAllocationCallbacks(),
14157  pImage);
14158  if(res >= 0)
14159  {
14160  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
14161  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
14162  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
14163 
14164  // 2. Allocate memory using allocator.
14165  VkMemoryRequirements vkMemReq = {};
14166  bool requiresDedicatedAllocation = false;
14167  bool prefersDedicatedAllocation = false;
14168  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
14169  requiresDedicatedAllocation, prefersDedicatedAllocation);
14170 
14171  res = allocator->AllocateMemory(
14172  vkMemReq,
14173  requiresDedicatedAllocation,
14174  prefersDedicatedAllocation,
14175  VK_NULL_HANDLE, // dedicatedBuffer
14176  *pImage, // dedicatedImage
14177  *pAllocationCreateInfo,
14178  suballocType,
14179  pAllocation);
14180 
14181 #if VMA_RECORDING_ENABLED
14182  if(allocator->GetRecorder() != VMA_NULL)
14183  {
14184  allocator->GetRecorder()->RecordCreateImage(
14185  allocator->GetCurrentFrameIndex(),
14186  *pImageCreateInfo,
14187  *pAllocationCreateInfo,
14188  *pAllocation);
14189  }
14190 #endif
14191 
14192  if(res >= 0)
14193  {
14194  // 3. Bind image with memory.
14195  res = allocator->BindImageMemory(*pAllocation, *pImage);
14196  if(res >= 0)
14197  {
14198  // All steps succeeded.
14199  #if VMA_STATS_STRING_ENABLED
14200  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
14201  #endif
14202  if(pAllocationInfo != VMA_NULL)
14203  {
14204  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
14205  }
14206 
14207  return VK_SUCCESS;
14208  }
14209  allocator->FreeMemory(*pAllocation);
14210  *pAllocation = VK_NULL_HANDLE;
14211  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14212  *pImage = VK_NULL_HANDLE;
14213  return res;
14214  }
14215  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
14216  *pImage = VK_NULL_HANDLE;
14217  return res;
14218  }
14219  return res;
14220 }
14221 
14222 void vmaDestroyImage(
14223  VmaAllocator allocator,
14224  VkImage image,
14225  VmaAllocation allocation)
14226 {
14227  VMA_ASSERT(allocator);
14228 
14229  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
14230  {
14231  return;
14232  }
14233 
14234  VMA_DEBUG_LOG("vmaDestroyImage");
14235 
14236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
14237 
14238 #if VMA_RECORDING_ENABLED
14239  if(allocator->GetRecorder() != VMA_NULL)
14240  {
14241  allocator->GetRecorder()->RecordDestroyImage(
14242  allocator->GetCurrentFrameIndex(),
14243  allocation);
14244  }
14245 #endif
14246 
14247  if(image != VK_NULL_HANDLE)
14248  {
14249  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
14250  }
14251  if(allocation != VK_NULL_HANDLE)
14252  {
14253  allocator->FreeMemory(allocation);
14254  }
14255 }
14256 
14257 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1446
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1759
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1515
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1477
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2071
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1458
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1716
Definition: vk_mem_alloc.h:1819
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1450
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2171
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1512
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2416
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1970
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1489
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2052
Definition: vk_mem_alloc.h:1796
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1439
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1869
Definition: vk_mem_alloc.h:1743
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1524
Definition: vk_mem_alloc.h:1988
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1577
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1509
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1747
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1649
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1455
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1648
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2420
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1541
VmaStatInfo total
Definition: vk_mem_alloc.h:1658
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2428
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1853
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2411
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1456
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1381
Represents the main object of this library, once initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1518
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2002
Definition: vk_mem_alloc.h:1996
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1584
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2181
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1451
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1475
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1890
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2022
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2058
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1437
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2005
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1694
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2406
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2424
Definition: vk_mem_alloc.h:1733
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1877
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1454
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1654
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1387
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1408
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1479
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1413
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2426
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1864
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2068
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1447
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1637
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2017
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1400
Definition: vk_mem_alloc.h:1992
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1803
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1650
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1404
Definition: vk_mem_alloc.h:1827
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2008
Definition: vk_mem_alloc.h:1742
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1453
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1859
Definition: vk_mem_alloc.h:1850
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1640
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1449
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2030
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1527
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2061
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1848
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1883
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1565
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1656
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1783
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1649
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1460
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1497
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1402
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1459
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2044
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1452
Definition: vk_mem_alloc.h:1814
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1505
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2195
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1521
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1649
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1646
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2049
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:1823
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2176
Definition: vk_mem_alloc.h:1834
Definition: vk_mem_alloc.h:1846
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2422
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1445
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1644
Definition: vk_mem_alloc.h:1699
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1998
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1494
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1642
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1457
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1461
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1770
Definition: vk_mem_alloc.h:1841
Definition: vk_mem_alloc.h:1726
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2190
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1435
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1448
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:1985
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2157
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1831
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1952
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1650
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1469
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1657
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2055
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1650
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2162