diff --git a/docs/html/quick_start.html b/docs/html/quick_start.html index bc21235..628fd09 100644 --- a/docs/html/quick_start.html +++ b/docs/html/quick_start.html @@ -79,6 +79,7 @@ Project setup
  • In exactly one CPP file, define the following macro before this include. It also enables internal definitions.
  • #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

    It may be a good idea to create a dedicated CPP file just for this purpose.

    +

    Note on language: This library is written in C++, but has C-compatible interface. Thus you can include and use vk_mem_alloc.h in C or C++ code, but full implementation with VMA_IMPLEMENTATION macro must be compiled as C++, NOT as C.

    Please note that this library includes header <vulkan/vulkan.h>, which in turn includes <windows.h> on Windows. If you need some specific macros defined before including these headers (like WIN32_LEAN_AND_MEAN or WINVER for Windows, VK_USE_PLATFORM_WIN32_KHR for Vulkan), you must define them before every #include of this library.

    Initialization

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index abe9954..d3e0ccf 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,188 +65,188 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1464 /*
    1465 Define this macro to 0/1 to disable/enable support for recording functionality,
    1466 available through VmaAllocatorCreateInfo::pRecordSettings.
    1467 */
    1468 #ifndef VMA_RECORDING_ENABLED
    1469  #ifdef _WIN32
    1470  #define VMA_RECORDING_ENABLED 1
    1471  #else
    1472  #define VMA_RECORDING_ENABLED 0
    1473  #endif
    1474 #endif
    1475 
    1476 #ifndef NOMINMAX
    1477  #define NOMINMAX // For windows.h
    1478 #endif
    1479 
    1480 #include <vulkan/vulkan.h>
    1481 
    1482 #if VMA_RECORDING_ENABLED
    1483  #include <windows.h>
    1484 #endif
    1485 
    1486 #if !defined(VMA_DEDICATED_ALLOCATION)
    1487  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1488  #define VMA_DEDICATED_ALLOCATION 1
    1489  #else
    1490  #define VMA_DEDICATED_ALLOCATION 0
    1491  #endif
    1492 #endif
    1493 
    1503 VK_DEFINE_HANDLE(VmaAllocator)
    1504 
    1505 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1507  VmaAllocator allocator,
    1508  uint32_t memoryType,
    1509  VkDeviceMemory memory,
    1510  VkDeviceSize size);
    1512 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1513  VmaAllocator allocator,
    1514  uint32_t memoryType,
    1515  VkDeviceMemory memory,
    1516  VkDeviceSize size);
    1517 
    1531 
    1561 
    1564 typedef VkFlags VmaAllocatorCreateFlags;
    1565 
    1570 typedef struct VmaVulkanFunctions {
    1571  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1572  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1573  PFN_vkAllocateMemory vkAllocateMemory;
    1574  PFN_vkFreeMemory vkFreeMemory;
    1575  PFN_vkMapMemory vkMapMemory;
    1576  PFN_vkUnmapMemory vkUnmapMemory;
    1577  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1578  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1579  PFN_vkBindBufferMemory vkBindBufferMemory;
    1580  PFN_vkBindImageMemory vkBindImageMemory;
    1581  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1582  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1583  PFN_vkCreateBuffer vkCreateBuffer;
    1584  PFN_vkDestroyBuffer vkDestroyBuffer;
    1585  PFN_vkCreateImage vkCreateImage;
    1586  PFN_vkDestroyImage vkDestroyImage;
    1587 #if VMA_DEDICATED_ALLOCATION
    1588  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1589  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1590 #endif
    1592 
    1594 typedef enum VmaRecordFlagBits {
    1601 
    1604 typedef VkFlags VmaRecordFlags;
    1605 
    1607 typedef struct VmaRecordSettings
    1608 {
    1618  const char* pFilePath;
    1620 
    1623 {
    1627 
    1628  VkPhysicalDevice physicalDevice;
    1630 
    1631  VkDevice device;
    1633 
    1636 
    1637  const VkAllocationCallbacks* pAllocationCallbacks;
    1639 
    1678  const VkDeviceSize* pHeapSizeLimit;
    1699 
    1701 VkResult vmaCreateAllocator(
    1702  const VmaAllocatorCreateInfo* pCreateInfo,
    1703  VmaAllocator* pAllocator);
    1704 
    1706 void vmaDestroyAllocator(
    1707  VmaAllocator allocator);
    1708 
    1714  VmaAllocator allocator,
    1715  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1716 
    1722  VmaAllocator allocator,
    1723  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1724 
    1732  VmaAllocator allocator,
    1733  uint32_t memoryTypeIndex,
    1734  VkMemoryPropertyFlags* pFlags);
    1735 
    1745  VmaAllocator allocator,
    1746  uint32_t frameIndex);
    1747 
    1750 typedef struct VmaStatInfo
    1751 {
    1753  uint32_t blockCount;
    1759  VkDeviceSize usedBytes;
    1761  VkDeviceSize unusedBytes;
    1764 } VmaStatInfo;
    1765 
    1767 typedef struct VmaStats
    1768 {
    1769  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1770  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1772 } VmaStats;
    1773 
    1775 void vmaCalculateStats(
    1776  VmaAllocator allocator,
    1777  VmaStats* pStats);
    1778 
    1779 #define VMA_STATS_STRING_ENABLED 1
    1780 
    1781 #if VMA_STATS_STRING_ENABLED
    1782 
    1784 
    1786 void vmaBuildStatsString(
    1787  VmaAllocator allocator,
    1788  char** ppStatsString,
    1789  VkBool32 detailedMap);
    1790 
    1791 void vmaFreeStatsString(
    1792  VmaAllocator allocator,
    1793  char* pStatsString);
    1794 
    1795 #endif // #if VMA_STATS_STRING_ENABLED
    1796 
    1805 VK_DEFINE_HANDLE(VmaPool)
    1806 
    1807 typedef enum VmaMemoryUsage
    1808 {
    1857 } VmaMemoryUsage;
    1858 
    1873 
    1928 
    1941 
    1951 
    1958 
    1962 
    1964 {
    1977  VkMemoryPropertyFlags requiredFlags;
    1982  VkMemoryPropertyFlags preferredFlags;
    1990  uint32_t memoryTypeBits;
    2003  void* pUserData;
    2005 
    2022 VkResult vmaFindMemoryTypeIndex(
    2023  VmaAllocator allocator,
    2024  uint32_t memoryTypeBits,
    2025  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2026  uint32_t* pMemoryTypeIndex);
    2027 
    2041  VmaAllocator allocator,
    2042  const VkBufferCreateInfo* pBufferCreateInfo,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkImageCreateInfo* pImageCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2084 
    2101 
    2112 
    2118 
    2121 typedef VkFlags VmaPoolCreateFlags;
    2122 
    2125 typedef struct VmaPoolCreateInfo {
    2140  VkDeviceSize blockSize;
    2169 
    2172 typedef struct VmaPoolStats {
    2175  VkDeviceSize size;
    2178  VkDeviceSize unusedSize;
    2191  VkDeviceSize unusedRangeSizeMax;
    2194  size_t blockCount;
    2195 } VmaPoolStats;
    2196 
    2203 VkResult vmaCreatePool(
    2204  VmaAllocator allocator,
    2205  const VmaPoolCreateInfo* pCreateInfo,
    2206  VmaPool* pPool);
    2207 
    2210 void vmaDestroyPool(
    2211  VmaAllocator allocator,
    2212  VmaPool pool);
    2213 
    2220 void vmaGetPoolStats(
    2221  VmaAllocator allocator,
    2222  VmaPool pool,
    2223  VmaPoolStats* pPoolStats);
    2224 
    2232  VmaAllocator allocator,
    2233  VmaPool pool,
    2234  size_t* pLostAllocationCount);
    2235 
    2250 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2251 
    2276 VK_DEFINE_HANDLE(VmaAllocation)
    2277 
    2278 
    2280 typedef struct VmaAllocationInfo {
    2285  uint32_t memoryType;
    2294  VkDeviceMemory deviceMemory;
    2299  VkDeviceSize offset;
    2304  VkDeviceSize size;
    2318  void* pUserData;
    2320 
    2331 VkResult vmaAllocateMemory(
    2332  VmaAllocator allocator,
    2333  const VkMemoryRequirements* pVkMemoryRequirements,
    2334  const VmaAllocationCreateInfo* pCreateInfo,
    2335  VmaAllocation* pAllocation,
    2336  VmaAllocationInfo* pAllocationInfo);
    2337 
    2345  VmaAllocator allocator,
    2346  VkBuffer buffer,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2352 VkResult vmaAllocateMemoryForImage(
    2353  VmaAllocator allocator,
    2354  VkImage image,
    2355  const VmaAllocationCreateInfo* pCreateInfo,
    2356  VmaAllocation* pAllocation,
    2357  VmaAllocationInfo* pAllocationInfo);
    2358 
    2360 void vmaFreeMemory(
    2361  VmaAllocator allocator,
    2362  VmaAllocation allocation);
    2363 
    2381  VmaAllocator allocator,
    2382  VmaAllocation allocation,
    2383  VmaAllocationInfo* pAllocationInfo);
    2384 
    2399 VkBool32 vmaTouchAllocation(
    2400  VmaAllocator allocator,
    2401  VmaAllocation allocation);
    2402 
    2417  VmaAllocator allocator,
    2418  VmaAllocation allocation,
    2419  void* pUserData);
    2420 
    2432  VmaAllocator allocator,
    2433  VmaAllocation* pAllocation);
    2434 
    2469 VkResult vmaMapMemory(
    2470  VmaAllocator allocator,
    2471  VmaAllocation allocation,
    2472  void** ppData);
    2473 
    2478 void vmaUnmapMemory(
    2479  VmaAllocator allocator,
    2480  VmaAllocation allocation);
    2481 
    2494 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2495 
    2508 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2509 
    2526 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2527 
    2529 typedef struct VmaDefragmentationInfo {
    2534  VkDeviceSize maxBytesToMove;
    2541 
    2543 typedef struct VmaDefragmentationStats {
    2545  VkDeviceSize bytesMoved;
    2547  VkDeviceSize bytesFreed;
    2553 
    2592 VkResult vmaDefragment(
    2593  VmaAllocator allocator,
    2594  VmaAllocation* pAllocations,
    2595  size_t allocationCount,
    2596  VkBool32* pAllocationsChanged,
    2597  const VmaDefragmentationInfo *pDefragmentationInfo,
    2598  VmaDefragmentationStats* pDefragmentationStats);
    2599 
    2612 VkResult vmaBindBufferMemory(
    2613  VmaAllocator allocator,
    2614  VmaAllocation allocation,
    2615  VkBuffer buffer);
    2616 
    2629 VkResult vmaBindImageMemory(
    2630  VmaAllocator allocator,
    2631  VmaAllocation allocation,
    2632  VkImage image);
    2633 
    2660 VkResult vmaCreateBuffer(
    2661  VmaAllocator allocator,
    2662  const VkBufferCreateInfo* pBufferCreateInfo,
    2663  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2664  VkBuffer* pBuffer,
    2665  VmaAllocation* pAllocation,
    2666  VmaAllocationInfo* pAllocationInfo);
    2667 
    2679 void vmaDestroyBuffer(
    2680  VmaAllocator allocator,
    2681  VkBuffer buffer,
    2682  VmaAllocation allocation);
    2683 
    2685 VkResult vmaCreateImage(
    2686  VmaAllocator allocator,
    2687  const VkImageCreateInfo* pImageCreateInfo,
    2688  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2689  VkImage* pImage,
    2690  VmaAllocation* pAllocation,
    2691  VmaAllocationInfo* pAllocationInfo);
    2692 
    2704 void vmaDestroyImage(
    2705  VmaAllocator allocator,
    2706  VkImage image,
    2707  VmaAllocation allocation);
    2708 
    2709 #ifdef __cplusplus
    2710 }
    2711 #endif
    2712 
    2713 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2714 
    2715 // For Visual Studio IntelliSense.
    2716 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2717 #define VMA_IMPLEMENTATION
    2718 #endif
    2719 
    2720 #ifdef VMA_IMPLEMENTATION
    2721 #undef VMA_IMPLEMENTATION
    2722 
    2723 #include <cstdint>
    2724 #include <cstdlib>
    2725 #include <cstring>
    2726 
    2727 /*******************************************************************************
    2728 CONFIGURATION SECTION
    2729 
    2730 Define some of these macros before each #include of this header or change them
    2731 here if you need other than default behavior depending on your environment.
    2732 */
    2733 
    2734 /*
    2735 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2736 internally, like:
    2737 
    2738  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2739 
    2740 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2741 VmaAllocatorCreateInfo::pVulkanFunctions.
    2742 */
    2743 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2744 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2745 #endif
    2746 
    2747 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2748 //#define VMA_USE_STL_CONTAINERS 1
    2749 
    2750 /* Set this macro to 1 to make the library including and using STL containers:
    2751 std::pair, std::vector, std::list, std::unordered_map.
    2752 
    2753 Set it to 0 or undefined to make the library using its own implementation of
    2754 the containers.
    2755 */
    2756 #if VMA_USE_STL_CONTAINERS
    2757  #define VMA_USE_STL_VECTOR 1
    2758  #define VMA_USE_STL_UNORDERED_MAP 1
    2759  #define VMA_USE_STL_LIST 1
    2760 #endif
    2761 
    2762 #if VMA_USE_STL_VECTOR
    2763  #include <vector>
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_UNORDERED_MAP
    2767  #include <unordered_map>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_LIST
    2771  #include <list>
    2772 #endif
    2773 
    2774 /*
    2775 Following headers are used in this CONFIGURATION section only, so feel free to
    2776 remove them if not needed.
    2777 */
    2778 #include <cassert> // for assert
    2779 #include <algorithm> // for min, max
    2780 #include <mutex> // for std::mutex
    2781 #include <atomic> // for std::atomic
    2782 
    2783 #ifndef VMA_NULL
    2784  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2785  #define VMA_NULL nullptr
    2786 #endif
    2787 
    2788 #if defined(__APPLE__) || defined(__ANDROID__)
    2789 #include <cstdlib>
    2790 void *aligned_alloc(size_t alignment, size_t size)
    2791 {
    2792  // alignment must be >= sizeof(void*)
    2793  if(alignment < sizeof(void*))
    2794  {
    2795  alignment = sizeof(void*);
    2796  }
    2797 
    2798  void *pointer;
    2799  if(posix_memalign(&pointer, alignment, size) == 0)
    2800  return pointer;
    2801  return VMA_NULL;
    2802 }
    2803 #endif
    2804 
    2805 // If your compiler is not compatible with C++11 and definition of
    2806 // aligned_alloc() function is missing, uncommenting following line may help:
    2807 
    2808 //#include <malloc.h>
    2809 
    2810 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2811 #ifndef VMA_ASSERT
    2812  #ifdef _DEBUG
    2813  #define VMA_ASSERT(expr) assert(expr)
    2814  #else
    2815  #define VMA_ASSERT(expr)
    2816  #endif
    2817 #endif
    2818 
    2819 // Assert that will be called very often, like inside data structures e.g. operator[].
    2820 // Making it non-empty can make program slow.
    2821 #ifndef VMA_HEAVY_ASSERT
    2822  #ifdef _DEBUG
    2823  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2824  #else
    2825  #define VMA_HEAVY_ASSERT(expr)
    2826  #endif
    2827 #endif
    2828 
    2829 #ifndef VMA_ALIGN_OF
    2830  #define VMA_ALIGN_OF(type) (__alignof(type))
    2831 #endif
    2832 
    2833 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2834  #if defined(_WIN32)
    2835  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2836  #else
    2837  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2838  #endif
    2839 #endif
    2840 
    2841 #ifndef VMA_SYSTEM_FREE
    2842  #if defined(_WIN32)
    2843  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2844  #else
    2845  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2846  #endif
    2847 #endif
    2848 
    2849 #ifndef VMA_MIN
    2850  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2851 #endif
    2852 
    2853 #ifndef VMA_MAX
    2854  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_SWAP
    2858  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2859 #endif
    2860 
    2861 #ifndef VMA_SORT
    2862  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2863 #endif
    2864 
    2865 #ifndef VMA_DEBUG_LOG
    2866  #define VMA_DEBUG_LOG(format, ...)
    2867  /*
    2868  #define VMA_DEBUG_LOG(format, ...) do { \
    2869  printf(format, __VA_ARGS__); \
    2870  printf("\n"); \
    2871  } while(false)
    2872  */
    2873 #endif
    2874 
    2875 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2876 #if VMA_STATS_STRING_ENABLED
    2877  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2878  {
    2879  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2880  }
    2881  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2882  {
    2883  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2884  }
    2885  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2886  {
    2887  snprintf(outStr, strLen, "%p", ptr);
    2888  }
    2889 #endif
    2890 
    2891 #ifndef VMA_MUTEX
    2892  class VmaMutex
    2893  {
    2894  public:
    2895  VmaMutex() { }
    2896  ~VmaMutex() { }
    2897  void Lock() { m_Mutex.lock(); }
    2898  void Unlock() { m_Mutex.unlock(); }
    2899  private:
    2900  std::mutex m_Mutex;
    2901  };
    2902  #define VMA_MUTEX VmaMutex
    2903 #endif
    2904 
    2905 /*
    2906 If providing your own implementation, you need to implement a subset of std::atomic:
    2907 
    2908 - Constructor(uint32_t desired)
    2909 - uint32_t load() const
    2910 - void store(uint32_t desired)
    2911 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2912 */
    2913 #ifndef VMA_ATOMIC_UINT32
    2914  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2915 #endif
    2916 
    2917 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2918 
    2922  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2923 #endif
    2924 
    2925 #ifndef VMA_DEBUG_ALIGNMENT
    2926 
    2930  #define VMA_DEBUG_ALIGNMENT (1)
    2931 #endif
    2932 
    2933 #ifndef VMA_DEBUG_MARGIN
    2934 
    2938  #define VMA_DEBUG_MARGIN (0)
    2939 #endif
    2940 
    2941 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2942 
    2946  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2947 #endif
    2948 
    2949 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2950 
    2955  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2956 #endif
    2957 
    2958 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2959 
    2963  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2964 #endif
    2965 
    2966 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2967 
    2971  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2972 #endif
    2973 
    2974 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2975  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2980  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2982 #endif
    2983 
    2984 #ifndef VMA_CLASS_NO_COPY
    2985  #define VMA_CLASS_NO_COPY(className) \
    2986  private: \
    2987  className(const className&) = delete; \
    2988  className& operator=(const className&) = delete;
    2989 #endif
    2990 
    2991 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2992 
    2993 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2994 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2995 
    2996 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    2997 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    2998 
    2999 /*******************************************************************************
    3000 END OF CONFIGURATION
    3001 */
    3002 
    3003 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3004  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3005 
    3006 // Returns number of bits set to 1 in (v).
    3007 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3008 {
    3009  uint32_t c = v - ((v >> 1) & 0x55555555);
    3010  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3011  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3012  c = ((c >> 8) + c) & 0x00FF00FF;
    3013  c = ((c >> 16) + c) & 0x0000FFFF;
    3014  return c;
    3015 }
    3016 
    3017 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3018 // Use types like uint32_t, uint64_t as T.
    3019 template <typename T>
    3020 static inline T VmaAlignUp(T val, T align)
    3021 {
    3022  return (val + align - 1) / align * align;
    3023 }
    3024 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3025 // Use types like uint32_t, uint64_t as T.
    3026 template <typename T>
    3027 static inline T VmaAlignDown(T val, T align)
    3028 {
    3029  return val / align * align;
    3030 }
    3031 
    3032 // Division with mathematical rounding to nearest number.
    3033 template <typename T>
    3034 static inline T VmaRoundDiv(T x, T y)
    3035 {
    3036  return (x + (y / (T)2)) / y;
    3037 }
    3038 
    3039 /*
    3040 Returns true if given number is a power of two.
    3041 T must be unsigned integer number or signed integer but always nonnegative.
    3042 For 0 returns true.
    3043 */
    3044 template <typename T>
    3045 inline bool VmaIsPow2(T x)
    3046 {
    3047  return (x & (x-1)) == 0;
    3048 }
    3049 
    3050 // Returns smallest power of 2 greater or equal to v.
    3051 static inline uint32_t VmaNextPow2(uint32_t v)
    3052 {
    3053  v--;
    3054  v |= v >> 1;
    3055  v |= v >> 2;
    3056  v |= v >> 4;
    3057  v |= v >> 8;
    3058  v |= v >> 16;
    3059  v++;
    3060  return v;
    3061 }
    3062 static inline uint64_t VmaNextPow2(uint64_t v)
    3063 {
    3064  v--;
    3065  v |= v >> 1;
    3066  v |= v >> 2;
    3067  v |= v >> 4;
    3068  v |= v >> 8;
    3069  v |= v >> 16;
    3070  v |= v >> 32;
    3071  v++;
    3072  return v;
    3073 }
    3074 
    3075 // Returns largest power of 2 less or equal to v.
    3076 static inline uint32_t VmaPrevPow2(uint32_t v)
    3077 {
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v = v ^ (v >> 1);
    3084  return v;
    3085 }
    3086 static inline uint64_t VmaPrevPow2(uint64_t v)
    3087 {
    3088  v |= v >> 1;
    3089  v |= v >> 2;
    3090  v |= v >> 4;
    3091  v |= v >> 8;
    3092  v |= v >> 16;
    3093  v |= v >> 32;
    3094  v = v ^ (v >> 1);
    3095  return v;
    3096 }
    3097 
    3098 static inline bool VmaStrIsEmpty(const char* pStr)
    3099 {
    3100  return pStr == VMA_NULL || *pStr == '\0';
    3101 }
    3102 
    3103 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3104 {
    3105  switch(algorithm)
    3106  {
    3108  return "Linear";
    3110  return "Buddy";
    3111  case 0:
    3112  return "Default";
    3113  default:
    3114  VMA_ASSERT(0);
    3115  return "";
    3116  }
    3117 }
    3118 
    3119 #ifndef VMA_SORT
    3120 
    3121 template<typename Iterator, typename Compare>
    3122 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3123 {
    3124  Iterator centerValue = end; --centerValue;
    3125  Iterator insertIndex = beg;
    3126  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3127  {
    3128  if(cmp(*memTypeIndex, *centerValue))
    3129  {
    3130  if(insertIndex != memTypeIndex)
    3131  {
    3132  VMA_SWAP(*memTypeIndex, *insertIndex);
    3133  }
    3134  ++insertIndex;
    3135  }
    3136  }
    3137  if(insertIndex != centerValue)
    3138  {
    3139  VMA_SWAP(*insertIndex, *centerValue);
    3140  }
    3141  return insertIndex;
    3142 }
    3143 
    3144 template<typename Iterator, typename Compare>
    3145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3146 {
    3147  if(beg < end)
    3148  {
    3149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3152  }
    3153 }
    3154 
    3155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3156 
    3157 #endif // #ifndef VMA_SORT
    3158 
    3159 /*
    3160 Returns true if two memory blocks occupy overlapping pages.
    3161 ResourceA must be in less memory offset than ResourceB.
    3162 
    3163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3165 */
    3166 static inline bool VmaBlocksOnSamePage(
    3167  VkDeviceSize resourceAOffset,
    3168  VkDeviceSize resourceASize,
    3169  VkDeviceSize resourceBOffset,
    3170  VkDeviceSize pageSize)
    3171 {
    3172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3175  VkDeviceSize resourceBStart = resourceBOffset;
    3176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3177  return resourceAEndPage == resourceBStartPage;
    3178 }
    3179 
    3180 enum VmaSuballocationType
    3181 {
    3182  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3183  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3184  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3186  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3187  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3188  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3189 };
    3190 
    3191 /*
    3192 Returns true if given suballocation types could conflict and must respect
    3193 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3194 or linear image and another one is optimal image. If type is unknown, behave
    3195 conservatively.
    3196 */
    3197 static inline bool VmaIsBufferImageGranularityConflict(
    3198  VmaSuballocationType suballocType1,
    3199  VmaSuballocationType suballocType2)
    3200 {
    3201  if(suballocType1 > suballocType2)
    3202  {
    3203  VMA_SWAP(suballocType1, suballocType2);
    3204  }
    3205 
    3206  switch(suballocType1)
    3207  {
    3208  case VMA_SUBALLOCATION_TYPE_FREE:
    3209  return false;
    3210  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3211  return true;
    3212  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3213  return
    3214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3216  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3217  return
    3218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3220  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3221  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3222  return
    3223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3224  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3225  return false;
    3226  default:
    3227  VMA_ASSERT(0);
    3228  return true;
    3229  }
    3230 }
    3231 
    3232 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3233 {
    3234  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3235  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3236  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3237  {
    3238  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3239  }
    3240 }
    3241 
    3242 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3243 {
    3244  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3245  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3246  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3247  {
    3248  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3249  {
    3250  return false;
    3251  }
    3252  }
    3253  return true;
    3254 }
    3255 
    3256 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3257 struct VmaMutexLock
    3258 {
    3259  VMA_CLASS_NO_COPY(VmaMutexLock)
    3260 public:
    3261  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3262  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3263  {
    3264  if(m_pMutex)
    3265  {
    3266  m_pMutex->Lock();
    3267  }
    3268  }
    3269 
    3270  ~VmaMutexLock()
    3271  {
    3272  if(m_pMutex)
    3273  {
    3274  m_pMutex->Unlock();
    3275  }
    3276  }
    3277 
    3278 private:
    3279  VMA_MUTEX* m_pMutex;
    3280 };
    3281 
    3282 #if VMA_DEBUG_GLOBAL_MUTEX
    3283  static VMA_MUTEX gDebugGlobalMutex;
    3284  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3285 #else
    3286  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3287 #endif
    3288 
    3289 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3290 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3291 
    3292 /*
    3293 Performs binary search and returns iterator to first element that is greater or
    3294 equal to (key), according to comparison (cmp).
    3295 
    3296 Cmp should return true if first argument is less than second argument.
    3297 
    3298 Returned value is the found element, if present in the collection or place where
    3299 new element with value (key) should be inserted.
    3300 */
    3301 template <typename CmpLess, typename IterT, typename KeyT>
    3302 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3303 {
    3304  size_t down = 0, up = (end - beg);
    3305  while(down < up)
    3306  {
    3307  const size_t mid = (down + up) / 2;
    3308  if(cmp(*(beg+mid), key))
    3309  {
    3310  down = mid + 1;
    3311  }
    3312  else
    3313  {
    3314  up = mid;
    3315  }
    3316  }
    3317  return beg + down;
    3318 }
    3319 
    3321 // Memory allocation
    3322 
    3323 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3324 {
    3325  if((pAllocationCallbacks != VMA_NULL) &&
    3326  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3327  {
    3328  return (*pAllocationCallbacks->pfnAllocation)(
    3329  pAllocationCallbacks->pUserData,
    3330  size,
    3331  alignment,
    3332  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3333  }
    3334  else
    3335  {
    3336  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3337  }
    3338 }
    3339 
    3340 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3341 {
    3342  if((pAllocationCallbacks != VMA_NULL) &&
    3343  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3344  {
    3345  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3346  }
    3347  else
    3348  {
    3349  VMA_SYSTEM_FREE(ptr);
    3350  }
    3351 }
    3352 
    3353 template<typename T>
    3354 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3355 {
    3356  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3357 }
    3358 
    3359 template<typename T>
    3360 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3361 {
    3362  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3363 }
    3364 
    3365 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3366 
    3367 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3368 
    3369 template<typename T>
    3370 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3371 {
    3372  ptr->~T();
    3373  VmaFree(pAllocationCallbacks, ptr);
    3374 }
    3375 
    3376 template<typename T>
    3377 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3378 {
    3379  if(ptr != VMA_NULL)
    3380  {
    3381  for(size_t i = count; i--; )
    3382  {
    3383  ptr[i].~T();
    3384  }
    3385  VmaFree(pAllocationCallbacks, ptr);
    3386  }
    3387 }
    3388 
    3389 // STL-compatible allocator.
    3390 template<typename T>
    3391 class VmaStlAllocator
    3392 {
    3393 public:
    3394  const VkAllocationCallbacks* const m_pCallbacks;
    3395  typedef T value_type;
    3396 
    3397  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3398  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3399 
    3400  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3401  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3402 
    3403  template<typename U>
    3404  bool operator==(const VmaStlAllocator<U>& rhs) const
    3405  {
    3406  return m_pCallbacks == rhs.m_pCallbacks;
    3407  }
    3408  template<typename U>
    3409  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3410  {
    3411  return m_pCallbacks != rhs.m_pCallbacks;
    3412  }
    3413 
    3414  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3415 };
    3416 
    3417 #if VMA_USE_STL_VECTOR
    3418 
    3419 #define VmaVector std::vector
    3420 
    3421 template<typename T, typename allocatorT>
    3422 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3423 {
    3424  vec.insert(vec.begin() + index, item);
    3425 }
    3426 
    3427 template<typename T, typename allocatorT>
    3428 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3429 {
    3430  vec.erase(vec.begin() + index);
    3431 }
    3432 
    3433 #else // #if VMA_USE_STL_VECTOR
    3434 
    3435 /* Class with interface compatible with subset of std::vector.
    3436 T must be POD because constructors and destructors are not called and memcpy is
    3437 used for these objects. */
    3438 template<typename T, typename AllocatorT>
    3439 class VmaVector
    3440 {
    3441 public:
    3442  typedef T value_type;
    3443 
    3444  VmaVector(const AllocatorT& allocator) :
    3445  m_Allocator(allocator),
    3446  m_pArray(VMA_NULL),
    3447  m_Count(0),
    3448  m_Capacity(0)
    3449  {
    3450  }
    3451 
    3452  VmaVector(size_t count, const AllocatorT& allocator) :
    3453  m_Allocator(allocator),
    3454  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3455  m_Count(count),
    3456  m_Capacity(count)
    3457  {
    3458  }
    3459 
    3460  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3461  m_Allocator(src.m_Allocator),
    3462  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3463  m_Count(src.m_Count),
    3464  m_Capacity(src.m_Count)
    3465  {
    3466  if(m_Count != 0)
    3467  {
    3468  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3469  }
    3470  }
    3471 
    3472  ~VmaVector()
    3473  {
    3474  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3475  }
    3476 
    3477  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3478  {
    3479  if(&rhs != this)
    3480  {
    3481  resize(rhs.m_Count);
    3482  if(m_Count != 0)
    3483  {
    3484  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3485  }
    3486  }
    3487  return *this;
    3488  }
    3489 
    3490  bool empty() const { return m_Count == 0; }
    3491  size_t size() const { return m_Count; }
    3492  T* data() { return m_pArray; }
    3493  const T* data() const { return m_pArray; }
    3494 
    3495  T& operator[](size_t index)
    3496  {
    3497  VMA_HEAVY_ASSERT(index < m_Count);
    3498  return m_pArray[index];
    3499  }
    3500  const T& operator[](size_t index) const
    3501  {
    3502  VMA_HEAVY_ASSERT(index < m_Count);
    3503  return m_pArray[index];
    3504  }
    3505 
    3506  T& front()
    3507  {
    3508  VMA_HEAVY_ASSERT(m_Count > 0);
    3509  return m_pArray[0];
    3510  }
    3511  const T& front() const
    3512  {
    3513  VMA_HEAVY_ASSERT(m_Count > 0);
    3514  return m_pArray[0];
    3515  }
    3516  T& back()
    3517  {
    3518  VMA_HEAVY_ASSERT(m_Count > 0);
    3519  return m_pArray[m_Count - 1];
    3520  }
    3521  const T& back() const
    3522  {
    3523  VMA_HEAVY_ASSERT(m_Count > 0);
    3524  return m_pArray[m_Count - 1];
    3525  }
    3526 
    3527  void reserve(size_t newCapacity, bool freeMemory = false)
    3528  {
    3529  newCapacity = VMA_MAX(newCapacity, m_Count);
    3530 
    3531  if((newCapacity < m_Capacity) && !freeMemory)
    3532  {
    3533  newCapacity = m_Capacity;
    3534  }
    3535 
    3536  if(newCapacity != m_Capacity)
    3537  {
    3538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3539  if(m_Count != 0)
    3540  {
    3541  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3542  }
    3543  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3544  m_Capacity = newCapacity;
    3545  m_pArray = newArray;
    3546  }
    3547  }
    3548 
    3549  void resize(size_t newCount, bool freeMemory = false)
    3550  {
    3551  size_t newCapacity = m_Capacity;
    3552  if(newCount > m_Capacity)
    3553  {
    3554  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3555  }
    3556  else if(freeMemory)
    3557  {
    3558  newCapacity = newCount;
    3559  }
    3560 
    3561  if(newCapacity != m_Capacity)
    3562  {
    3563  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3564  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3565  if(elementsToCopy != 0)
    3566  {
    3567  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3568  }
    3569  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3570  m_Capacity = newCapacity;
    3571  m_pArray = newArray;
    3572  }
    3573 
    3574  m_Count = newCount;
    3575  }
    3576 
    3577  void clear(bool freeMemory = false)
    3578  {
    3579  resize(0, freeMemory);
    3580  }
    3581 
    3582  void insert(size_t index, const T& src)
    3583  {
    3584  VMA_HEAVY_ASSERT(index <= m_Count);
    3585  const size_t oldCount = size();
    3586  resize(oldCount + 1);
    3587  if(index < oldCount)
    3588  {
    3589  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3590  }
    3591  m_pArray[index] = src;
    3592  }
    3593 
    3594  void remove(size_t index)
    3595  {
    3596  VMA_HEAVY_ASSERT(index < m_Count);
    3597  const size_t oldCount = size();
    3598  if(index < oldCount - 1)
    3599  {
    3600  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3601  }
    3602  resize(oldCount - 1);
    3603  }
    3604 
    3605  void push_back(const T& src)
    3606  {
    3607  const size_t newIndex = size();
    3608  resize(newIndex + 1);
    3609  m_pArray[newIndex] = src;
    3610  }
    3611 
    3612  void pop_back()
    3613  {
    3614  VMA_HEAVY_ASSERT(m_Count > 0);
    3615  resize(size() - 1);
    3616  }
    3617 
    3618  void push_front(const T& src)
    3619  {
    3620  insert(0, src);
    3621  }
    3622 
    3623  void pop_front()
    3624  {
    3625  VMA_HEAVY_ASSERT(m_Count > 0);
    3626  remove(0);
    3627  }
    3628 
    3629  typedef T* iterator;
    3630 
    3631  iterator begin() { return m_pArray; }
    3632  iterator end() { return m_pArray + m_Count; }
    3633 
    3634 private:
    3635  AllocatorT m_Allocator;
    3636  T* m_pArray;
    3637  size_t m_Count;
    3638  size_t m_Capacity;
    3639 };
    3640 
    3641 template<typename T, typename allocatorT>
    3642 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3643 {
    3644  vec.insert(index, item);
    3645 }
    3646 
    3647 template<typename T, typename allocatorT>
    3648 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3649 {
    3650  vec.remove(index);
    3651 }
    3652 
    3653 #endif // #if VMA_USE_STL_VECTOR
    3654 
    3655 template<typename CmpLess, typename VectorT>
    3656 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3657 {
    3658  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3659  vector.data(),
    3660  vector.data() + vector.size(),
    3661  value,
    3662  CmpLess()) - vector.data();
    3663  VmaVectorInsert(vector, indexToInsert, value);
    3664  return indexToInsert;
    3665 }
    3666 
    3667 template<typename CmpLess, typename VectorT>
    3668 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3669 {
    3670  CmpLess comparator;
    3671  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3672  vector.begin(),
    3673  vector.end(),
    3674  value,
    3675  comparator);
    3676  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3677  {
    3678  size_t indexToRemove = it - vector.begin();
    3679  VmaVectorRemove(vector, indexToRemove);
    3680  return true;
    3681  }
    3682  return false;
    3683 }
    3684 
    3685 template<typename CmpLess, typename IterT, typename KeyT>
    3686 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3687 {
    3688  CmpLess comparator;
    3689  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3690  beg, end, value, comparator);
    3691  if(it == end ||
    3692  (!comparator(*it, value) && !comparator(value, *it)))
    3693  {
    3694  return it;
    3695  }
    3696  return end;
    3697 }
    3698 
    3700 // class VmaPoolAllocator
    3701 
    3702 /*
    3703 Allocator for objects of type T using a list of arrays (pools) to speed up
    3704 allocation. Number of elements that can be allocated is not bounded because
    3705 allocator can create multiple blocks.
    3706 */
    3707 template<typename T>
    3708 class VmaPoolAllocator
    3709 {
    3710  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    3711 public:
    3712  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    3713  ~VmaPoolAllocator();
    3714  void Clear();
    3715  T* Alloc();
    3716  void Free(T* ptr);
    3717 
    3718 private:
    3719  union Item
    3720  {
    3721  uint32_t NextFreeIndex;
    3722  T Value;
    3723  };
    3724 
    3725  struct ItemBlock
    3726  {
    3727  Item* pItems;
    3728  uint32_t FirstFreeIndex;
    3729  };
    3730 
    3731  const VkAllocationCallbacks* m_pAllocationCallbacks;
    3732  size_t m_ItemsPerBlock;
    3733  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    3734 
    3735  ItemBlock& CreateNewBlock();
    3736 };
    3737 
    3738 template<typename T>
    3739 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    3740  m_pAllocationCallbacks(pAllocationCallbacks),
    3741  m_ItemsPerBlock(itemsPerBlock),
    3742  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    3743 {
    3744  VMA_ASSERT(itemsPerBlock > 0);
    3745 }
    3746 
    3747 template<typename T>
    3748 VmaPoolAllocator<T>::~VmaPoolAllocator()
    3749 {
    3750  Clear();
    3751 }
    3752 
    3753 template<typename T>
    3754 void VmaPoolAllocator<T>::Clear()
    3755 {
    3756  for(size_t i = m_ItemBlocks.size(); i--; )
    3757  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3758  m_ItemBlocks.clear();
    3759 }
    3760 
    3761 template<typename T>
    3762 T* VmaPoolAllocator<T>::Alloc()
    3763 {
    3764  for(size_t i = m_ItemBlocks.size(); i--; )
    3765  {
    3766  ItemBlock& block = m_ItemBlocks[i];
    3767  // This block has some free items: Use first one.
    3768  if(block.FirstFreeIndex != UINT32_MAX)
    3769  {
    3770  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3771  block.FirstFreeIndex = pItem->NextFreeIndex;
    3772  return &pItem->Value;
    3773  }
    3774  }
    3775 
    3776  // No block has free item: Create new one and use it.
    3777  ItemBlock& newBlock = CreateNewBlock();
    3778  Item* const pItem = &newBlock.pItems[0];
    3779  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3780  return &pItem->Value;
    3781 }
    3782 
    3783 template<typename T>
    3784 void VmaPoolAllocator<T>::Free(T* ptr)
    3785 {
    3786  // Search all memory blocks to find ptr.
    3787  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3788  {
    3789  ItemBlock& block = m_ItemBlocks[i];
    3790 
    3791  // Casting to union.
    3792  Item* pItemPtr;
    3793  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3794 
    3795  // Check if pItemPtr is in address range of this block.
    3796  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3797  {
    3798  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3799  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3800  block.FirstFreeIndex = index;
    3801  return;
    3802  }
    3803  }
    3804  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3805 }
    3806 
    3807 template<typename T>
    3808 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3809 {
    3810  ItemBlock newBlock = {
    3811  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3812 
    3813  m_ItemBlocks.push_back(newBlock);
    3814 
    3815  // Setup singly-linked list of all free items in this block.
    3816  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3817  newBlock.pItems[i].NextFreeIndex = i + 1;
    3818  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3819  return m_ItemBlocks.back();
    3820 }
    3821 
    3823 // class VmaRawList, VmaList
    3824 
    3825 #if VMA_USE_STL_LIST
    3826 
    3827 #define VmaList std::list
    3828 
    3829 #else // #if VMA_USE_STL_LIST
    3830 
    3831 template<typename T>
    3832 struct VmaListItem
    3833 {
    3834  VmaListItem* pPrev;
    3835  VmaListItem* pNext;
    3836  T Value;
    3837 };
    3838 
    3839 // Doubly linked list.
    3840 template<typename T>
    3841 class VmaRawList
    3842 {
    3843  VMA_CLASS_NO_COPY(VmaRawList)
    3844 public:
    3845  typedef VmaListItem<T> ItemType;
    3846 
    3847  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    3848  ~VmaRawList();
    3849  void Clear();
    3850 
    3851  size_t GetCount() const { return m_Count; }
    3852  bool IsEmpty() const { return m_Count == 0; }
    3853 
    3854  ItemType* Front() { return m_pFront; }
    3855  const ItemType* Front() const { return m_pFront; }
    3856  ItemType* Back() { return m_pBack; }
    3857  const ItemType* Back() const { return m_pBack; }
    3858 
    3859  ItemType* PushBack();
    3860  ItemType* PushFront();
    3861  ItemType* PushBack(const T& value);
    3862  ItemType* PushFront(const T& value);
    3863  void PopBack();
    3864  void PopFront();
    3865 
    3866  // Item can be null - it means PushBack.
    3867  ItemType* InsertBefore(ItemType* pItem);
    3868  // Item can be null - it means PushFront.
    3869  ItemType* InsertAfter(ItemType* pItem);
    3870 
    3871  ItemType* InsertBefore(ItemType* pItem, const T& value);
    3872  ItemType* InsertAfter(ItemType* pItem, const T& value);
    3873 
    3874  void Remove(ItemType* pItem);
    3875 
    3876 private:
    3877  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    3878  VmaPoolAllocator<ItemType> m_ItemAllocator;
    3879  ItemType* m_pFront;
    3880  ItemType* m_pBack;
    3881  size_t m_Count;
    3882 };
    3883 
    3884 template<typename T>
    3885 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    3886  m_pAllocationCallbacks(pAllocationCallbacks),
    3887  m_ItemAllocator(pAllocationCallbacks, 128),
    3888  m_pFront(VMA_NULL),
    3889  m_pBack(VMA_NULL),
    3890  m_Count(0)
    3891 {
    3892 }
    3893 
    3894 template<typename T>
    3895 VmaRawList<T>::~VmaRawList()
    3896 {
    3897  // Intentionally not calling Clear, because that would be unnecessary
    3898  // computations to return all items to m_ItemAllocator as free.
    3899 }
    3900 
    3901 template<typename T>
    3902 void VmaRawList<T>::Clear()
    3903 {
    3904  if(IsEmpty() == false)
    3905  {
    3906  ItemType* pItem = m_pBack;
    3907  while(pItem != VMA_NULL)
    3908  {
    3909  ItemType* const pPrevItem = pItem->pPrev;
    3910  m_ItemAllocator.Free(pItem);
    3911  pItem = pPrevItem;
    3912  }
    3913  m_pFront = VMA_NULL;
    3914  m_pBack = VMA_NULL;
    3915  m_Count = 0;
    3916  }
    3917 }
    3918 
    3919 template<typename T>
    3920 VmaListItem<T>* VmaRawList<T>::PushBack()
    3921 {
    3922  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3923  pNewItem->pNext = VMA_NULL;
    3924  if(IsEmpty())
    3925  {
    3926  pNewItem->pPrev = VMA_NULL;
    3927  m_pFront = pNewItem;
    3928  m_pBack = pNewItem;
    3929  m_Count = 1;
    3930  }
    3931  else
    3932  {
    3933  pNewItem->pPrev = m_pBack;
    3934  m_pBack->pNext = pNewItem;
    3935  m_pBack = pNewItem;
    3936  ++m_Count;
    3937  }
    3938  return pNewItem;
    3939 }
    3940 
    3941 template<typename T>
    3942 VmaListItem<T>* VmaRawList<T>::PushFront()
    3943 {
    3944  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3945  pNewItem->pPrev = VMA_NULL;
    3946  if(IsEmpty())
    3947  {
    3948  pNewItem->pNext = VMA_NULL;
    3949  m_pFront = pNewItem;
    3950  m_pBack = pNewItem;
    3951  m_Count = 1;
    3952  }
    3953  else
    3954  {
    3955  pNewItem->pNext = m_pFront;
    3956  m_pFront->pPrev = pNewItem;
    3957  m_pFront = pNewItem;
    3958  ++m_Count;
    3959  }
    3960  return pNewItem;
    3961 }
    3962 
    3963 template<typename T>
    3964 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3965 {
    3966  ItemType* const pNewItem = PushBack();
    3967  pNewItem->Value = value;
    3968  return pNewItem;
    3969 }
    3970 
    3971 template<typename T>
    3972 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3973 {
    3974  ItemType* const pNewItem = PushFront();
    3975  pNewItem->Value = value;
    3976  return pNewItem;
    3977 }
    3978 
    3979 template<typename T>
    3980 void VmaRawList<T>::PopBack()
    3981 {
    3982  VMA_HEAVY_ASSERT(m_Count > 0);
    3983  ItemType* const pBackItem = m_pBack;
    3984  ItemType* const pPrevItem = pBackItem->pPrev;
    3985  if(pPrevItem != VMA_NULL)
    3986  {
    3987  pPrevItem->pNext = VMA_NULL;
    3988  }
    3989  m_pBack = pPrevItem;
    3990  m_ItemAllocator.Free(pBackItem);
    3991  --m_Count;
    3992 }
    3993 
    3994 template<typename T>
    3995 void VmaRawList<T>::PopFront()
    3996 {
    3997  VMA_HEAVY_ASSERT(m_Count > 0);
    3998  ItemType* const pFrontItem = m_pFront;
    3999  ItemType* const pNextItem = pFrontItem->pNext;
    4000  if(pNextItem != VMA_NULL)
    4001  {
    4002  pNextItem->pPrev = VMA_NULL;
    4003  }
    4004  m_pFront = pNextItem;
    4005  m_ItemAllocator.Free(pFrontItem);
    4006  --m_Count;
    4007 }
    4008 
    4009 template<typename T>
    4010 void VmaRawList<T>::Remove(ItemType* pItem)
    4011 {
    4012  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4013  VMA_HEAVY_ASSERT(m_Count > 0);
    4014 
    4015  if(pItem->pPrev != VMA_NULL)
    4016  {
    4017  pItem->pPrev->pNext = pItem->pNext;
    4018  }
    4019  else
    4020  {
    4021  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4022  m_pFront = pItem->pNext;
    4023  }
    4024 
    4025  if(pItem->pNext != VMA_NULL)
    4026  {
    4027  pItem->pNext->pPrev = pItem->pPrev;
    4028  }
    4029  else
    4030  {
    4031  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4032  m_pBack = pItem->pPrev;
    4033  }
    4034 
    4035  m_ItemAllocator.Free(pItem);
    4036  --m_Count;
    4037 }
    4038 
    4039 template<typename T>
    4040 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4041 {
    4042  if(pItem != VMA_NULL)
    4043  {
    4044  ItemType* const prevItem = pItem->pPrev;
    4045  ItemType* const newItem = m_ItemAllocator.Alloc();
    4046  newItem->pPrev = prevItem;
    4047  newItem->pNext = pItem;
    4048  pItem->pPrev = newItem;
    4049  if(prevItem != VMA_NULL)
    4050  {
    4051  prevItem->pNext = newItem;
    4052  }
    4053  else
    4054  {
    4055  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4056  m_pFront = newItem;
    4057  }
    4058  ++m_Count;
    4059  return newItem;
    4060  }
    4061  else
    4062  return PushBack();
    4063 }
    4064 
    4065 template<typename T>
    4066 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4067 {
    4068  if(pItem != VMA_NULL)
    4069  {
    4070  ItemType* const nextItem = pItem->pNext;
    4071  ItemType* const newItem = m_ItemAllocator.Alloc();
    4072  newItem->pNext = nextItem;
    4073  newItem->pPrev = pItem;
    4074  pItem->pNext = newItem;
    4075  if(nextItem != VMA_NULL)
    4076  {
    4077  nextItem->pPrev = newItem;
    4078  }
    4079  else
    4080  {
    4081  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4082  m_pBack = newItem;
    4083  }
    4084  ++m_Count;
    4085  return newItem;
    4086  }
    4087  else
    4088  return PushFront();
    4089 }
    4090 
    4091 template<typename T>
    4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4093 {
    4094  ItemType* const newItem = InsertBefore(pItem);
    4095  newItem->Value = value;
    4096  return newItem;
    4097 }
    4098 
    4099 template<typename T>
    4100 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4101 {
    4102  ItemType* const newItem = InsertAfter(pItem);
    4103  newItem->Value = value;
    4104  return newItem;
    4105 }
    4106 
    4107 template<typename T, typename AllocatorT>
    4108 class VmaList
    4109 {
    4110  VMA_CLASS_NO_COPY(VmaList)
    4111 public:
    4112  class iterator
    4113  {
    4114  public:
    4115  iterator() :
    4116  m_pList(VMA_NULL),
    4117  m_pItem(VMA_NULL)
    4118  {
    4119  }
    4120 
    4121  T& operator*() const
    4122  {
    4123  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4124  return m_pItem->Value;
    4125  }
    4126  T* operator->() const
    4127  {
    4128  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4129  return &m_pItem->Value;
    4130  }
    4131 
    4132  iterator& operator++()
    4133  {
    4134  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4135  m_pItem = m_pItem->pNext;
    4136  return *this;
    4137  }
    4138  iterator& operator--()
    4139  {
    4140  if(m_pItem != VMA_NULL)
    4141  {
    4142  m_pItem = m_pItem->pPrev;
    4143  }
    4144  else
    4145  {
    4146  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4147  m_pItem = m_pList->Back();
    4148  }
    4149  return *this;
    4150  }
    4151 
    4152  iterator operator++(int)
    4153  {
    4154  iterator result = *this;
    4155  ++*this;
    4156  return result;
    4157  }
    4158  iterator operator--(int)
    4159  {
    4160  iterator result = *this;
    4161  --*this;
    4162  return result;
    4163  }
    4164 
    4165  bool operator==(const iterator& rhs) const
    4166  {
    4167  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4168  return m_pItem == rhs.m_pItem;
    4169  }
    4170  bool operator!=(const iterator& rhs) const
    4171  {
    4172  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4173  return m_pItem != rhs.m_pItem;
    4174  }
    4175 
    4176  private:
    4177  VmaRawList<T>* m_pList;
    4178  VmaListItem<T>* m_pItem;
    4179 
    4180  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4181  m_pList(pList),
    4182  m_pItem(pItem)
    4183  {
    4184  }
    4185 
    4186  friend class VmaList<T, AllocatorT>;
    4187  };
    4188 
    4189  class const_iterator
    4190  {
    4191  public:
    4192  const_iterator() :
    4193  m_pList(VMA_NULL),
    4194  m_pItem(VMA_NULL)
    4195  {
    4196  }
    4197 
    4198  const_iterator(const iterator& src) :
    4199  m_pList(src.m_pList),
    4200  m_pItem(src.m_pItem)
    4201  {
    4202  }
    4203 
    4204  const T& operator*() const
    4205  {
    4206  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4207  return m_pItem->Value;
    4208  }
    4209  const T* operator->() const
    4210  {
    4211  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4212  return &m_pItem->Value;
    4213  }
    4214 
    4215  const_iterator& operator++()
    4216  {
    4217  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4218  m_pItem = m_pItem->pNext;
    4219  return *this;
    4220  }
    4221  const_iterator& operator--()
    4222  {
    4223  if(m_pItem != VMA_NULL)
    4224  {
    4225  m_pItem = m_pItem->pPrev;
    4226  }
    4227  else
    4228  {
    4229  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4230  m_pItem = m_pList->Back();
    4231  }
    4232  return *this;
    4233  }
    4234 
    4235  const_iterator operator++(int)
    4236  {
    4237  const_iterator result = *this;
    4238  ++*this;
    4239  return result;
    4240  }
    4241  const_iterator operator--(int)
    4242  {
    4243  const_iterator result = *this;
    4244  --*this;
    4245  return result;
    4246  }
    4247 
    4248  bool operator==(const const_iterator& rhs) const
    4249  {
    4250  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4251  return m_pItem == rhs.m_pItem;
    4252  }
    4253  bool operator!=(const const_iterator& rhs) const
    4254  {
    4255  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4256  return m_pItem != rhs.m_pItem;
    4257  }
    4258 
    4259  private:
    4260  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4261  m_pList(pList),
    4262  m_pItem(pItem)
    4263  {
    4264  }
    4265 
    4266  const VmaRawList<T>* m_pList;
    4267  const VmaListItem<T>* m_pItem;
    4268 
    4269  friend class VmaList<T, AllocatorT>;
    4270  };
    4271 
    4272  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4273 
    4274  bool empty() const { return m_RawList.IsEmpty(); }
    4275  size_t size() const { return m_RawList.GetCount(); }
    4276 
    4277  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4278  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4279 
    4280  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4281  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4282 
    4283  void clear() { m_RawList.Clear(); }
    4284  void push_back(const T& value) { m_RawList.PushBack(value); }
    4285  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4286  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4287 
    4288 private:
    4289  VmaRawList<T> m_RawList;
    4290 };
    4291 
    4292 #endif // #if VMA_USE_STL_LIST
    4293 
    4295 // class VmaMap
    4296 
    4297 // Unused in this version.
    4298 #if 0
    4299 
    4300 #if VMA_USE_STL_UNORDERED_MAP
    4301 
    4302 #define VmaPair std::pair
    4303 
    4304 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4305  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4306 
    4307 #else // #if VMA_USE_STL_UNORDERED_MAP
    4308 
    4309 template<typename T1, typename T2>
    4310 struct VmaPair
    4311 {
    4312  T1 first;
    4313  T2 second;
    4314 
    4315  VmaPair() : first(), second() { }
    4316  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4317 };
    4318 
    4319 /* Class compatible with subset of interface of std::unordered_map.
    4320 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4321 */
    4322 template<typename KeyT, typename ValueT>
    4323 class VmaMap
    4324 {
    4325 public:
    4326  typedef VmaPair<KeyT, ValueT> PairType;
    4327  typedef PairType* iterator;
    4328 
    4329  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4330 
    4331  iterator begin() { return m_Vector.begin(); }
    4332  iterator end() { return m_Vector.end(); }
    4333 
    4334  void insert(const PairType& pair);
    4335  iterator find(const KeyT& key);
    4336  void erase(iterator it);
    4337 
    4338 private:
    4339  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4340 };
    4341 
    4342 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4343 
    4344 template<typename FirstT, typename SecondT>
    4345 struct VmaPairFirstLess
    4346 {
    4347  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4348  {
    4349  return lhs.first < rhs.first;
    4350  }
    4351  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4352  {
    4353  return lhs.first < rhsFirst;
    4354  }
    4355 };
    4356 
    4357 template<typename KeyT, typename ValueT>
    4358 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4359 {
    4360  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4361  m_Vector.data(),
    4362  m_Vector.data() + m_Vector.size(),
    4363  pair,
    4364  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4365  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4366 }
    4367 
    4368 template<typename KeyT, typename ValueT>
    4369 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4370 {
    4371  PairType* it = VmaBinaryFindFirstNotLess(
    4372  m_Vector.data(),
    4373  m_Vector.data() + m_Vector.size(),
    4374  key,
    4375  VmaPairFirstLess<KeyT, ValueT>());
    4376  if((it != m_Vector.end()) && (it->first == key))
    4377  {
    4378  return it;
    4379  }
    4380  else
    4381  {
    4382  return m_Vector.end();
    4383  }
    4384 }
    4385 
    4386 template<typename KeyT, typename ValueT>
    4387 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4388 {
    4389  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4390 }
    4391 
    4392 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4393 
    4394 #endif // #if 0
    4395 
    4397 
    4398 class VmaDeviceMemoryBlock;
    4399 
    4400 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4401 
    4402 struct VmaAllocation_T
    4403 {
    4404  VMA_CLASS_NO_COPY(VmaAllocation_T)
    4405 private:
    4406  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4407 
    4408  enum FLAGS
    4409  {
    4410  FLAG_USER_DATA_STRING = 0x01,
    4411  };
    4412 
    4413 public:
    4414  enum ALLOCATION_TYPE
    4415  {
    4416  ALLOCATION_TYPE_NONE,
    4417  ALLOCATION_TYPE_BLOCK,
    4418  ALLOCATION_TYPE_DEDICATED,
    4419  };
    4420 
    4421  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
    4422  m_Alignment(1),
    4423  m_Size(0),
    4424  m_pUserData(VMA_NULL),
    4425  m_LastUseFrameIndex(currentFrameIndex),
    4426  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
    4427  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
    4428  m_MapCount(0),
    4429  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    4430  {
    4431 #if VMA_STATS_STRING_ENABLED
    4432  m_CreationFrameIndex = currentFrameIndex;
    4433  m_BufferImageUsage = 0;
    4434 #endif
    4435  }
    4436 
    4437  ~VmaAllocation_T()
    4438  {
    4439  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4440 
    4441  // Check if owned string was freed.
    4442  VMA_ASSERT(m_pUserData == VMA_NULL);
    4443  }
    4444 
    4445  void InitBlockAllocation(
    4446  VmaPool hPool,
    4447  VmaDeviceMemoryBlock* block,
    4448  VkDeviceSize offset,
    4449  VkDeviceSize alignment,
    4450  VkDeviceSize size,
    4451  VmaSuballocationType suballocationType,
    4452  bool mapped,
    4453  bool canBecomeLost)
    4454  {
    4455  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4456  VMA_ASSERT(block != VMA_NULL);
    4457  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4458  m_Alignment = alignment;
    4459  m_Size = size;
    4460  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4461  m_SuballocationType = (uint8_t)suballocationType;
    4462  m_BlockAllocation.m_hPool = hPool;
    4463  m_BlockAllocation.m_Block = block;
    4464  m_BlockAllocation.m_Offset = offset;
    4465  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4466  }
    4467 
    4468  void InitLost()
    4469  {
    4470  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4471  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    4472  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4473  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
    4474  m_BlockAllocation.m_Block = VMA_NULL;
    4475  m_BlockAllocation.m_Offset = 0;
    4476  m_BlockAllocation.m_CanBecomeLost = true;
    4477  }
    4478 
    4479  void ChangeBlockAllocation(
    4480  VmaAllocator hAllocator,
    4481  VmaDeviceMemoryBlock* block,
    4482  VkDeviceSize offset);
    4483 
    4484  // pMappedData not null means allocation is created with MAPPED flag.
    4485  void InitDedicatedAllocation(
    4486  uint32_t memoryTypeIndex,
    4487  VkDeviceMemory hMemory,
    4488  VmaSuballocationType suballocationType,
    4489  void* pMappedData,
    4490  VkDeviceSize size)
    4491  {
    4492  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4493  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    4494  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    4495  m_Alignment = 0;
    4496  m_Size = size;
    4497  m_SuballocationType = (uint8_t)suballocationType;
    4498  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4499  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    4500  m_DedicatedAllocation.m_hMemory = hMemory;
    4501  m_DedicatedAllocation.m_pMappedData = pMappedData;
    4502  }
    4503 
    4504  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    4505  VkDeviceSize GetAlignment() const { return m_Alignment; }
    4506  VkDeviceSize GetSize() const { return m_Size; }
    4507  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    4508  void* GetUserData() const { return m_pUserData; }
    4509  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    4510  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    4511 
    4512  VmaDeviceMemoryBlock* GetBlock() const
    4513  {
    4514  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    4515  return m_BlockAllocation.m_Block;
    4516  }
    4517  VkDeviceSize GetOffset() const;
    4518  VkDeviceMemory GetMemory() const;
    4519  uint32_t GetMemoryTypeIndex() const;
    4520  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    4521  void* GetMappedData() const;
    4522  bool CanBecomeLost() const;
    4523  VmaPool GetPool() const;
    4524 
    4525  uint32_t GetLastUseFrameIndex() const
    4526  {
    4527  return m_LastUseFrameIndex.load();
    4528  }
    4529  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    4530  {
    4531  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    4532  }
    4533  /*
    4534  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    4535  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    4536  - Else, returns false.
    4537 
    4538  If hAllocation is already lost, assert - you should not call it then.
    4539  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    4540  */
    4541  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4542 
    4543  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    4544  {
    4545  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    4546  outInfo.blockCount = 1;
    4547  outInfo.allocationCount = 1;
    4548  outInfo.unusedRangeCount = 0;
    4549  outInfo.usedBytes = m_Size;
    4550  outInfo.unusedBytes = 0;
    4551  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    4552  outInfo.unusedRangeSizeMin = UINT64_MAX;
    4553  outInfo.unusedRangeSizeMax = 0;
    4554  }
    4555 
    4556  void BlockAllocMap();
    4557  void BlockAllocUnmap();
    4558  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    4559  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    4560 
    4561 #if VMA_STATS_STRING_ENABLED
    4562  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    4563  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    4564 
    4565  void InitBufferImageUsage(uint32_t bufferImageUsage)
    4566  {
    4567  VMA_ASSERT(m_BufferImageUsage == 0);
    4568  m_BufferImageUsage = bufferImageUsage;
    4569  }
    4570 
    4571  void PrintParameters(class VmaJsonWriter& json) const;
    4572 #endif
    4573 
    4574 private:
    4575  VkDeviceSize m_Alignment;
    4576  VkDeviceSize m_Size;
    4577  void* m_pUserData;
    4578  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    4579  uint8_t m_Type; // ALLOCATION_TYPE
    4580  uint8_t m_SuballocationType; // VmaSuballocationType
    4581  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    4582  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    4583  uint8_t m_MapCount;
    4584  uint8_t m_Flags; // enum FLAGS
    4585 
    4586  // Allocation out of VmaDeviceMemoryBlock.
    4587  struct BlockAllocation
    4588  {
    4589  VmaPool m_hPool; // Null if belongs to general memory.
    4590  VmaDeviceMemoryBlock* m_Block;
    4591  VkDeviceSize m_Offset;
    4592  bool m_CanBecomeLost;
    4593  };
    4594 
    4595  // Allocation for an object that has its own private VkDeviceMemory.
    4596  struct DedicatedAllocation
    4597  {
    4598  uint32_t m_MemoryTypeIndex;
    4599  VkDeviceMemory m_hMemory;
    4600  void* m_pMappedData; // Not null means memory is mapped.
    4601  };
    4602 
    4603  union
    4604  {
    4605  // Allocation out of VmaDeviceMemoryBlock.
    4606  BlockAllocation m_BlockAllocation;
    4607  // Allocation for an object that has its own private VkDeviceMemory.
    4608  DedicatedAllocation m_DedicatedAllocation;
    4609  };
    4610 
    4611 #if VMA_STATS_STRING_ENABLED
    4612  uint32_t m_CreationFrameIndex;
    4613  uint32_t m_BufferImageUsage; // 0 if unknown.
    4614 #endif
    4615 
    4616  void FreeUserDataString(VmaAllocator hAllocator);
    4617 };
    4618 
    4619 /*
    4620 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4621 allocated memory block or free.
    4622 */
    4623 struct VmaSuballocation
    4624 {
    4625  VkDeviceSize offset;
    4626  VkDeviceSize size;
    4627  VmaAllocation hAllocation;
    4628  VmaSuballocationType type;
    4629 };
    4630 
    4631 // Comparator for offsets.
    4632 struct VmaSuballocationOffsetLess
    4633 {
    4634  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4635  {
    4636  return lhs.offset < rhs.offset;
    4637  }
    4638 };
    4639 struct VmaSuballocationOffsetGreater
    4640 {
    4641  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4642  {
    4643  return lhs.offset > rhs.offset;
    4644  }
    4645 };
    4646 
    4647 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4648 
    4649 // Cost of one additional allocation lost, as equivalent in bytes.
    4650 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4651 
    4652 /*
    4653 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4654 
    4655 If canMakeOtherLost was false:
    4656 - item points to a FREE suballocation.
    4657 - itemsToMakeLostCount is 0.
    4658 
    4659 If canMakeOtherLost was true:
    4660 - item points to first of sequence of suballocations, which are either FREE,
    4661  or point to VmaAllocations that can become lost.
    4662 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4663  the requested allocation to succeed.
    4664 */
    4665 struct VmaAllocationRequest
    4666 {
    4667  VkDeviceSize offset;
    4668  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4669  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4670  VmaSuballocationList::iterator item;
    4671  size_t itemsToMakeLostCount;
    4672  void* customData;
    4673 
    4674  VkDeviceSize CalcCost() const
    4675  {
    4676  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4677  }
    4678 };
    4679 
    4680 /*
    4681 Data structure used for bookkeeping of allocations and unused ranges of memory
    4682 in a single VkDeviceMemory block.
    4683 */
    4684 class VmaBlockMetadata
    4685 {
    4686 public:
    4687  VmaBlockMetadata(VmaAllocator hAllocator);
    4688  virtual ~VmaBlockMetadata() { }
    4689  virtual void Init(VkDeviceSize size) { m_Size = size; }
    4690 
    4691  // Validates all data structures inside this object. If not valid, returns false.
    4692  virtual bool Validate() const = 0;
    4693  VkDeviceSize GetSize() const { return m_Size; }
    4694  virtual size_t GetAllocationCount() const = 0;
    4695  virtual VkDeviceSize GetSumFreeSize() const = 0;
    4696  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    4697  // Returns true if this block is empty - contains only single free suballocation.
    4698  virtual bool IsEmpty() const = 0;
    4699 
    4700  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    4701  // Shouldn't modify blockCount.
    4702  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    4703 
    4704 #if VMA_STATS_STRING_ENABLED
    4705  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    4706 #endif
    4707 
    4708  // Tries to find a place for suballocation with given parameters inside this block.
    4709  // If succeeded, fills pAllocationRequest and returns true.
    4710  // If failed, returns false.
    4711  virtual bool CreateAllocationRequest(
    4712  uint32_t currentFrameIndex,
    4713  uint32_t frameInUseCount,
    4714  VkDeviceSize bufferImageGranularity,
    4715  VkDeviceSize allocSize,
    4716  VkDeviceSize allocAlignment,
    4717  bool upperAddress,
    4718  VmaSuballocationType allocType,
    4719  bool canMakeOtherLost,
    4720  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
    4721  VmaAllocationRequest* pAllocationRequest) = 0;
    4722 
    4723  virtual bool MakeRequestedAllocationsLost(
    4724  uint32_t currentFrameIndex,
    4725  uint32_t frameInUseCount,
    4726  VmaAllocationRequest* pAllocationRequest) = 0;
    4727 
    4728  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    4729 
    4730  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    4731 
    4732  // Makes actual allocation based on request. Request must already be checked and valid.
    4733  virtual void Alloc(
    4734  const VmaAllocationRequest& request,
    4735  VmaSuballocationType type,
    4736  VkDeviceSize allocSize,
    4737  bool upperAddress,
    4738  VmaAllocation hAllocation) = 0;
    4739 
    4740  // Frees suballocation assigned to given memory region.
    4741  virtual void Free(const VmaAllocation allocation) = 0;
    4742  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    4743 
    4744 protected:
    4745  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    4746 
    4747 #if VMA_STATS_STRING_ENABLED
    4748  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    4749  VkDeviceSize unusedBytes,
    4750  size_t allocationCount,
    4751  size_t unusedRangeCount) const;
    4752  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    4753  VkDeviceSize offset,
    4754  VmaAllocation hAllocation) const;
    4755  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    4756  VkDeviceSize offset,
    4757  VkDeviceSize size) const;
    4758  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    4759 #endif
    4760 
    4761 private:
    4762  VkDeviceSize m_Size;
    4763  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4764 };
    4765 
    4766 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    4767  VMA_ASSERT(0 && "Validation failed: " #cond); \
    4768  return false; \
    4769  } } while(false)
    4770 
    4771 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    4772 {
    4773  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    4774 public:
    4775  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    4776  virtual ~VmaBlockMetadata_Generic();
    4777  virtual void Init(VkDeviceSize size);
    4778 
    4779  virtual bool Validate() const;
    4780  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    4781  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4782  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    4783  virtual bool IsEmpty() const;
    4784 
    4785  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4786  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    4787 
    4788 #if VMA_STATS_STRING_ENABLED
    4789  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    4790 #endif
    4791 
    4792  virtual bool CreateAllocationRequest(
    4793  uint32_t currentFrameIndex,
    4794  uint32_t frameInUseCount,
    4795  VkDeviceSize bufferImageGranularity,
    4796  VkDeviceSize allocSize,
    4797  VkDeviceSize allocAlignment,
    4798  bool upperAddress,
    4799  VmaSuballocationType allocType,
    4800  bool canMakeOtherLost,
    4801  uint32_t strategy,
    4802  VmaAllocationRequest* pAllocationRequest);
    4803 
    4804  virtual bool MakeRequestedAllocationsLost(
    4805  uint32_t currentFrameIndex,
    4806  uint32_t frameInUseCount,
    4807  VmaAllocationRequest* pAllocationRequest);
    4808 
    4809  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4810 
    4811  virtual VkResult CheckCorruption(const void* pBlockData);
    4812 
    4813  virtual void Alloc(
    4814  const VmaAllocationRequest& request,
    4815  VmaSuballocationType type,
    4816  VkDeviceSize allocSize,
    4817  bool upperAddress,
    4818  VmaAllocation hAllocation);
    4819 
    4820  virtual void Free(const VmaAllocation allocation);
    4821  virtual void FreeAtOffset(VkDeviceSize offset);
    4822 
    4823 private:
    4824  uint32_t m_FreeCount;
    4825  VkDeviceSize m_SumFreeSize;
    4826  VmaSuballocationList m_Suballocations;
    4827  // Suballocations that are free and have size greater than certain threshold.
    4828  // Sorted by size, ascending.
    4829  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    4830 
    4831  bool ValidateFreeSuballocationList() const;
    4832 
    4833  // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    4834  // If yes, fills pOffset and returns true. If no, returns false.
    4835  bool CheckAllocation(
    4836  uint32_t currentFrameIndex,
    4837  uint32_t frameInUseCount,
    4838  VkDeviceSize bufferImageGranularity,
    4839  VkDeviceSize allocSize,
    4840  VkDeviceSize allocAlignment,
    4841  VmaSuballocationType allocType,
    4842  VmaSuballocationList::const_iterator suballocItem,
    4843  bool canMakeOtherLost,
    4844  VkDeviceSize* pOffset,
    4845  size_t* itemsToMakeLostCount,
    4846  VkDeviceSize* pSumFreeSize,
    4847  VkDeviceSize* pSumItemSize) const;
    4848  // Given free suballocation, it merges it with following one, which must also be free.
    4849  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    4850  // Releases given suballocation, making it free.
    4851  // Merges it with adjacent free suballocations if applicable.
    4852  // Returns iterator to new free suballocation at this place.
    4853  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    4854  // Given free suballocation, it inserts it into sorted list of
    4855  // m_FreeSuballocationsBySize if it's suitable.
    4856  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    4857  // Given free suballocation, it removes it from sorted list of
    4858  // m_FreeSuballocationsBySize if it's suitable.
    4859  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    4860 };
    4861 
    4862 /*
    4863 Allocations and their references in internal data structure look like this:
    4864 
    4865 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4866 
    4867  0 +-------+
    4868  | |
    4869  | |
    4870  | |
    4871  +-------+
    4872  | Alloc | 1st[m_1stNullItemsBeginCount]
    4873  +-------+
    4874  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4875  +-------+
    4876  | ... |
    4877  +-------+
    4878  | Alloc | 1st[1st.size() - 1]
    4879  +-------+
    4880  | |
    4881  | |
    4882  | |
    4883 GetSize() +-------+
    4884 
    4885 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4886 
    4887  0 +-------+
    4888  | Alloc | 2nd[0]
    4889  +-------+
    4890  | Alloc | 2nd[1]
    4891  +-------+
    4892  | ... |
    4893  +-------+
    4894  | Alloc | 2nd[2nd.size() - 1]
    4895  +-------+
    4896  | |
    4897  | |
    4898  | |
    4899  +-------+
    4900  | Alloc | 1st[m_1stNullItemsBeginCount]
    4901  +-------+
    4902  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4903  +-------+
    4904  | ... |
    4905  +-------+
    4906  | Alloc | 1st[1st.size() - 1]
    4907  +-------+
    4908  | |
    4909 GetSize() +-------+
    4910 
    4911 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4912 
    4913  0 +-------+
    4914  | |
    4915  | |
    4916  | |
    4917  +-------+
    4918  | Alloc | 1st[m_1stNullItemsBeginCount]
    4919  +-------+
    4920  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4921  +-------+
    4922  | ... |
    4923  +-------+
    4924  | Alloc | 1st[1st.size() - 1]
    4925  +-------+
    4926  | |
    4927  | |
    4928  | |
    4929  +-------+
    4930  | Alloc | 2nd[2nd.size() - 1]
    4931  +-------+
    4932  | ... |
    4933  +-------+
    4934  | Alloc | 2nd[1]
    4935  +-------+
    4936  | Alloc | 2nd[0]
    4937 GetSize() +-------+
    4938 
    4939 */
    4940 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    4941 {
    4942  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    4943 public:
    4944  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    4945  virtual ~VmaBlockMetadata_Linear();
    4946  virtual void Init(VkDeviceSize size);
    4947 
    4948  virtual bool Validate() const;
    4949  virtual size_t GetAllocationCount() const;
    4950  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4951  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    4952  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    4953 
    4954  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4955  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    4956 
    4957 #if VMA_STATS_STRING_ENABLED
    4958  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    4959 #endif
    4960 
    4961  virtual bool CreateAllocationRequest(
    4962  uint32_t currentFrameIndex,
    4963  uint32_t frameInUseCount,
    4964  VkDeviceSize bufferImageGranularity,
    4965  VkDeviceSize allocSize,
    4966  VkDeviceSize allocAlignment,
    4967  bool upperAddress,
    4968  VmaSuballocationType allocType,
    4969  bool canMakeOtherLost,
    4970  uint32_t strategy,
    4971  VmaAllocationRequest* pAllocationRequest);
    4972 
    4973  virtual bool MakeRequestedAllocationsLost(
    4974  uint32_t currentFrameIndex,
    4975  uint32_t frameInUseCount,
    4976  VmaAllocationRequest* pAllocationRequest);
    4977 
    4978  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4979 
    4980  virtual VkResult CheckCorruption(const void* pBlockData);
    4981 
    4982  virtual void Alloc(
    4983  const VmaAllocationRequest& request,
    4984  VmaSuballocationType type,
    4985  VkDeviceSize allocSize,
    4986  bool upperAddress,
    4987  VmaAllocation hAllocation);
    4988 
    4989  virtual void Free(const VmaAllocation allocation);
    4990  virtual void FreeAtOffset(VkDeviceSize offset);
    4991 
    4992 private:
    4993  /*
    4994  There are two suballocation vectors, used in ping-pong way.
    4995  The one with index m_1stVectorIndex is called 1st.
    4996  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    4997  2nd can be non-empty only when 1st is not empty.
    4998  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    4999  */
    5000  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5001 
    5002  enum SECOND_VECTOR_MODE
    5003  {
    5004  SECOND_VECTOR_EMPTY,
    5005  /*
    5006  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5007  all have smaller offset.
    5008  */
    5009  SECOND_VECTOR_RING_BUFFER,
    5010  /*
    5011  Suballocations in 2nd vector are upper side of double stack.
    5012  They all have offsets higher than those in 1st vector.
    5013  Top of this stack means smaller offsets, but higher indices in this vector.
    5014  */
    5015  SECOND_VECTOR_DOUBLE_STACK,
    5016  };
    5017 
    5018  VkDeviceSize m_SumFreeSize;
    5019  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5020  uint32_t m_1stVectorIndex;
    5021  SECOND_VECTOR_MODE m_2ndVectorMode;
    5022 
    5023  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5024  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5025  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5026  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5027 
    5028  // Number of items in 1st vector with hAllocation = null at the beginning.
    5029  size_t m_1stNullItemsBeginCount;
    5030  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5031  size_t m_1stNullItemsMiddleCount;
    5032  // Number of items in 2nd vector with hAllocation = null.
    5033  size_t m_2ndNullItemsCount;
    5034 
    5035  bool ShouldCompact1st() const;
    5036  void CleanupAfterFree();
    5037 };
    5038 
    5039 /*
    5040 - GetSize() is the original size of allocated memory block.
    5041 - m_UsableSize is this size aligned down to a power of two.
    5042  All allocations and calculations happen relative to m_UsableSize.
    5043 - GetUnusableSize() is the difference between them.
    5044  It is repoted as separate, unused range, not available for allocations.
    5045 
    5046 Node at level 0 has size = m_UsableSize.
    5047 Each next level contains nodes with size 2 times smaller than current level.
    5048 m_LevelCount is the maximum number of levels to use in the current object.
    5049 */
    5050 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5051 {
    5052  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5053 public:
    5054  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5055  virtual ~VmaBlockMetadata_Buddy();
    5056  virtual void Init(VkDeviceSize size);
    5057 
    5058  virtual bool Validate() const;
    5059  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5060  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5061  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5062  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5063 
    5064  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5065  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5066 
    5067 #if VMA_STATS_STRING_ENABLED
    5068  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5069 #endif
    5070 
    5071  virtual bool CreateAllocationRequest(
    5072  uint32_t currentFrameIndex,
    5073  uint32_t frameInUseCount,
    5074  VkDeviceSize bufferImageGranularity,
    5075  VkDeviceSize allocSize,
    5076  VkDeviceSize allocAlignment,
    5077  bool upperAddress,
    5078  VmaSuballocationType allocType,
    5079  bool canMakeOtherLost,
    5080  uint32_t strategy,
    5081  VmaAllocationRequest* pAllocationRequest);
    5082 
    5083  virtual bool MakeRequestedAllocationsLost(
    5084  uint32_t currentFrameIndex,
    5085  uint32_t frameInUseCount,
    5086  VmaAllocationRequest* pAllocationRequest);
    5087 
    5088  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5089 
    5090  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5091 
    5092  virtual void Alloc(
    5093  const VmaAllocationRequest& request,
    5094  VmaSuballocationType type,
    5095  VkDeviceSize allocSize,
    5096  bool upperAddress,
    5097  VmaAllocation hAllocation);
    5098 
    5099  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5100  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5101 
    5102 private:
    5103  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5104  static const size_t MAX_LEVELS = 30;
    5105 
    5106  struct ValidationContext
    5107  {
    5108  size_t calculatedAllocationCount;
    5109  size_t calculatedFreeCount;
    5110  VkDeviceSize calculatedSumFreeSize;
    5111 
    5112  ValidationContext() :
    5113  calculatedAllocationCount(0),
    5114  calculatedFreeCount(0),
    5115  calculatedSumFreeSize(0) { }
    5116  };
    5117 
    5118  struct Node
    5119  {
    5120  VkDeviceSize offset;
    5121  enum TYPE
    5122  {
    5123  TYPE_FREE,
    5124  TYPE_ALLOCATION,
    5125  TYPE_SPLIT,
    5126  TYPE_COUNT
    5127  } type;
    5128  Node* parent;
    5129  Node* buddy;
    5130 
    5131  union
    5132  {
    5133  struct
    5134  {
    5135  Node* prev;
    5136  Node* next;
    5137  } free;
    5138  struct
    5139  {
    5140  VmaAllocation alloc;
    5141  } allocation;
    5142  struct
    5143  {
    5144  Node* leftChild;
    5145  } split;
    5146  };
    5147  };
    5148 
    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    // Number of levels in the tree. Level 0 spans the whole m_UsableSize
    // (see LevelToNodeSize: each deeper level halves the node size).
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked free list per level; nodes on list i have type TYPE_FREE
    // and size LevelToNodeSize(i).
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    // Bytes lost to rounding the block size down to a power of two.
    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps a requested allocation size to the deepest tree level whose node size fits it.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
    5185 };
    5186 
    5187 /*
    5188 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5189 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5190 
    5191 Thread-safety: This class must be externally synchronized.
    5192 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block. Concrete metadata class
    // is chosen at Init() time based on 'algorithm'.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destruction requires prior Unmap of all mappings and a Destroy() call
        // that released m_hMemory.
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. 'count' is the number of map references to add/remove;
    // m_MapCount tracks the balance (see assertion in the destructor).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5255 
// Comparator that orders raw pointers by address, for use with sorted containers.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        // NOTE(review): relational '<' on pointers into different objects is
        // unspecified in standard C++; std::less<const void*> would guarantee a
        // total order — confirm plain '<' is acceptable on targeted platforms.
        return lhs < rhs;
    }
};
    5263 
    5264 class VmaDefragmentator;
    5265 
    5266 /*
    5267 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5268 Vulkan memory type.
    5269 
    5270 Synchronized internally with a mutex.
    5271 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Ensures at least m_MinBlockCount blocks exist.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator if it doesn't exist yet.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    // See the empty-block hysteresis comment below.
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state ("Synchronized internally with a mutex").
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    // Monotonic counter used to assign ids to newly created blocks.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5384 
// Implementation of the opaque VmaPool handle: a custom pool is essentially a
// block vector plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // Memory blocks backing this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once (asserted to still be 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5407 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Totals accumulated across defragmentation rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus an optional
    // out-flag set when the allocation actually gets moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if it contains more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // Name keeps its historical misspelling of "Descending"; callers
        // elsewhere in this file depend on it.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: orders BlockInfo entries (or a raw block) by
    // block pointer value, for binary search in sorted vectors.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, respecting the given budget limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged, if not null,
    // will be set when the allocation is moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5537 
    5538 #if VMA_RECORDING_ENABLED
    5539 
// Writes a log of VMA API calls (allocator/pool lifetime, allocations,
// map/unmap, flush/invalidate, buffer/image creation) to a file.
// Only compiled when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call data written with every record entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats an allocation's user data as a printable string for the log.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Buffer for a pointer rendered as text; m_Str points either here or
        // at the user-provided string.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Timing base for CallParams::time — presumably from a high-resolution
    // counter initialized in Init(); confirm in the implementation.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5635 
    5636 #endif // #if VMA_RECORDING_ENABLED
    5637 
    5638 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    // One mutex per memory type, guarding the corresponding dedicated-allocations vector.
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-specified callbacks, or null to use the default allocator.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, raised to VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY if that is larger.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5835 
    5837 // Memory allocation #2 after VmaAllocator_T definition
    5838 
    5839 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5840 {
    5841  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5842 }
    5843 
    5844 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5845 {
    5846  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5847 }
    5848 
    5849 template<typename T>
    5850 static T* VmaAllocate(VmaAllocator hAllocator)
    5851 {
    5852  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5853 }
    5854 
    5855 template<typename T>
    5856 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5857 {
    5858  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5859 }
    5860 
    5861 template<typename T>
    5862 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5863 {
    5864  if(ptr != VMA_NULL)
    5865  {
    5866  ptr->~T();
    5867  VmaFree(hAllocator, ptr);
    5868  }
    5869 }
    5870 
    5871 template<typename T>
    5872 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5873 {
    5874  if(ptr != VMA_NULL)
    5875  {
    5876  for(size_t i = count; i--; )
    5877  ptr[i].~T();
    5878  VmaFree(hAllocator, ptr);
    5879  }
    5880 }
    5881 
    5883 // VmaStringBuilder
    5884 
    5885 #if VMA_STATS_STRING_ENABLED
    5886 
// Growable character buffer used to build statistics strings.
// The stored data is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Appends the decimal representation of the number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5904 
    5905 void VmaStringBuilder::Add(const char* pStr)
    5906 {
    5907  const size_t strLen = strlen(pStr);
    5908  if(strLen > 0)
    5909  {
    5910  const size_t oldCount = m_Data.size();
    5911  m_Data.resize(oldCount + strLen);
    5912  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5913  }
    5914 }
    5915 
    5916 void VmaStringBuilder::AddNumber(uint32_t num)
    5917 {
    5918  char buf[11];
    5919  VmaUint32ToStr(buf, sizeof(buf), num);
    5920  Add(buf);
    5921 }
    5922 
    5923 void VmaStringBuilder::AddNumber(uint64_t num)
    5924 {
    5925  char buf[21];
    5926  VmaUint64ToStr(buf, sizeof(buf), num);
    5927  Add(buf);
    5928 }
    5929 
    5930 void VmaStringBuilder::AddPointer(const void* ptr)
    5931 {
    5932  char buf[21];
    5933  VmaPtrToStr(buf, sizeof(buf), ptr);
    5934  Add(buf);
    5935 }
    5936 
    5937 #endif // #if VMA_STATS_STRING_ENABLED
    5938 
    5940 // VmaJsonWriter
    5941 
    5942 #if VMA_STATS_STRING_ENABLED
    5943 
// Streaming JSON emitter writing into a VmaStringBuilder.
// Begin/End calls for objects, arrays and strings must be properly nested;
// violations are caught by assertions (including in the destructor).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call.
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building a string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values emitted so far inside this collection (for comma placement).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5992 
    5993 const char* const VmaJsonWriter::INDENT = " ";
    5994 
// Output is appended to 'sb'; pAllocationCallbacks are used only for the
// internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6001 
VmaJsonWriter::~VmaJsonWriter()
{
    // Every BeginString/BeginObject/BeginArray must have been matched by its
    // End* call before the writer is destroyed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6007 
    6008 void VmaJsonWriter::BeginObject(bool singleLine)
    6009 {
    6010  VMA_ASSERT(!m_InsideString);
    6011 
    6012  BeginValue(false);
    6013  m_SB.Add('{');
    6014 
    6015  StackItem item;
    6016  item.type = COLLECTION_TYPE_OBJECT;
    6017  item.valueCount = 0;
    6018  item.singleLineMode = singleLine;
    6019  m_Stack.push_back(item);
    6020 }
    6021 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6032 
    6033 void VmaJsonWriter::BeginArray(bool singleLine)
    6034 {
    6035  VMA_ASSERT(!m_InsideString);
    6036 
    6037  BeginValue(false);
    6038  m_SB.Add('[');
    6039 
    6040  StackItem item;
    6041  item.type = COLLECTION_TYPE_ARRAY;
    6042  item.valueCount = 0;
    6043  item.singleLineMode = singleLine;
    6044  m_Stack.push_back(item);
    6045 }
    6046 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6057 
// Convenience: writes a complete, escaped JSON string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6063 
// Opens a string value; subsequent ContinueString*() calls append to it until
// EndString(). pStr, if non-null and non-empty, is appended immediately.
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
    6076 
    6077 void VmaJsonWriter::ContinueString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(m_InsideString);
    6080 
    6081  const size_t strLen = strlen(pStr);
    6082  for(size_t i = 0; i < strLen; ++i)
    6083  {
    6084  char ch = pStr[i];
    6085  if(ch == '\\')
    6086  {
    6087  m_SB.Add("\\\\");
    6088  }
    6089  else if(ch == '"')
    6090  {
    6091  m_SB.Add("\\\"");
    6092  }
    6093  else if(ch >= 32)
    6094  {
    6095  m_SB.Add(ch);
    6096  }
    6097  else switch(ch)
    6098  {
    6099  case '\b':
    6100  m_SB.Add("\\b");
    6101  break;
    6102  case '\f':
    6103  m_SB.Add("\\f");
    6104  break;
    6105  case '\n':
    6106  m_SB.Add("\\n");
    6107  break;
    6108  case '\r':
    6109  m_SB.Add("\\r");
    6110  break;
    6111  case '\t':
    6112  m_SB.Add("\\t");
    6113  break;
    6114  default:
    6115  VMA_ASSERT(0 && "Character not currently supported.");
    6116  break;
    6117  }
    6118  }
    6119 }
    6120 
// Appends a decimal number to the currently open string (no escaping needed).
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6126 
// 64-bit overload of the numeric append above.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6132 
// Appends a pointer's textual representation to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6138 
// Closes the currently open string value, optionally appending a final chunk.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    6149 
// Writes a 32-bit unsigned number as a new JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6156 
// Writes a 64-bit unsigned number as a new JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6163 
    6164 void VmaJsonWriter::WriteBool(bool b)
    6165 {
    6166  VMA_ASSERT(!m_InsideString);
    6167  BeginValue(false);
    6168  m_SB.Add(b ? "true" : "false");
    6169 }
    6170 
// Writes the JSON "null" literal as a new value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6177 
// Common prologue for every value written: emits the appropriate separator
// (": " after an object key, ", " between elements) and indentation, then
// counts the value in the enclosing collection.
// Inside an object, values alternate key/value: positions 0, 2, 4, ... are
// keys and must be strings.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Even position inside an object => this value is a key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // Separator between a key and its value.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Separator between consecutive elements / key-value pairs.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First element of the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6206 
// Emits a newline followed by one INDENT per stack level. oneLess is used
// when closing a collection so the bracket lines up with its opening line.
// No-op when the current collection is in single-line mode.
void VmaJsonWriter::WriteIndent(bool oneLess)
{
    if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    {
        m_SB.AddNewLine();

        size_t count = m_Stack.size();
        if(count > 0 && oneLess)
        {
            --count;
        }
        for(size_t i = 0; i < count; ++i)
        {
            m_SB.Add(INDENT);
        }
    }
}
    6224 
    6225 #endif // #if VMA_STATS_STRING_ENABLED
    6226 
    6228 
// Replaces the allocation's user-data pointer.
// When IsUserDataString() is set, pUserData is treated as a null-terminated
// string: the previously stored copy is freed and the new string is duplicated
// into allocator-owned memory. Otherwise the raw pointer is stored as-is and
// the caller retains ownership.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing back the currently stored pointer would be a use-after-free
        // once the old string is released just below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            // +1 copies the null terminator as well.
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6251 
// Re-points this block allocation to a new block/offset (used e.g. during
// defragmentation). If the block actually changes, the allocation's share of
// the mapping reference count is transferred from the old block to the new one.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra map reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6273 
// Returns the allocation's byte offset within its VkDeviceMemory.
// Dedicated allocations own their whole memory object, so their offset is 0.
VkDeviceSize VmaAllocation_T::GetOffset() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Offset;
    case ALLOCATION_TYPE_DEDICATED:
        return 0;
    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    6287 
// Returns the VkDeviceMemory handle backing this allocation.
VkDeviceMemory VmaAllocation_T::GetMemory() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetDeviceMemory();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hMemory;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}
    6301 
// Returns the Vulkan memory type index the allocation was made from.
uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_MemoryTypeIndex;
    default:
        VMA_ASSERT(0);
        return UINT32_MAX;
    }
}
    6315 
// Returns the CPU pointer to this allocation's memory, or VMA_NULL when the
// allocation is not currently mapped. For block allocations the block's base
// mapping is offset by the suballocation's offset.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Mapped pointer and map count must agree.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
    6340 
    6341 bool VmaAllocation_T::CanBecomeLost() const
    6342 {
    6343  switch(m_Type)
    6344  {
    6345  case ALLOCATION_TYPE_BLOCK:
    6346  return m_BlockAllocation.m_CanBecomeLost;
    6347  case ALLOCATION_TYPE_DEDICATED:
    6348  return false;
    6349  default:
    6350  VMA_ASSERT(0);
    6351  return false;
    6352  }
    6353 }
    6354 
// Returns the custom pool this block allocation came from.
// Valid only for block allocations (dedicated ones have no pool handle here).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6360 
// Attempts to atomically mark this allocation as lost. Returns false when the
// allocation was used too recently (within frameInUseCount frames of
// currentFrameIndex). Asserts if it is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // Retry loop around the compare-exchange: another thread may update the
    // last-use index concurrently.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - the caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6392 
    6393 #if VMA_STATS_STRING_ENABLED
    6394 
// Human-readable names used in the JSON stats dump.
// Order must correspond to the values of enum VmaSuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6404 
// Emits this allocation's parameters as key-value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string - print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // 0 means "unknown/not recorded" - omitted from the dump.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6440 
    6441 #endif
    6442 
// Releases the allocator-owned copy of the user-data string, if any,
// and resets the pointer. Only valid when IsUserDataString() is set.
void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
{
    VMA_ASSERT(IsUserDataString());
    if(m_pUserData != VMA_NULL)
    {
        char* const oldStr = (char*)m_pUserData;
        // The array was allocated with length strlen + 1 (terminator included).
        const size_t oldStrLen = strlen(oldStr);
        vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
        m_pUserData = VMA_NULL;
    }
}
    6454 
// Increments this block allocation's map reference count.
// The count is stored in the low 7 bits of m_MapCount; the top bit is the
// persistent-map flag, hence the 0x7F saturation check.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}
    6468 
// Decrements this block allocation's map reference count
// (counterpart of BlockAllocMap). Asserts on unbalanced unmap.
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
    6482 
// Maps a dedicated allocation's memory, reference-counted: the first call
// performs vkMapMemory over the whole range; subsequent calls just return the
// cached pointer and bump the count (saturating at 0x7F, see m_MapCount bits).
// On success *ppData receives the mapped pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: call into Vulkan through the allocator's function table.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6519 
// Unmaps a dedicated allocation, reference-counted: vkUnmapMemory is called
// only when the count drops to zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6540 
    6541 #if VMA_STATS_STRING_ENABLED
    6542 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range, since
// with a single item they would just repeat the totals.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // compact single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // compact single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6590 
    6591 #endif // #if VMA_STATS_STRING_ENABLED
    6592 
// Strict-weak ordering of suballocation-list iterators by suballocation size.
// The heterogeneous (iterator, size) overload enables binary searches against
// a raw VkDeviceSize key, e.g. in VmaBinaryFindFirstNotLess.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6608 
    6609 
    6611 // class VmaBlockMetadata
    6612 
// Base-class constructor: size is set later via Init(); only the allocation
// callbacks are captured here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6618 
    6619 #if VMA_STATS_STRING_ENABLED
    6620 
// Opens the per-block JSON object for a detailed map dump: writes the summary
// fields, then opens the "Suballocations" array that the _Allocation /
// _UnusedRange helpers append to and _End closes.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6643 
// Appends one occupied range to the open "Suballocations" array as a compact
// single-line object: the offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6657 
// Appends one free range to the open "Suballocations" array as a compact
// single-line object with type "FREE".
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6675 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6681 
    6682 #endif // #if VMA_STATS_STRING_ENABLED
    6683 
    6685 // class VmaBlockMetadata_Generic
    6686 
// Constructs empty metadata; real state is established by Init(size).
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6695 
// Members release their own storage; nothing extra to do.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6699 
// Initializes the metadata for a freshly created block of the given size:
// one free suballocation spanning the whole block, registered in the
// by-size lookup structure.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be worth registering by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem; // iterator to the element just pushed
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6719 
// Consistency check over all metadata invariants. Returns true when valid;
// each VMA_VALIDATE fails (returns false) on the first violated invariant.
// Checked: contiguous offsets, no two adjacent free ranges, free/handle
// agreement, by-size registration and ordering, and that the running totals
// (m_SumFreeSize, m_FreeCount) match a full traversal.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A free range has no allocation handle; an occupied range must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges at least this large are kept in the by-size lookup.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the range it occupies.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6801 
    6802 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6803 {
    6804  if(!m_FreeSuballocationsBySize.empty())
    6805  {
    6806  return m_FreeSuballocationsBySize.back()->size;
    6807  }
    6808  else
    6809  {
    6810  return 0;
    6811  }
    6812 }
    6813 
// True when the block holds no allocations: exactly one suballocation,
// and it is free (spanning the whole block, as established by Init()).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6818 
// Fills outInfo with statistics for this single block: counts and byte totals
// from the running members, min/max sizes from a full traversal.
// Note: Avg fields are not computed here - min stays UINT64_MAX / max stays 0
// when a category has no entries; callers aggregate accordingly.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6852 
// Accumulates this block's contribution into pool-level statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6863 
    6864 #if VMA_STATS_STRING_ENABLED
    6865 
// Dumps this block's full suballocation layout as JSON, using the shared
// _Begin/_Allocation/_UnusedRange/_End helpers from the base class.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
    6890 
    6891 #endif // #if VMA_STATS_STRING_ENABLED
    6892 
    6893 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6894  uint32_t currentFrameIndex,
    6895  uint32_t frameInUseCount,
    6896  VkDeviceSize bufferImageGranularity,
    6897  VkDeviceSize allocSize,
    6898  VkDeviceSize allocAlignment,
    6899  bool upperAddress,
    6900  VmaSuballocationType allocType,
    6901  bool canMakeOtherLost,
    6902  uint32_t strategy,
    6903  VmaAllocationRequest* pAllocationRequest)
    6904 {
    6905  VMA_ASSERT(allocSize > 0);
    6906  VMA_ASSERT(!upperAddress);
    6907  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6908  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6909  VMA_HEAVY_ASSERT(Validate());
    6910 
    6911  // There is not enough total free space in this block to fullfill the request: Early return.
    6912  if(canMakeOtherLost == false &&
    6913  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6914  {
    6915  return false;
    6916  }
    6917 
    6918  // New algorithm, efficiently searching freeSuballocationsBySize.
    6919  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6920  if(freeSuballocCount > 0)
    6921  {
    6923  {
    6924  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6925  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6926  m_FreeSuballocationsBySize.data(),
    6927  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6928  allocSize + 2 * VMA_DEBUG_MARGIN,
    6929  VmaSuballocationItemSizeLess());
    6930  size_t index = it - m_FreeSuballocationsBySize.data();
    6931  for(; index < freeSuballocCount; ++index)
    6932  {
    6933  if(CheckAllocation(
    6934  currentFrameIndex,
    6935  frameInUseCount,
    6936  bufferImageGranularity,
    6937  allocSize,
    6938  allocAlignment,
    6939  allocType,
    6940  m_FreeSuballocationsBySize[index],
    6941  false, // canMakeOtherLost
    6942  &pAllocationRequest->offset,
    6943  &pAllocationRequest->itemsToMakeLostCount,
    6944  &pAllocationRequest->sumFreeSize,
    6945  &pAllocationRequest->sumItemSize))
    6946  {
    6947  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6948  return true;
    6949  }
    6950  }
    6951  }
    6952  else // WORST_FIT, FIRST_FIT
    6953  {
    6954  // Search staring from biggest suballocations.
    6955  for(size_t index = freeSuballocCount; index--; )
    6956  {
    6957  if(CheckAllocation(
    6958  currentFrameIndex,
    6959  frameInUseCount,
    6960  bufferImageGranularity,
    6961  allocSize,
    6962  allocAlignment,
    6963  allocType,
    6964  m_FreeSuballocationsBySize[index],
    6965  false, // canMakeOtherLost
    6966  &pAllocationRequest->offset,
    6967  &pAllocationRequest->itemsToMakeLostCount,
    6968  &pAllocationRequest->sumFreeSize,
    6969  &pAllocationRequest->sumItemSize))
    6970  {
    6971  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6972  return true;
    6973  }
    6974  }
    6975  }
    6976  }
    6977 
    6978  if(canMakeOtherLost)
    6979  {
    6980  // Brute-force algorithm. TODO: Come up with something better.
    6981 
    6982  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6983  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6984 
    6985  VmaAllocationRequest tmpAllocRequest = {};
    6986  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6987  suballocIt != m_Suballocations.end();
    6988  ++suballocIt)
    6989  {
    6990  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6991  suballocIt->hAllocation->CanBecomeLost())
    6992  {
    6993  if(CheckAllocation(
    6994  currentFrameIndex,
    6995  frameInUseCount,
    6996  bufferImageGranularity,
    6997  allocSize,
    6998  allocAlignment,
    6999  allocType,
    7000  suballocIt,
    7001  canMakeOtherLost,
    7002  &tmpAllocRequest.offset,
    7003  &tmpAllocRequest.itemsToMakeLostCount,
    7004  &tmpAllocRequest.sumFreeSize,
    7005  &tmpAllocRequest.sumItemSize))
    7006  {
    7007  tmpAllocRequest.item = suballocIt;
    7008 
    7009  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7011  {
    7012  *pAllocationRequest = tmpAllocRequest;
    7013  }
    7014  }
    7015  }
    7016  }
    7017 
    7018  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7019  {
    7020  return true;
    7021  }
    7022  }
    7023 
    7024  return false;
    7025 }
    7026 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at the request's item and walking forward. Returns false if any of
// them cannot be made lost (e.g. used too recently), true on full success with
// the request's item left pointing at a free suballocation.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free item to reach the next occupied one.
        // NOTE(review): this is an 'if', not a loop - presumably sufficient
        // because FreeSuballocation merges adjacent free ranges; confirm.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Freeing may merge with neighbors; continue from the merged item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7058 
    7059 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7060 {
    7061  uint32_t lostAllocationCount = 0;
    7062  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7063  it != m_Suballocations.end();
    7064  ++it)
    7065  {
    7066  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7067  it->hAllocation->CanBecomeLost() &&
    7068  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7069  {
    7070  it = FreeSuballocation(it);
    7071  ++lostAllocationCount;
    7072  }
    7073  }
    7074  return lostAllocationCount;
    7075 }
    7076 
    7077 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7078 {
    7079  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7080  it != m_Suballocations.end();
    7081  ++it)
    7082  {
    7083  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7084  {
    7085  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7086  {
    7087  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7088  return VK_ERROR_VALIDATION_FAILED_EXT;
    7089  }
    7090  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7091  {
    7092  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7093  return VK_ERROR_VALIDATION_FAILED_EXT;
    7094  }
    7095  }
    7096  }
    7097 
    7098  return VK_SUCCESS;
    7099 }
    7100 
// Commits an allocation previously found by a CheckAllocation/CreateAllocationRequest
// pass: converts the chosen free suballocation into a used one and inserts new free
// suballocations for leftover space (padding) before and after it. Updates
// m_FreeCount and m_SumFreeSize accordingly.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address placement is not supported by the generic metadata algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free suballocation was consumed, and each inserted
    // padding suballocation adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7166 
    7167 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7168 {
    7169  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7170  suballocItem != m_Suballocations.end();
    7171  ++suballocItem)
    7172  {
    7173  VmaSuballocation& suballoc = *suballocItem;
    7174  if(suballoc.hAllocation == allocation)
    7175  {
    7176  FreeSuballocation(suballocItem);
    7177  VMA_HEAVY_ASSERT(Validate());
    7178  return;
    7179  }
    7180  }
    7181  VMA_ASSERT(0 && "Not found!");
    7182 }
    7183 
    7184 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7185 {
    7186  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7187  suballocItem != m_Suballocations.end();
    7188  ++suballocItem)
    7189  {
    7190  VmaSuballocation& suballoc = *suballocItem;
    7191  if(suballoc.offset == offset)
    7192  {
    7193  FreeSuballocation(suballocItem);
    7194  return;
    7195  }
    7196  }
    7197  VMA_ASSERT(0 && "Not found!");
    7198 }
    7199 
    7200 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7201 {
    7202  VkDeviceSize lastSize = 0;
    7203  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7204  {
    7205  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7206 
    7207  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7208  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7209  VMA_VALIDATE(it->size >= lastSize);
    7210  lastSize = it->size;
    7211  }
    7212  return true;
    7213 }
    7214 
// Checks whether an allocation of allocSize / allocAlignment / allocType can be
// placed starting at suballocItem. On success fills *pOffset with the final,
// aligned offset. When canMakeOtherLost is true, the check may span multiple
// suballocations and also fills *itemsToMakeLostCount, *pSumFreeSize and
// *pSumItemSize describing which existing allocations would have to be made lost.
// Returns false if placement starting at this suballocation is impossible.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Path 1: the starting suballocation may be free or may be a used
        // allocation that can be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Conflicting resource type on the same granularity page:
                // push the offset to the next page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Every used suballocation covered by the request must be
                    // lost-able and old enough; otherwise this placement fails.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Path 2: no allocations may be made lost, so the starting suballocation
        // must be free and large enough by itself.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7488 
    7489 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7490 {
    7491  VMA_ASSERT(item != m_Suballocations.end());
    7492  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7493 
    7494  VmaSuballocationList::iterator nextItem = item;
    7495  ++nextItem;
    7496  VMA_ASSERT(nextItem != m_Suballocations.end());
    7497  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7498 
    7499  item->size += nextItem->size;
    7500  --m_FreeCount;
    7501  m_Suballocations.erase(nextItem);
    7502 }
    7503 
// Converts the given used suballocation to free, updates m_FreeCount and
// m_SumFreeSize, and merges it with adjacent free suballocations if present.
// Returns an iterator to the resulting (possibly merged) free suballocation,
// registered in m_FreeSuballocationsBySize.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem is absorbed into suballocItem; unregister it first because
        // its list iterator is erased inside MergeFreeWithNext().
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem; its size changes, so it must be
        // unregistered and re-registered to keep the size-sorted vector valid.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7555 
    7556 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7557 {
    7558  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7559  VMA_ASSERT(item->size > 0);
    7560 
    7561  // You may want to enable this validation at the beginning or at the end of
    7562  // this function, depending on what do you want to check.
    7563  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7564 
    7565  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7566  {
    7567  if(m_FreeSuballocationsBySize.empty())
    7568  {
    7569  m_FreeSuballocationsBySize.push_back(item);
    7570  }
    7571  else
    7572  {
    7573  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7574  }
    7575  }
    7576 
    7577  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7578 }
    7579 
    7580 
    7581 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7582 {
    7583  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7584  VMA_ASSERT(item->size > 0);
    7585 
    7586  // You may want to enable this validation at the beginning or at the end of
    7587  // this function, depending on what do you want to check.
    7588  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7589 
    7590  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7591  {
    7592  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7593  m_FreeSuballocationsBySize.data(),
    7594  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7595  item,
    7596  VmaSuballocationItemSizeLess());
    7597  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7598  index < m_FreeSuballocationsBySize.size();
    7599  ++index)
    7600  {
    7601  if(m_FreeSuballocationsBySize[index] == item)
    7602  {
    7603  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7604  return;
    7605  }
    7606  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7607  }
    7608  VMA_ASSERT(0 && "Not found.");
    7609  }
    7610 
    7611  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7612 }
    7613 
    7615 // class VmaBlockMetadata_Linear
    7616 
// Constructs linear block metadata: both suballocation vectors start empty,
// the 1st vector is vector 0, and the 2nd vector is unused (SECOND_VECTOR_EMPTY).
// All null-item counters start at zero; m_SumFreeSize is set later in Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7629 
// Trivial destructor - the suballocation vectors release their storage through
// their VmaStlAllocator members.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7633 
// Initializes metadata for a block of the given total size.
// The entire block is initially free, so m_SumFreeSize equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7639 
// Validates all invariants of the linear allocator's double-vector layout:
// null-item bookkeeping, address ordering with VMA_DEBUG_MARGIN gaps, agreement
// between suballocations and their allocation handles, and the free-size total.
// Returns true when every check passes (VMA_VALIDATE returns false otherwise).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot exist without allocations in the 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // 'offset' tracks the minimum allowed address for the next suballocation.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower addresses, so it is
    // walked first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading run of the 1st vector must consist entirely of null (freed) items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so the left operand
        // is always true and this check is vacuous - confirm the intended condition.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector occupies the upper addresses and is
    // stored top-down, so it is walked in reverse index order.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7766 
    7767 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7768 {
    7769  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7770  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7771 }
    7772 
// Returns the size of the largest contiguous region currently usable for a new
// allocation, depending on the 2nd-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First non-null item marks the start of used space; its offset is the
            // free space before the 1st vector.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd vector is a top-down stack; back() is its lowest (topmost-used) item.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    7836 
    7837 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7838 {
    7839  const VkDeviceSize size = GetSize();
    7840  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7841  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7842  const size_t suballoc1stCount = suballocations1st.size();
    7843  const size_t suballoc2ndCount = suballocations2nd.size();
    7844 
    7845  outInfo.blockCount = 1;
    7846  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7847  outInfo.unusedRangeCount = 0;
    7848  outInfo.usedBytes = 0;
    7849  outInfo.allocationSizeMin = UINT64_MAX;
    7850  outInfo.allocationSizeMax = 0;
    7851  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7852  outInfo.unusedRangeSizeMax = 0;
    7853 
    7854  VkDeviceSize lastOffset = 0;
    7855 
    7856  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7857  {
    7858  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7859  size_t nextAlloc2ndIndex = 0;
    7860  while(lastOffset < freeSpace2ndTo1stEnd)
    7861  {
    7862  // Find next non-null allocation or move nextAllocIndex to the end.
    7863  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7864  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7865  {
    7866  ++nextAlloc2ndIndex;
    7867  }
    7868 
    7869  // Found non-null allocation.
    7870  if(nextAlloc2ndIndex < suballoc2ndCount)
    7871  {
    7872  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7873 
    7874  // 1. Process free space before this allocation.
    7875  if(lastOffset < suballoc.offset)
    7876  {
    7877  // There is free space from lastOffset to suballoc.offset.
    7878  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7879  ++outInfo.unusedRangeCount;
    7880  outInfo.unusedBytes += unusedRangeSize;
    7881  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7882  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7883  }
    7884 
    7885  // 2. Process this allocation.
    7886  // There is allocation with suballoc.offset, suballoc.size.
    7887  outInfo.usedBytes += suballoc.size;
    7888  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7889  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7890 
    7891  // 3. Prepare for next iteration.
    7892  lastOffset = suballoc.offset + suballoc.size;
    7893  ++nextAlloc2ndIndex;
    7894  }
    7895  // We are at the end.
    7896  else
    7897  {
    7898  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7899  if(lastOffset < freeSpace2ndTo1stEnd)
    7900  {
    7901  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7902  ++outInfo.unusedRangeCount;
    7903  outInfo.unusedBytes += unusedRangeSize;
    7904  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7905  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7906  }
    7907 
    7908  // End of loop.
    7909  lastOffset = freeSpace2ndTo1stEnd;
    7910  }
    7911  }
    7912  }
    7913 
    7914  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7915  const VkDeviceSize freeSpace1stTo2ndEnd =
    7916  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7917  while(lastOffset < freeSpace1stTo2ndEnd)
    7918  {
    7919  // Find next non-null allocation or move nextAllocIndex to the end.
    7920  while(nextAlloc1stIndex < suballoc1stCount &&
    7921  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7922  {
    7923  ++nextAlloc1stIndex;
    7924  }
    7925 
    7926  // Found non-null allocation.
    7927  if(nextAlloc1stIndex < suballoc1stCount)
    7928  {
    7929  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7930 
    7931  // 1. Process free space before this allocation.
    7932  if(lastOffset < suballoc.offset)
    7933  {
    7934  // There is free space from lastOffset to suballoc.offset.
    7935  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7936  ++outInfo.unusedRangeCount;
    7937  outInfo.unusedBytes += unusedRangeSize;
    7938  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7939  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7940  }
    7941 
    7942  // 2. Process this allocation.
    7943  // There is allocation with suballoc.offset, suballoc.size.
    7944  outInfo.usedBytes += suballoc.size;
    7945  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7946  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7947 
    7948  // 3. Prepare for next iteration.
    7949  lastOffset = suballoc.offset + suballoc.size;
    7950  ++nextAlloc1stIndex;
    7951  }
    7952  // We are at the end.
    7953  else
    7954  {
    7955  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7956  if(lastOffset < freeSpace1stTo2ndEnd)
    7957  {
    7958  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7959  ++outInfo.unusedRangeCount;
    7960  outInfo.unusedBytes += unusedRangeSize;
    7961  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7962  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7963  }
    7964 
    7965  // End of loop.
    7966  lastOffset = freeSpace1stTo2ndEnd;
    7967  }
    7968  }
    7969 
    7970  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7971  {
    7972  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7973  while(lastOffset < size)
    7974  {
    7975  // Find next non-null allocation or move nextAllocIndex to the end.
    7976  while(nextAlloc2ndIndex != SIZE_MAX &&
    7977  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7978  {
    7979  --nextAlloc2ndIndex;
    7980  }
    7981 
    7982  // Found non-null allocation.
    7983  if(nextAlloc2ndIndex != SIZE_MAX)
    7984  {
    7985  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7986 
    7987  // 1. Process free space before this allocation.
    7988  if(lastOffset < suballoc.offset)
    7989  {
    7990  // There is free space from lastOffset to suballoc.offset.
    7991  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7992  ++outInfo.unusedRangeCount;
    7993  outInfo.unusedBytes += unusedRangeSize;
    7994  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7995  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7996  }
    7997 
    7998  // 2. Process this allocation.
    7999  // There is allocation with suballoc.offset, suballoc.size.
    8000  outInfo.usedBytes += suballoc.size;
    8001  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8002  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8003 
    8004  // 3. Prepare for next iteration.
    8005  lastOffset = suballoc.offset + suballoc.size;
    8006  --nextAlloc2ndIndex;
    8007  }
    8008  // We are at the end.
    8009  else
    8010  {
    8011  // There is free space from lastOffset to size.
    8012  if(lastOffset < size)
    8013  {
    8014  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8015  ++outInfo.unusedRangeCount;
    8016  outInfo.unusedBytes += unusedRangeSize;
    8017  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8018  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8019  }
    8020 
    8021  // End of loop.
    8022  lastOffset = size;
    8023  }
    8024  }
    8025  }
    8026 
    8027  outInfo.unusedBytes = size - outInfo.usedBytes;
    8028 }
    8029 
    8030 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8031 {
    8032  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8033  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8034  const VkDeviceSize size = GetSize();
    8035  const size_t suballoc1stCount = suballocations1st.size();
    8036  const size_t suballoc2ndCount = suballocations2nd.size();
    8037 
    8038  inoutStats.size += size;
    8039 
    8040  VkDeviceSize lastOffset = 0;
    8041 
    8042  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8043  {
    8044  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8045  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8046  while(lastOffset < freeSpace2ndTo1stEnd)
    8047  {
    8048  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8049  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8050  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8051  {
    8052  ++nextAlloc2ndIndex;
    8053  }
    8054 
    8055  // Found non-null allocation.
    8056  if(nextAlloc2ndIndex < suballoc2ndCount)
    8057  {
    8058  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8059 
    8060  // 1. Process free space before this allocation.
    8061  if(lastOffset < suballoc.offset)
    8062  {
    8063  // There is free space from lastOffset to suballoc.offset.
    8064  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8065  inoutStats.unusedSize += unusedRangeSize;
    8066  ++inoutStats.unusedRangeCount;
    8067  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8068  }
    8069 
    8070  // 2. Process this allocation.
    8071  // There is allocation with suballoc.offset, suballoc.size.
    8072  ++inoutStats.allocationCount;
    8073 
    8074  // 3. Prepare for next iteration.
    8075  lastOffset = suballoc.offset + suballoc.size;
    8076  ++nextAlloc2ndIndex;
    8077  }
    8078  // We are at the end.
    8079  else
    8080  {
    8081  if(lastOffset < freeSpace2ndTo1stEnd)
    8082  {
    8083  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8084  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8085  inoutStats.unusedSize += unusedRangeSize;
    8086  ++inoutStats.unusedRangeCount;
    8087  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8088  }
    8089 
    8090  // End of loop.
    8091  lastOffset = freeSpace2ndTo1stEnd;
    8092  }
    8093  }
    8094  }
    8095 
    8096  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8097  const VkDeviceSize freeSpace1stTo2ndEnd =
    8098  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8099  while(lastOffset < freeSpace1stTo2ndEnd)
    8100  {
    8101  // Find next non-null allocation or move nextAllocIndex to the end.
    8102  while(nextAlloc1stIndex < suballoc1stCount &&
    8103  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8104  {
    8105  ++nextAlloc1stIndex;
    8106  }
    8107 
    8108  // Found non-null allocation.
    8109  if(nextAlloc1stIndex < suballoc1stCount)
    8110  {
    8111  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8112 
    8113  // 1. Process free space before this allocation.
    8114  if(lastOffset < suballoc.offset)
    8115  {
    8116  // There is free space from lastOffset to suballoc.offset.
    8117  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8118  inoutStats.unusedSize += unusedRangeSize;
    8119  ++inoutStats.unusedRangeCount;
    8120  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8121  }
    8122 
    8123  // 2. Process this allocation.
    8124  // There is allocation with suballoc.offset, suballoc.size.
    8125  ++inoutStats.allocationCount;
    8126 
    8127  // 3. Prepare for next iteration.
    8128  lastOffset = suballoc.offset + suballoc.size;
    8129  ++nextAlloc1stIndex;
    8130  }
    8131  // We are at the end.
    8132  else
    8133  {
    8134  if(lastOffset < freeSpace1stTo2ndEnd)
    8135  {
    8136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8137  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8138  inoutStats.unusedSize += unusedRangeSize;
    8139  ++inoutStats.unusedRangeCount;
    8140  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8141  }
    8142 
    8143  // End of loop.
    8144  lastOffset = freeSpace1stTo2ndEnd;
    8145  }
    8146  }
    8147 
    8148  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8149  {
    8150  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8151  while(lastOffset < size)
    8152  {
    8153  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8154  while(nextAlloc2ndIndex != SIZE_MAX &&
    8155  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8156  {
    8157  --nextAlloc2ndIndex;
    8158  }
    8159 
    8160  // Found non-null allocation.
    8161  if(nextAlloc2ndIndex != SIZE_MAX)
    8162  {
    8163  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8164 
    8165  // 1. Process free space before this allocation.
    8166  if(lastOffset < suballoc.offset)
    8167  {
    8168  // There is free space from lastOffset to suballoc.offset.
    8169  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8170  inoutStats.unusedSize += unusedRangeSize;
    8171  ++inoutStats.unusedRangeCount;
    8172  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8173  }
    8174 
    8175  // 2. Process this allocation.
    8176  // There is allocation with suballoc.offset, suballoc.size.
    8177  ++inoutStats.allocationCount;
    8178 
    8179  // 3. Prepare for next iteration.
    8180  lastOffset = suballoc.offset + suballoc.size;
    8181  --nextAlloc2ndIndex;
    8182  }
    8183  // We are at the end.
    8184  else
    8185  {
    8186  if(lastOffset < size)
    8187  {
    8188  // There is free space from lastOffset to size.
    8189  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8190  inoutStats.unusedSize += unusedRangeSize;
    8191  ++inoutStats.unusedRangeCount;
    8192  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8193  }
    8194 
    8195  // End of loop.
    8196  lastOffset = size;
    8197  }
    8198  }
    8199  }
    8200 }
    8201 
    8202 #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON map of this block's layout.
// Two passes over the same three scans (2nd vector as ring buffer, 1st vector,
// 2nd vector as upper double stack): the first pass only counts allocations,
// used bytes, and unused ranges so the JSON header can be emitted up front;
// the second pass emits one JSON entry per allocation / unused range, in
// ascending address order. Compiled only when VMA_STATS_STRING_ENABLED.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // Offset just past the last range (allocation or free space) processed so far.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the address range before
        // the first live item of the 1st vector.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Walk the 1st vector. It ends either at the bottom of the 2nd vector
    // (double-stack mode) or at the end of the whole block.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): sibling scans compare against freeSpace1stTo2ndEnd
            // here rather than size. Behaviorally equivalent: the enclosing
            // while guarantees lastOffset < freeSpace1stTo2ndEnd <= size, so
            // this condition is always true when reached — but the textual
            // inconsistency is worth confirming against upstream.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector is ordered from the end of the
        // block downwards, so iterate it backwards to visit allocations in
        // ascending address order. SIZE_MAX marks the index underflowing past 0.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    // Emit JSON header with the totals gathered above.
    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    // Re-run the same three scans, this time emitting a JSON entry per range.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd from the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8517 #endif // #if VMA_STATS_STRING_ENABLED
    8518 
    8519 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8520  uint32_t currentFrameIndex,
    8521  uint32_t frameInUseCount,
    8522  VkDeviceSize bufferImageGranularity,
    8523  VkDeviceSize allocSize,
    8524  VkDeviceSize allocAlignment,
    8525  bool upperAddress,
    8526  VmaSuballocationType allocType,
    8527  bool canMakeOtherLost,
    8528  uint32_t strategy,
    8529  VmaAllocationRequest* pAllocationRequest)
    8530 {
    8531  VMA_ASSERT(allocSize > 0);
    8532  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8533  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8534  VMA_HEAVY_ASSERT(Validate());
    8535 
    8536  const VkDeviceSize size = GetSize();
    8537  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8538  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8539 
    8540  if(upperAddress)
    8541  {
    8542  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8543  {
    8544  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8545  return false;
    8546  }
    8547 
    8548  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8549  if(allocSize > size)
    8550  {
    8551  return false;
    8552  }
    8553  VkDeviceSize resultBaseOffset = size - allocSize;
    8554  if(!suballocations2nd.empty())
    8555  {
    8556  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8557  resultBaseOffset = lastSuballoc.offset - allocSize;
    8558  if(allocSize > lastSuballoc.offset)
    8559  {
    8560  return false;
    8561  }
    8562  }
    8563 
    8564  // Start from offset equal to end of free space.
    8565  VkDeviceSize resultOffset = resultBaseOffset;
    8566 
    8567  // Apply VMA_DEBUG_MARGIN at the end.
    8568  if(VMA_DEBUG_MARGIN > 0)
    8569  {
    8570  if(resultOffset < VMA_DEBUG_MARGIN)
    8571  {
    8572  return false;
    8573  }
    8574  resultOffset -= VMA_DEBUG_MARGIN;
    8575  }
    8576 
    8577  // Apply alignment.
    8578  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8579 
    8580  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8581  // Make bigger alignment if necessary.
    8582  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8583  {
    8584  bool bufferImageGranularityConflict = false;
    8585  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8586  {
    8587  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8588  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8589  {
    8590  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8591  {
    8592  bufferImageGranularityConflict = true;
    8593  break;
    8594  }
    8595  }
    8596  else
    8597  // Already on previous page.
    8598  break;
    8599  }
    8600  if(bufferImageGranularityConflict)
    8601  {
    8602  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8603  }
    8604  }
    8605 
    8606  // There is enough free space.
    8607  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8608  suballocations1st.back().offset + suballocations1st.back().size :
    8609  0;
    8610  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8611  {
    8612  // Check previous suballocations for BufferImageGranularity conflicts.
    8613  // If conflict exists, allocation cannot be made here.
    8614  if(bufferImageGranularity > 1)
    8615  {
    8616  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8617  {
    8618  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8619  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8620  {
    8621  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8622  {
    8623  return false;
    8624  }
    8625  }
    8626  else
    8627  {
    8628  // Already on next page.
    8629  break;
    8630  }
    8631  }
    8632  }
    8633 
    8634  // All tests passed: Success.
    8635  pAllocationRequest->offset = resultOffset;
    8636  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8637  pAllocationRequest->sumItemSize = 0;
    8638  // pAllocationRequest->item unused.
    8639  pAllocationRequest->itemsToMakeLostCount = 0;
    8640  return true;
    8641  }
    8642  }
    8643  else // !upperAddress
    8644  {
    8645  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8646  {
    8647  // Try to allocate at the end of 1st vector.
    8648 
    8649  VkDeviceSize resultBaseOffset = 0;
    8650  if(!suballocations1st.empty())
    8651  {
    8652  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8653  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8654  }
    8655 
    8656  // Start from offset equal to beginning of free space.
    8657  VkDeviceSize resultOffset = resultBaseOffset;
    8658 
    8659  // Apply VMA_DEBUG_MARGIN at the beginning.
    8660  if(VMA_DEBUG_MARGIN > 0)
    8661  {
    8662  resultOffset += VMA_DEBUG_MARGIN;
    8663  }
    8664 
    8665  // Apply alignment.
    8666  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8667 
    8668  // Check previous suballocations for BufferImageGranularity conflicts.
    8669  // Make bigger alignment if necessary.
    8670  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8671  {
    8672  bool bufferImageGranularityConflict = false;
    8673  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8674  {
    8675  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8676  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8677  {
    8678  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8679  {
    8680  bufferImageGranularityConflict = true;
    8681  break;
    8682  }
    8683  }
    8684  else
    8685  // Already on previous page.
    8686  break;
    8687  }
    8688  if(bufferImageGranularityConflict)
    8689  {
    8690  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8691  }
    8692  }
    8693 
    8694  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8695  suballocations2nd.back().offset : size;
    8696 
    8697  // There is enough free space at the end after alignment.
    8698  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8699  {
    8700  // Check next suballocations for BufferImageGranularity conflicts.
    8701  // If conflict exists, allocation cannot be made here.
    8702  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8703  {
    8704  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8705  {
    8706  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8707  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8708  {
    8709  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8710  {
    8711  return false;
    8712  }
    8713  }
    8714  else
    8715  {
    8716  // Already on previous page.
    8717  break;
    8718  }
    8719  }
    8720  }
    8721 
    8722  // All tests passed: Success.
    8723  pAllocationRequest->offset = resultOffset;
    8724  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8725  pAllocationRequest->sumItemSize = 0;
    8726  // pAllocationRequest->item unused.
    8727  pAllocationRequest->itemsToMakeLostCount = 0;
    8728  return true;
    8729  }
    8730  }
    8731 
    8732  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8733  // beginning of 1st vector as the end of free space.
    8734  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8735  {
    8736  VMA_ASSERT(!suballocations1st.empty());
    8737 
    8738  VkDeviceSize resultBaseOffset = 0;
    8739  if(!suballocations2nd.empty())
    8740  {
    8741  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8742  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8743  }
    8744 
    8745  // Start from offset equal to beginning of free space.
    8746  VkDeviceSize resultOffset = resultBaseOffset;
    8747 
    8748  // Apply VMA_DEBUG_MARGIN at the beginning.
    8749  if(VMA_DEBUG_MARGIN > 0)
    8750  {
    8751  resultOffset += VMA_DEBUG_MARGIN;
    8752  }
    8753 
    8754  // Apply alignment.
    8755  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8756 
    8757  // Check previous suballocations for BufferImageGranularity conflicts.
    8758  // Make bigger alignment if necessary.
    8759  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8760  {
    8761  bool bufferImageGranularityConflict = false;
    8762  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8763  {
    8764  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8765  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8766  {
    8767  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8768  {
    8769  bufferImageGranularityConflict = true;
    8770  break;
    8771  }
    8772  }
    8773  else
    8774  // Already on previous page.
    8775  break;
    8776  }
    8777  if(bufferImageGranularityConflict)
    8778  {
    8779  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8780  }
    8781  }
    8782 
    8783  pAllocationRequest->itemsToMakeLostCount = 0;
    8784  pAllocationRequest->sumItemSize = 0;
    8785  size_t index1st = m_1stNullItemsBeginCount;
    8786 
    8787  if(canMakeOtherLost)
    8788  {
    8789  while(index1st < suballocations1st.size() &&
    8790  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8791  {
    8792  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8793  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8794  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8795  {
    8796  // No problem.
    8797  }
    8798  else
    8799  {
    8800  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8801  if(suballoc.hAllocation->CanBecomeLost() &&
    8802  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8803  {
    8804  ++pAllocationRequest->itemsToMakeLostCount;
    8805  pAllocationRequest->sumItemSize += suballoc.size;
    8806  }
    8807  else
    8808  {
    8809  return false;
    8810  }
    8811  }
    8812  ++index1st;
    8813  }
    8814 
    8815  // Check next suballocations for BufferImageGranularity conflicts.
    8816  // If conflict exists, we must mark more allocations lost or fail.
    8817  if(bufferImageGranularity > 1)
    8818  {
    8819  while(index1st < suballocations1st.size())
    8820  {
    8821  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8822  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    8823  {
    8824  if(suballoc.hAllocation != VK_NULL_HANDLE)
    8825  {
    8826  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    8827  if(suballoc.hAllocation->CanBecomeLost() &&
    8828  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8829  {
    8830  ++pAllocationRequest->itemsToMakeLostCount;
    8831  pAllocationRequest->sumItemSize += suballoc.size;
    8832  }
    8833  else
    8834  {
    8835  return false;
    8836  }
    8837  }
    8838  }
    8839  else
    8840  {
    8841  // Already on next page.
    8842  break;
    8843  }
    8844  ++index1st;
    8845  }
    8846  }
    8847  }
    8848 
    8849  // There is enough free space at the end after alignment.
    8850  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    8851  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    8852  {
    8853  // Check next suballocations for BufferImageGranularity conflicts.
    8854  // If conflict exists, allocation cannot be made here.
    8855  if(bufferImageGranularity > 1)
    8856  {
    8857  for(size_t nextSuballocIndex = index1st;
    8858  nextSuballocIndex < suballocations1st.size();
    8859  nextSuballocIndex++)
    8860  {
    8861  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    8862  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8863  {
    8864  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8865  {
    8866  return false;
    8867  }
    8868  }
    8869  else
    8870  {
    8871  // Already on next page.
    8872  break;
    8873  }
    8874  }
    8875  }
    8876 
    8877  // All tests passed: Success.
    8878  pAllocationRequest->offset = resultOffset;
    8879  pAllocationRequest->sumFreeSize =
    8880  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    8881  - resultBaseOffset
    8882  - pAllocationRequest->sumItemSize;
    8883  // pAllocationRequest->item unused.
    8884  return true;
    8885  }
    8886  }
    8887  }
    8888 
    8889  return false;
    8890 }
    8891 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost exactly pAllocationRequest->itemsToMakeLostCount allocations,
    // scanning the 1st vector from its first non-null item. Returns false if
    // any of them cannot be made lost (MakeLost fails).
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost only happens in ring-buffer (or empty-2nd) mode;
    // double-stack mode never produces such requests.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null item and account for the reclaimed space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    8936 
    8937 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8938 {
    8939  uint32_t lostAllocationCount = 0;
    8940 
    8941  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8942  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8943  {
    8944  VmaSuballocation& suballoc = suballocations1st[i];
    8945  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8946  suballoc.hAllocation->CanBecomeLost() &&
    8947  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8948  {
    8949  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8950  suballoc.hAllocation = VK_NULL_HANDLE;
    8951  ++m_1stNullItemsMiddleCount;
    8952  m_SumFreeSize += suballoc.size;
    8953  ++lostAllocationCount;
    8954  }
    8955  }
    8956 
    8957  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8958  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8959  {
    8960  VmaSuballocation& suballoc = suballocations2nd[i];
    8961  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8962  suballoc.hAllocation->CanBecomeLost() &&
    8963  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8964  {
    8965  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8966  suballoc.hAllocation = VK_NULL_HANDLE;
    8967  ++m_2ndNullItemsCount;
    8968  ++lostAllocationCount;
    8969  }
    8970  }
    8971 
    8972  if(lostAllocationCount)
    8973  {
    8974  CleanupAfterFree();
    8975  }
    8976 
    8977  return lostAllocationCount;
    8978 }
    8979 
    8980 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8981 {
    8982  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8983  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8984  {
    8985  const VmaSuballocation& suballoc = suballocations1st[i];
    8986  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8987  {
    8988  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8989  {
    8990  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8991  return VK_ERROR_VALIDATION_FAILED_EXT;
    8992  }
    8993  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8994  {
    8995  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8996  return VK_ERROR_VALIDATION_FAILED_EXT;
    8997  }
    8998  }
    8999  }
    9000 
    9001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9002  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9003  {
    9004  const VmaSuballocation& suballoc = suballocations2nd[i];
    9005  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9006  {
    9007  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9008  {
    9009  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9010  return VK_ERROR_VALIDATION_FAILED_EXT;
    9011  }
    9012  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9013  {
    9014  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9015  return VK_ERROR_VALIDATION_FAILED_EXT;
    9016  }
    9017  }
    9018  }
    9019 
    9020  return VK_SUCCESS;
    9021 }
    9022 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits a previously created allocation request: appends the new
    // suballocation to the 1st or 2nd vector depending on the requested
    // address (lower/upper) and the current mode of the 2nd vector.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper address: push onto the upper stack (2nd vector, double-stack
        // mode). Incompatible with prior ring-buffer usage of this block.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // The 2nd vector must be usable as the second part of the ring
                // buffer; a double stack cannot coexist with it.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request does not match either valid placement - the
                // request must have been created against different state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9092 
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    // Linear metadata identifies suballocations by offset; delegate to the
    // offset-based implementation.
    FreeAtOffset(allocation->GetOffset());
}
    9097 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation that starts at the given offset. Fast paths
    // handle the first live item of the 1st vector and the last item of
    // either vector; otherwise a binary search over the sorted vectors finds
    // the item, which is turned into a null item. Every successful path ends
    // in CleanupAfterFree() to restore the metadata invariants.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // Ring-buffer mode keeps the 2nd vector sorted by increasing offset;
        // double-stack mode by decreasing offset - hence the two comparators.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9186 
    9187 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9188 {
    9189  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9190  const size_t suballocCount = AccessSuballocations1st().size();
    9191  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9192 }
    9193 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores class invariants after an item has been freed: trims null items
    // from the edges of both vectors, optionally compacts the 1st vector, and
    // - when the 1st vector empties while the ring buffer is active - promotes
    // the 2nd vector to become the new 1st one.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Nothing allocated - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector in place: shift every live item to the front,
            // dropping all null items.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Null items at the beginning of what is now the 1st vector are
                // reclassified from the middle count into the begin count.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9290 
    9291 
    9293 // class VmaBlockMetadata_Buddy
    9294 
// Constructs empty buddy metadata. The tree and level count are built later
// in Init(); until then the object holds no usable space.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    // Initially the whole (future) block counts as a single free region.
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all free-list heads; they are populated by Init()/Alloc().
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9304 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively destroys the whole node tree.
    // NOTE(review): DeleteNode dereferences its argument, so this assumes
    // Init() has been called (m_Root != VMA_NULL) - verify the object is
    // never destroyed uninitialized.
    DeleteNode(m_Root);
}
    9309 
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    // Initializes the buddy tree for a block of the given size. Only the
    // largest power-of-2 prefix of the block is managed by this algorithm;
    // the remainder (GetUnusableSize()) is never allocated from.
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: one level per halving of the node size, down to
    // MIN_NODE_SIZE, capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The root node represents the whole usable size as a single free node
    // at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9334 
// Consistency check of the whole buddy metadata: the node tree, the cached
// counters, and every per-level free list. Returns true when everything is
// consistent; VMA_VALIDATE reports and fails otherwise.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists: each must be a properly doubly-linked list of
    // TYPE_FREE nodes, with back pointing at the last node.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9377 
    9378 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9379 {
    9380  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9381  {
    9382  if(m_FreeList[level].front != VMA_NULL)
    9383  {
    9384  return LevelToNodeSize(level);
    9385  }
    9386  }
    9387  return 0;
    9388 }
    9389 
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    // Fills outInfo with statistics for this single block by walking the whole
    // buddy tree, then accounts for the unusable tail beyond the power-of-2
    // prefix as one extra unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    // Reset counters before the tree walk accumulates into them.
    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9413 
    9414 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9415 {
    9416  const VkDeviceSize unusableSize = GetUnusableSize();
    9417 
    9418  inoutStats.size += GetSize();
    9419  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9420  inoutStats.allocationCount += m_AllocationCount;
    9421  inoutStats.unusedRangeCount += m_FreeCount;
    9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9423 
    9424  if(unusableSize > 0)
    9425  {
    9426  ++inoutStats.unusedRangeCount;
    9427  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9428  }
    9429 }
    9430 
#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // Dumps this block as JSON: header with totals, then every node of the
    // buddy tree, then the unusable tail (if any).
    // TODO optimize
    // Totals are gathered up front because PrintDetailedMap_Begin needs them.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
    9459 
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    // Searches the free lists for a node that can hold the allocation and
    // fills *pAllocationRequest on success. currentFrameIndex, frameInUseCount,
    // canMakeOtherLost and strategy are unused here: the buddy algorithm does
    // not support lost allocations or alternative search strategies.
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Scan levels from targetLevel (smallest sufficient node) up to level 0
    // (largest nodes); `level--` in the condition counts targetLevel..0.
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Take the first free node whose offset satisfies the alignment.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Stash the level so Alloc() can locate this node again.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9510 
bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    // Succeeds only when the request did not depend on making anything lost.
    return pAllocationRequest->itemsToMakeLostCount == 0;
}
    9522 
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    // Nothing can be made lost, so the count is always zero.
    return 0;
}
    9531 
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits an allocation request created by CreateAllocationRequest():
    // locates the chosen free node, splits it down to the target level if it
    // is larger than needed, then converts it into an allocation node.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // The level at which the free node was found was stashed in customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Walk the free list at that level to the node with the requested offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // The left child ends up at the front so the descent below continues
        // at the node holding the requested offset.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9606 
    9607 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9608 {
    9609  if(node->type == Node::TYPE_SPLIT)
    9610  {
    9611  DeleteNode(node->split.leftChild->buddy);
    9612  DeleteNode(node->split.leftChild);
    9613  }
    9614 
    9615  vma_delete(GetAllocationCallbacks(), node);
    9616 }
    9617 
/*
Recursively validates structural invariants of buddy-tree node 'curr' at 'level'
(whose node size is 'levelNodeSize'): parent/buddy linkage, child offsets, and
per-type bookkeeping. Free/allocation counts and sizes are accumulated into 'ctx'
so the caller can compare them against the cached counters.
Returns false only for an unknown node type; other violations are reported via
VMA_VALIDATE.
*/
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Exactly the root (parent == null) has no buddy; buddy links must be mutual.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Tail padding between the allocation and the node end counts as free space.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            // Left child shares the parent's offset; right child is shifted by half.
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9661 
    9662 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9663 {
    9664  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9665  uint32_t level = 0;
    9666  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9667  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9668  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9669  {
    9670  ++level;
    9671  currLevelNodeSize = nextLevelNodeSize;
    9672  nextLevelNodeSize = currLevelNodeSize >> 1;
    9673  }
    9674  return level;
    9675 }
    9676 
    9677 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9678 {
    9679  // Find node and level.
    9680  Node* node = m_Root;
    9681  VkDeviceSize nodeOffset = 0;
    9682  uint32_t level = 0;
    9683  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9684  while(node->type == Node::TYPE_SPLIT)
    9685  {
    9686  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9687  if(offset < nodeOffset + nextLevelSize)
    9688  {
    9689  node = node->split.leftChild;
    9690  }
    9691  else
    9692  {
    9693  node = node->split.leftChild->buddy;
    9694  nodeOffset += nextLevelSize;
    9695  }
    9696  ++level;
    9697  levelNodeSize = nextLevelSize;
    9698  }
    9699 
    9700  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9701  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9702 
    9703  ++m_FreeCount;
    9704  --m_AllocationCount;
    9705  m_SumFreeSize += alloc->GetSize();
    9706 
    9707  node->type = Node::TYPE_FREE;
    9708 
    9709  // Join free nodes if possible.
    9710  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9711  {
    9712  RemoveFromFreeList(level, node->buddy);
    9713  Node* const parent = node->parent;
    9714 
    9715  vma_delete(GetAllocationCallbacks(), node->buddy);
    9716  vma_delete(GetAllocationCallbacks(), node);
    9717  parent->type = Node::TYPE_FREE;
    9718 
    9719  node = parent;
    9720  --level;
    9721  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9722  --m_FreeCount;
    9723  }
    9724 
    9725  AddToFreeListFront(level, node);
    9726 }
    9727 
    9728 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9729 {
    9730  switch(node->type)
    9731  {
    9732  case Node::TYPE_FREE:
    9733  ++outInfo.unusedRangeCount;
    9734  outInfo.unusedBytes += levelNodeSize;
    9735  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9736  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9737  break;
    9738  case Node::TYPE_ALLOCATION:
    9739  {
    9740  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9741  ++outInfo.allocationCount;
    9742  outInfo.usedBytes += allocSize;
    9743  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9744  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9745 
    9746  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9747  if(unusedRangeSize > 0)
    9748  {
    9749  ++outInfo.unusedRangeCount;
    9750  outInfo.unusedBytes += unusedRangeSize;
    9751  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9752  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9753  }
    9754  }
    9755  break;
    9756  case Node::TYPE_SPLIT:
    9757  {
    9758  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9759  const Node* const leftChild = node->split.leftChild;
    9760  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9761  const Node* const rightChild = leftChild->buddy;
    9762  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9763  }
    9764  break;
    9765  default:
    9766  VMA_ASSERT(0);
    9767  }
    9768 }
    9769 
    9770 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9771 {
    9772  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9773 
    9774  // List is empty.
    9775  Node* const frontNode = m_FreeList[level].front;
    9776  if(frontNode == VMA_NULL)
    9777  {
    9778  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9779  node->free.prev = node->free.next = VMA_NULL;
    9780  m_FreeList[level].front = m_FreeList[level].back = node;
    9781  }
    9782  else
    9783  {
    9784  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9785  node->free.prev = VMA_NULL;
    9786  node->free.next = frontNode;
    9787  frontNode->free.prev = node;
    9788  m_FreeList[level].front = node;
    9789  }
    9790 }
    9791 
    9792 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9793 {
    9794  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9795 
    9796  // It is at the front.
    9797  if(node->free.prev == VMA_NULL)
    9798  {
    9799  VMA_ASSERT(m_FreeList[level].front == node);
    9800  m_FreeList[level].front = node->free.next;
    9801  }
    9802  else
    9803  {
    9804  Node* const prevFreeNode = node->free.prev;
    9805  VMA_ASSERT(prevFreeNode->free.next == node);
    9806  prevFreeNode->free.next = node->free.next;
    9807  }
    9808 
    9809  // It is at the back.
    9810  if(node->free.next == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].back == node);
    9813  m_FreeList[level].back = node->free.prev;
    9814  }
    9815  else
    9816  {
    9817  Node* const nextFreeNode = node->free.next;
    9818  VMA_ASSERT(nextFreeNode->free.prev == node);
    9819  nextFreeNode->free.prev = node->free.prev;
    9820  }
    9821 }
    9822 
    9823 #if VMA_STATS_STRING_ENABLED
    9824 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9825 {
    9826  switch(node->type)
    9827  {
    9828  case Node::TYPE_FREE:
    9829  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9830  break;
    9831  case Node::TYPE_ALLOCATION:
    9832  {
    9833  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9834  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9835  if(allocSize < levelNodeSize)
    9836  {
    9837  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9838  }
    9839  }
    9840  break;
    9841  case Node::TYPE_SPLIT:
    9842  {
    9843  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = node->split.leftChild;
    9845  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9846  const Node* const rightChild = leftChild->buddy;
    9847  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9848  }
    9849  break;
    9850  default:
    9851  VMA_ASSERT(0);
    9852  }
    9853 }
    9854 #endif // #if VMA_STATS_STRING_ENABLED
    9855 
    9856 
    9858 // class VmaDeviceMemoryBlock
    9859 
// Constructs an empty block not yet bound to any VkDeviceMemory.
// Real initialization happens in Init(); the hAllocator parameter is unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9869 
    9870 void VmaDeviceMemoryBlock::Init(
    9871  VmaAllocator hAllocator,
    9872  uint32_t newMemoryTypeIndex,
    9873  VkDeviceMemory newMemory,
    9874  VkDeviceSize newSize,
    9875  uint32_t id,
    9876  uint32_t algorithm)
    9877 {
    9878  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9879 
    9880  m_MemoryTypeIndex = newMemoryTypeIndex;
    9881  m_Id = id;
    9882  m_hMemory = newMemory;
    9883 
    9884  switch(algorithm)
    9885  {
    9887  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9888  break;
    9890  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9891  break;
    9892  default:
    9893  VMA_ASSERT(0);
    9894  // Fall-through.
    9895  case 0:
    9896  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9897  }
    9898  m_pMetadata->Init(newSize);
    9899 }
    9900 
// Releases the underlying VkDeviceMemory back to the allocator and destroys the
// metadata object. All suballocations must have been freed beforehand.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9914 
// Checks block-level invariants (memory handle bound, non-zero size), then
// delegates to the metadata's own validation.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    9922 
    9923 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9924 {
    9925  void* pData = nullptr;
    9926  VkResult res = Map(hAllocator, 1, &pData);
    9927  if(res != VK_SUCCESS)
    9928  {
    9929  return res;
    9930  }
    9931 
    9932  res = m_pMetadata->CheckCorruption(pData);
    9933 
    9934  Unmap(hAllocator, 1);
    9935 
    9936  return res;
    9937 }
    9938 
/*
Maps the block's memory and optionally returns the pointer in *ppData.
Mapping is reference-counted: 'count' references are added. Only the first
mapping actually calls vkMapMemory (whole size, offset 0); later calls reuse
m_pMappedData. Thread-safe via m_Mutex (when the allocator uses mutexes).
*/
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the reference count and hand out the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Reference count starts at 'count' only on successful mapping.
            m_MapCount = count;
        }
        return result;
    }
}
    9977 
/*
Removes 'count' mapping references. vkUnmapMemory is called only when the
reference count drops to zero. Asserts on an unbalanced unmap.
Thread-safe via m_Mutex (when the allocator uses mutexes).
*/
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone - release the mapping.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    10000 
    10001 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10002 {
    10003  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10004  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10005 
    10006  void* pData;
    10007  VkResult res = Map(hAllocator, 1, &pData);
    10008  if(res != VK_SUCCESS)
    10009  {
    10010  return res;
    10011  }
    10012 
    10013  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10014  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10015 
    10016  Unmap(hAllocator, 1);
    10017 
    10018  return VK_SUCCESS;
    10019 }
    10020 
    10021 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10022 {
    10023  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10024  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10025 
    10026  void* pData;
    10027  VkResult res = Map(hAllocator, 1, &pData);
    10028  if(res != VK_SUCCESS)
    10029  {
    10030  return res;
    10031  }
    10032 
    10033  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10034  {
    10035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10036  }
    10037  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10038  {
    10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10040  }
    10041 
    10042  Unmap(hAllocator, 1);
    10043 
    10044  return VK_SUCCESS;
    10045 }
    10046 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// Returns the result of vkBindBufferMemory.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10062 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
// Returns the result of vkBindImageMemory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
    10078 
// Zeroes outInfo and primes the minimum trackers with UINT64_MAX so the first
// value merged in (via VMA_MIN) always replaces them.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    10085 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counts and byte totals are summed; min/max trackers are merged with
// VMA_MIN/VMA_MAX. Average fields are NOT updated here - call
// VmaPostprocessCalcStatInfo afterwards.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10099 
// Computes the average fields from the accumulated totals, guarding against
// division by zero when there are no allocations / unused ranges.
static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
{
    inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
}
    10107 
// Creates a custom pool: configures the embedded block vector from createInfo.
// createInfo.blockSize == 0 means "use the allocator's preferred block size".
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10126 
// Intentionally empty: m_BlockVector destroys its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    10130 
    10131 #if VMA_STATS_STRING_ENABLED
    10132 
    10133 #endif // #if VMA_STATS_STRING_ENABLED
    10134 
// Stores configuration for a vector of memory blocks of a single memory type.
// No blocks are created here - see CreateMinBlocks() / Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10162 
    10163 VmaBlockVector::~VmaBlockVector()
    10164 {
    10165  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10166 
    10167  for(size_t i = m_Blocks.size(); i--; )
    10168  {
    10169  m_Blocks[i]->Destroy(m_hAllocator);
    10170  vma_delete(m_hAllocator, m_Blocks[i]);
    10171  }
    10172 }
    10173 
    10174 VkResult VmaBlockVector::CreateMinBlocks()
    10175 {
    10176  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10177  {
    10178  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10179  if(res != VK_SUCCESS)
    10180  {
    10181  return res;
    10182  }
    10183  }
    10184  return VK_SUCCESS;
    10185 }
    10186 
    10187 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10188 {
    10189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10190 
    10191  const size_t blockCount = m_Blocks.size();
    10192 
    10193  pStats->size = 0;
    10194  pStats->unusedSize = 0;
    10195  pStats->allocationCount = 0;
    10196  pStats->unusedRangeCount = 0;
    10197  pStats->unusedRangeSizeMax = 0;
    10198  pStats->blockCount = blockCount;
    10199 
    10200  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10201  {
    10202  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10203  VMA_ASSERT(pBlock);
    10204  VMA_HEAVY_ASSERT(pBlock->Validate());
    10205  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10206  }
    10207 }
    10208 
// Corruption detection requires the debug macros enabled and memory that is
// both host-visible and host-coherent, so the margins can be accessed directly.
bool VmaBlockVector::IsCorruptionDetectionEnabled() const
{
    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
        (VMA_DEBUG_MARGIN > 0) &&
        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
}
    10216 
// Maximum number of retry iterations in VmaBlockVector::Allocate when using
// canMakeOtherLost: each attempt may make other allocations lost and try again.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10218 
    10219 VkResult VmaBlockVector::Allocate(
    10220  VmaPool hCurrentPool,
    10221  uint32_t currentFrameIndex,
    10222  VkDeviceSize size,
    10223  VkDeviceSize alignment,
    10224  const VmaAllocationCreateInfo& createInfo,
    10225  VmaSuballocationType suballocType,
    10226  VmaAllocation* pAllocation)
    10227 {
    10228  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10229  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10230  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10231  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10232  const bool canCreateNewBlock =
    10233  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10234  (m_Blocks.size() < m_MaxBlockCount);
    10235  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10236 
    10237  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10238  // Which in turn is available only when maxBlockCount = 1.
    10239  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10240  {
    10241  canMakeOtherLost = false;
    10242  }
    10243 
    10244  // Upper address can only be used with linear allocator and within single memory block.
    10245  if(isUpperAddress &&
    10246  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10247  {
    10248  return VK_ERROR_FEATURE_NOT_PRESENT;
    10249  }
    10250 
    10251  // Validate strategy.
    10252  switch(strategy)
    10253  {
    10254  case 0:
    10256  break;
    10260  break;
    10261  default:
    10262  return VK_ERROR_FEATURE_NOT_PRESENT;
    10263  }
    10264 
    10265  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10266  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10267  {
    10268  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10269  }
    10270 
    10271  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10272 
    10273  /*
    10274  Under certain condition, this whole section can be skipped for optimization, so
    10275  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10276  e.g. for custom pools with linear algorithm.
    10277  */
    10278  if(!canMakeOtherLost || canCreateNewBlock)
    10279  {
    10280  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10281  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10283 
    10284  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10285  {
    10286  // Use only last block.
    10287  if(!m_Blocks.empty())
    10288  {
    10289  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10290  VMA_ASSERT(pCurrBlock);
    10291  VkResult res = AllocateFromBlock(
    10292  pCurrBlock,
    10293  hCurrentPool,
    10294  currentFrameIndex,
    10295  size,
    10296  alignment,
    10297  allocFlagsCopy,
    10298  createInfo.pUserData,
    10299  suballocType,
    10300  strategy,
    10301  pAllocation);
    10302  if(res == VK_SUCCESS)
    10303  {
    10304  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10305  return VK_SUCCESS;
    10306  }
    10307  }
    10308  }
    10309  else
    10310  {
    10312  {
    10313  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10314  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10315  {
    10316  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10317  VMA_ASSERT(pCurrBlock);
    10318  VkResult res = AllocateFromBlock(
    10319  pCurrBlock,
    10320  hCurrentPool,
    10321  currentFrameIndex,
    10322  size,
    10323  alignment,
    10324  allocFlagsCopy,
    10325  createInfo.pUserData,
    10326  suballocType,
    10327  strategy,
    10328  pAllocation);
    10329  if(res == VK_SUCCESS)
    10330  {
    10331  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10332  return VK_SUCCESS;
    10333  }
    10334  }
    10335  }
    10336  else // WORST_FIT, FIRST_FIT
    10337  {
    10338  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10339  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10340  {
    10341  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10342  VMA_ASSERT(pCurrBlock);
    10343  VkResult res = AllocateFromBlock(
    10344  pCurrBlock,
    10345  hCurrentPool,
    10346  currentFrameIndex,
    10347  size,
    10348  alignment,
    10349  allocFlagsCopy,
    10350  createInfo.pUserData,
    10351  suballocType,
    10352  strategy,
    10353  pAllocation);
    10354  if(res == VK_SUCCESS)
    10355  {
    10356  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10357  return VK_SUCCESS;
    10358  }
    10359  }
    10360  }
    10361  }
    10362 
    10363  // 2. Try to create new block.
    10364  if(canCreateNewBlock)
    10365  {
    10366  // Calculate optimal size for new block.
    10367  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10368  uint32_t newBlockSizeShift = 0;
    10369  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10370 
    10371  if(!m_ExplicitBlockSize)
    10372  {
    10373  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10374  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10375  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10376  {
    10377  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10378  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10379  {
    10380  newBlockSize = smallerNewBlockSize;
    10381  ++newBlockSizeShift;
    10382  }
    10383  else
    10384  {
    10385  break;
    10386  }
    10387  }
    10388  }
    10389 
    10390  size_t newBlockIndex = 0;
    10391  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10392  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10393  if(!m_ExplicitBlockSize)
    10394  {
    10395  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10396  {
    10397  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10398  if(smallerNewBlockSize >= size)
    10399  {
    10400  newBlockSize = smallerNewBlockSize;
    10401  ++newBlockSizeShift;
    10402  res = CreateBlock(newBlockSize, &newBlockIndex);
    10403  }
    10404  else
    10405  {
    10406  break;
    10407  }
    10408  }
    10409  }
    10410 
    10411  if(res == VK_SUCCESS)
    10412  {
    10413  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10414  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10415 
    10416  res = AllocateFromBlock(
    10417  pBlock,
    10418  hCurrentPool,
    10419  currentFrameIndex,
    10420  size,
    10421  alignment,
    10422  allocFlagsCopy,
    10423  createInfo.pUserData,
    10424  suballocType,
    10425  strategy,
    10426  pAllocation);
    10427  if(res == VK_SUCCESS)
    10428  {
    10429  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10430  return VK_SUCCESS;
    10431  }
    10432  else
    10433  {
    10434  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10435  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10436  }
    10437  }
    10438  }
    10439  }
    10440 
    10441  // 3. Try to allocate from existing blocks with making other allocations lost.
    10442  if(canMakeOtherLost)
    10443  {
    10444  uint32_t tryIndex = 0;
    10445  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10446  {
    10447  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10448  VmaAllocationRequest bestRequest = {};
    10449  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10450 
    10451  // 1. Search existing allocations.
    10453  {
    10454  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10455  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10456  {
    10457  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10458  VMA_ASSERT(pCurrBlock);
    10459  VmaAllocationRequest currRequest = {};
    10460  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10461  currentFrameIndex,
    10462  m_FrameInUseCount,
    10463  m_BufferImageGranularity,
    10464  size,
    10465  alignment,
    10466  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10467  suballocType,
    10468  canMakeOtherLost,
    10469  strategy,
    10470  &currRequest))
    10471  {
    10472  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10473  if(pBestRequestBlock == VMA_NULL ||
    10474  currRequestCost < bestRequestCost)
    10475  {
    10476  pBestRequestBlock = pCurrBlock;
    10477  bestRequest = currRequest;
    10478  bestRequestCost = currRequestCost;
    10479 
    10480  if(bestRequestCost == 0)
    10481  {
    10482  break;
    10483  }
    10484  }
    10485  }
    10486  }
    10487  }
    10488  else // WORST_FIT, FIRST_FIT
    10489  {
    10490  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10491  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10492  {
    10493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10494  VMA_ASSERT(pCurrBlock);
    10495  VmaAllocationRequest currRequest = {};
    10496  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10497  currentFrameIndex,
    10498  m_FrameInUseCount,
    10499  m_BufferImageGranularity,
    10500  size,
    10501  alignment,
    10502  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10503  suballocType,
    10504  canMakeOtherLost,
    10505  strategy,
    10506  &currRequest))
    10507  {
    10508  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10509  if(pBestRequestBlock == VMA_NULL ||
    10510  currRequestCost < bestRequestCost ||
    10512  {
    10513  pBestRequestBlock = pCurrBlock;
    10514  bestRequest = currRequest;
    10515  bestRequestCost = currRequestCost;
    10516 
    10517  if(bestRequestCost == 0 ||
    10519  {
    10520  break;
    10521  }
    10522  }
    10523  }
    10524  }
    10525  }
    10526 
    10527  if(pBestRequestBlock != VMA_NULL)
    10528  {
    10529  if(mapped)
    10530  {
    10531  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10532  if(res != VK_SUCCESS)
    10533  {
    10534  return res;
    10535  }
    10536  }
    10537 
    10538  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10539  currentFrameIndex,
    10540  m_FrameInUseCount,
    10541  &bestRequest))
    10542  {
    10543  // We no longer have an empty Allocation.
    10544  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10545  {
    10546  m_HasEmptyBlock = false;
    10547  }
    10548  // Allocate from this pBlock.
    10549  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10550  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10551  (*pAllocation)->InitBlockAllocation(
    10552  hCurrentPool,
    10553  pBestRequestBlock,
    10554  bestRequest.offset,
    10555  alignment,
    10556  size,
    10557  suballocType,
    10558  mapped,
    10559  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10560  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10561  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10562  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10563  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10564  {
    10565  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10566  }
    10567  if(IsCorruptionDetectionEnabled())
    10568  {
    10569  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10570  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10571  }
    10572  return VK_SUCCESS;
    10573  }
    10574  // else: Some allocations must have been touched while we are here. Next try.
    10575  }
    10576  else
    10577  {
    10578  // Could not find place in any of the blocks - break outer loop.
    10579  break;
    10580  }
    10581  }
    10582  /* Maximum number of tries exceeded - a very unlikely event when many other
    10583  threads are simultaneously touching allocations, making it impossible to make
    10584  them lost at the same time as we try to allocate. */
    10585  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10586  {
    10587  return VK_ERROR_TOO_MANY_OBJECTS;
    10588  }
    10589  }
    10590 
    10591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10592 }
    10593 
    10594 void VmaBlockVector::Free(
    10595  VmaAllocation hAllocation)
    10596 {
    10597  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10598 
    10599  // Scope for lock.
    10600  {
    10601  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10602 
    10603  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10604 
    10605  if(IsCorruptionDetectionEnabled())
    10606  {
    10607  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10608  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10609  }
    10610 
    10611  if(hAllocation->IsPersistentMap())
    10612  {
    10613  pBlock->Unmap(m_hAllocator, 1);
    10614  }
    10615 
    10616  pBlock->m_pMetadata->Free(hAllocation);
    10617  VMA_HEAVY_ASSERT(pBlock->Validate());
    10618 
    10619  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10620 
    10621  // pBlock became empty after this deallocation.
    10622  if(pBlock->m_pMetadata->IsEmpty())
    10623  {
    10624  // Already has empty Allocation. We don't want to have two, so delete this one.
    10625  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10626  {
    10627  pBlockToDelete = pBlock;
    10628  Remove(pBlock);
    10629  }
    10630  // We now have first empty block.
    10631  else
    10632  {
    10633  m_HasEmptyBlock = true;
    10634  }
    10635  }
    10636  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10637  // (This is optional, heuristics.)
    10638  else if(m_HasEmptyBlock)
    10639  {
    10640  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10641  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10642  {
    10643  pBlockToDelete = pLastBlock;
    10644  m_Blocks.pop_back();
    10645  m_HasEmptyBlock = false;
    10646  }
    10647  }
    10648 
    10649  IncrementallySortBlocks();
    10650  }
    10651 
    10652  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10653  // lock, for performance reason.
    10654  if(pBlockToDelete != VMA_NULL)
    10655  {
    10656  VMA_DEBUG_LOG(" Deleted empty allocation");
    10657  pBlockToDelete->Destroy(m_hAllocator);
    10658  vma_delete(m_hAllocator, pBlockToDelete);
    10659  }
    10660 }
    10661 
    10662 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10663 {
    10664  VkDeviceSize result = 0;
    10665  for(size_t i = m_Blocks.size(); i--; )
    10666  {
    10667  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10668  if(result >= m_PreferredBlockSize)
    10669  {
    10670  break;
    10671  }
    10672  }
    10673  return result;
    10674 }
    10675 
    10676 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10677 {
    10678  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10679  {
    10680  if(m_Blocks[blockIndex] == pBlock)
    10681  {
    10682  VmaVectorRemove(m_Blocks, blockIndex);
    10683  return;
    10684  }
    10685  }
    10686  VMA_ASSERT(0);
    10687 }
    10688 
    10689 void VmaBlockVector::IncrementallySortBlocks()
    10690 {
    10691  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10692  {
    10693  // Bubble sort only until first swap.
    10694  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10695  {
    10696  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10697  {
    10698  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10699  return;
    10700  }
    10701  }
    10702  }
    10703 }
    10704 
    10705 VkResult VmaBlockVector::AllocateFromBlock(
    10706  VmaDeviceMemoryBlock* pBlock,
    10707  VmaPool hCurrentPool,
    10708  uint32_t currentFrameIndex,
    10709  VkDeviceSize size,
    10710  VkDeviceSize alignment,
    10711  VmaAllocationCreateFlags allocFlags,
    10712  void* pUserData,
    10713  VmaSuballocationType suballocType,
    10714  uint32_t strategy,
    10715  VmaAllocation* pAllocation)
    10716 {
    10717  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10718  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10719  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10720  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10721 
    10722  VmaAllocationRequest currRequest = {};
    10723  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10724  currentFrameIndex,
    10725  m_FrameInUseCount,
    10726  m_BufferImageGranularity,
    10727  size,
    10728  alignment,
    10729  isUpperAddress,
    10730  suballocType,
    10731  false, // canMakeOtherLost
    10732  strategy,
    10733  &currRequest))
    10734  {
    10735  // Allocate from pCurrBlock.
    10736  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10737 
    10738  if(mapped)
    10739  {
    10740  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10741  if(res != VK_SUCCESS)
    10742  {
    10743  return res;
    10744  }
    10745  }
    10746 
    10747  // We no longer have an empty Allocation.
    10748  if(pBlock->m_pMetadata->IsEmpty())
    10749  {
    10750  m_HasEmptyBlock = false;
    10751  }
    10752 
    10753  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10754  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10755  (*pAllocation)->InitBlockAllocation(
    10756  hCurrentPool,
    10757  pBlock,
    10758  currRequest.offset,
    10759  alignment,
    10760  size,
    10761  suballocType,
    10762  mapped,
    10763  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10764  VMA_HEAVY_ASSERT(pBlock->Validate());
    10765  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10766  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10767  {
    10768  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10769  }
    10770  if(IsCorruptionDetectionEnabled())
    10771  {
    10772  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10773  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10774  }
    10775  return VK_SUCCESS;
    10776  }
    10777  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10778 }
    10779 
    10780 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10781 {
    10782  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10783  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10784  allocInfo.allocationSize = blockSize;
    10785  VkDeviceMemory mem = VK_NULL_HANDLE;
    10786  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10787  if(res < 0)
    10788  {
    10789  return res;
    10790  }
    10791 
    10792  // New VkDeviceMemory successfully created.
    10793 
    10794  // Create new Allocation for it.
    10795  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10796  pBlock->Init(
    10797  m_hAllocator,
    10798  m_MemoryTypeIndex,
    10799  mem,
    10800  allocInfo.allocationSize,
    10801  m_NextBlockId++,
    10802  m_Algorithm);
    10803 
    10804  m_Blocks.push_back(pBlock);
    10805  if(pNewBlockIndex != VMA_NULL)
    10806  {
    10807  *pNewBlockIndex = m_Blocks.size() - 1;
    10808  }
    10809 
    10810  return VK_SUCCESS;
    10811 }
    10812 
    10813 #if VMA_STATS_STRING_ENABLED
    10814 
// Serializes this block vector as a JSON object into `json`, under the mutex.
// Custom pools emit their full configuration; the default pools only emit
// "PreferredBlockSize". Output order and key names are part of the stats-string
// format consumed by external tools - do not change them.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Block-count limits: Min/Max only when explicitly constrained.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm - omitted from output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by block id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10877 
    10878 #endif // #if VMA_STATS_STRING_ENABLED
    10879 
    10880 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10881  VmaAllocator hAllocator,
    10882  uint32_t currentFrameIndex)
    10883 {
    10884  if(m_pDefragmentator == VMA_NULL)
    10885  {
    10886  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10887  hAllocator,
    10888  this,
    10889  currentFrameIndex);
    10890  }
    10891 
    10892  return m_pDefragmentator;
    10893 }
    10894 
// Runs defragmentation on this block vector (if a defragmentator exists),
// updates *pDefragmentationStats, decrements the remaining move budgets
// (maxBytesToMove / maxAllocationsToMove are in/out), and frees blocks that
// became empty - keeping at least m_MinBlockCount blocks alive.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // No defragmentator was ever requested for this vector - nothing to do.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics and charge the consumed amounts against the
    // caller's remaining budgets.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backward because VmaVectorRemove shifts
    // the elements that follow the removed index.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this empty block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10951 
    10952 void VmaBlockVector::DestroyDefragmentator()
    10953 {
    10954  if(m_pDefragmentator != VMA_NULL)
    10955  {
    10956  vma_delete(m_hAllocator, m_pDefragmentator);
    10957  m_pDefragmentator = VMA_NULL;
    10958  }
    10959 }
    10960 
    10961 void VmaBlockVector::MakePoolAllocationsLost(
    10962  uint32_t currentFrameIndex,
    10963  size_t* pLostAllocationCount)
    10964 {
    10965  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10966  size_t lostAllocationCount = 0;
    10967  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10968  {
    10969  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10970  VMA_ASSERT(pBlock);
    10971  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10972  }
    10973  if(pLostAllocationCount != VMA_NULL)
    10974  {
    10975  *pLostAllocationCount = lostAllocationCount;
    10976  }
    10977 }
    10978 
    10979 VkResult VmaBlockVector::CheckCorruption()
    10980 {
    10981  if(!IsCorruptionDetectionEnabled())
    10982  {
    10983  return VK_ERROR_FEATURE_NOT_PRESENT;
    10984  }
    10985 
    10986  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10987  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10988  {
    10989  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10990  VMA_ASSERT(pBlock);
    10991  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10992  if(res != VK_SUCCESS)
    10993  {
    10994  return res;
    10995  }
    10996  }
    10997  return VK_SUCCESS;
    10998 }
    10999 
// Accumulates per-block statistics of this vector into pStats: the totals,
// the per-memory-type bucket, and the per-heap bucket.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        // Each block's stats are folded into three aggregates.
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
    11019 
    11021 // VmaDefragmentator members definition
    11022 
// Constructs a defragmentator bound to one block vector. Both internal vectors
// use the allocator's custom CPU allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Only the default algorithm (GetAlgorithm() == 0) supports defragmentation.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11037 
    11038 VmaDefragmentator::~VmaDefragmentator()
    11039 {
    11040  for(size_t i = m_Blocks.size(); i--; )
    11041  {
    11042  vma_delete(m_hAllocator, m_Blocks[i]);
    11043  }
    11044 }
    11045 
    11046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11047 {
    11048  AllocationInfo allocInfo;
    11049  allocInfo.m_hAllocation = hAlloc;
    11050  allocInfo.m_pChanged = pChanged;
    11051  m_Allocations.push_back(allocInfo);
    11052 }
    11053 
    11054 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11055 {
    11056  // It has already been mapped for defragmentation.
    11057  if(m_pMappedDataForDefragmentation)
    11058  {
    11059  *ppMappedData = m_pMappedDataForDefragmentation;
    11060  return VK_SUCCESS;
    11061  }
    11062 
    11063  // It is originally mapped.
    11064  if(m_pBlock->GetMappedData())
    11065  {
    11066  *ppMappedData = m_pBlock->GetMappedData();
    11067  return VK_SUCCESS;
    11068  }
    11069 
    11070  // Map on first usage.
    11071  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return res;
    11074 }
    11075 
    11076 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11077 {
    11078  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11079  {
    11080  m_pBlock->Unmap(hAllocator, 1);
    11081  }
    11082 }
    11083 
// One round of defragmentation: walks candidate allocations from the most
// "source" block/largest allocation toward the front, and moves each one into
// the earliest block/offset where it fits (memcpy through mapped pointers).
// Returns VK_SUCCESS when all candidates were processed, VK_INCOMPLETE when a
// move budget (maxBytesToMove / maxAllocationsToMove) would be exceeded.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not chosen yet for this block";
    // the loop below then snaps it to the block's last allocation.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                // NOTE(review): a `strategy` argument appears to be missing here
                // compared to the other CreateAllocationRequest call sites in this
                // file - likely a line lost in extraction. Confirm against upstream.
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be mapped to copy through the CPU.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the corruption-detection guards at the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: allocate at destination, free at source, repoint the handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, or the previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11227 
// Top-level defragmentation driver: builds per-block bookkeeping, buckets the
// registered allocations into their blocks, sorts blocks from most
// "destination" to most "source", then runs up to two DefragmentRound passes.
// Finally unmaps any blocks that were mapped only for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value - enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a known block.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering criteria. (Note: upstream method name
    // contains the typo "Descecnding" - part of the interface, left as-is.)
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11295 
    11296 bool VmaDefragmentator::MoveMakesSense(
    11297  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11298  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11299 {
    11300  if(dstBlockIndex < srcBlockIndex)
    11301  {
    11302  return true;
    11303  }
    11304  if(dstBlockIndex > srcBlockIndex)
    11305  {
    11306  return false;
    11307  }
    11308  if(dstOffset < srcOffset)
    11309  {
    11310  return true;
    11311  }
    11312  return false;
    11313 }
    11314 
    11316 // VmaRecorder
    11317 
    11318 #if VMA_RECORDING_ENABLED
    11319 
// Default-constructs an inactive recorder; Init() must be called before any
// Record* function. m_Freq/m_StartCounter hold sentinel values until then.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11328 
    11329 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11330 {
    11331  m_UseMutex = useMutex;
    11332  m_Flags = settings.flags;
    11333 
    11334  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11335  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11336 
    11337  // Open file for writing.
    11338  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11339  if(err != 0)
    11340  {
    11341  return VK_ERROR_INITIALIZATION_FAILED;
    11342  }
    11343 
    11344  // Write header.
    11345  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11346  fprintf(m_File, "%s\n", "1,3");
    11347 
    11348  return VK_SUCCESS;
    11349 }
    11350 
    11351 VmaRecorder::~VmaRecorder()
    11352 {
    11353  if(m_File != VMA_NULL)
    11354  {
    11355  fclose(m_File);
    11356  }
    11357 }
    11358 
    11359 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11360 {
    11361  CallParams callParams;
    11362  GetBasicParams(callParams);
    11363 
    11364  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11365  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11366  Flush();
    11367 }
    11368 
    11369 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11370 {
    11371  CallParams callParams;
    11372  GetBasicParams(callParams);
    11373 
    11374  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11375  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11376  Flush();
    11377 }
    11378 
// Records a vmaCreatePool call: the full VmaPoolCreateInfo plus the resulting
// pool handle, as one CSV line. Column order is part of the recording format.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // NOTE(review): %p receives a VmaPool handle rather than a void* - strictly
    // this wants a cast to void*; confirm against upstream before changing,
    // since the emitted text feeds the replay tool.
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11395 
    11396 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11397 {
    11398  CallParams callParams;
    11399  GetBasicParams(callParams);
    11400 
    11401  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11402  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11403  pool);
    11404  Flush();
    11405 }
    11406 
    11407 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11408  const VkMemoryRequirements& vkMemReq,
    11409  const VmaAllocationCreateInfo& createInfo,
    11410  VmaAllocation allocation)
    11411 {
    11412  CallParams callParams;
    11413  GetBasicParams(callParams);
    11414 
    11415  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11416  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11417  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11418  vkMemReq.size,
    11419  vkMemReq.alignment,
    11420  vkMemReq.memoryTypeBits,
    11421  createInfo.flags,
    11422  createInfo.usage,
    11423  createInfo.requiredFlags,
    11424  createInfo.preferredFlags,
    11425  createInfo.memoryTypeBits,
    11426  createInfo.pool,
    11427  allocation,
    11428  userDataStr.GetString());
    11429  Flush();
    11430 }
    11431 
    11432 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11433  const VkMemoryRequirements& vkMemReq,
    11434  bool requiresDedicatedAllocation,
    11435  bool prefersDedicatedAllocation,
    11436  const VmaAllocationCreateInfo& createInfo,
    11437  VmaAllocation allocation)
    11438 {
    11439  CallParams callParams;
    11440  GetBasicParams(callParams);
    11441 
    11442  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11443  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11444  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11445  vkMemReq.size,
    11446  vkMemReq.alignment,
    11447  vkMemReq.memoryTypeBits,
    11448  requiresDedicatedAllocation ? 1 : 0,
    11449  prefersDedicatedAllocation ? 1 : 0,
    11450  createInfo.flags,
    11451  createInfo.usage,
    11452  createInfo.requiredFlags,
    11453  createInfo.preferredFlags,
    11454  createInfo.memoryTypeBits,
    11455  createInfo.pool,
    11456  allocation,
    11457  userDataStr.GetString());
    11458  Flush();
    11459 }
    11460 
    11461 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11462  const VkMemoryRequirements& vkMemReq,
    11463  bool requiresDedicatedAllocation,
    11464  bool prefersDedicatedAllocation,
    11465  const VmaAllocationCreateInfo& createInfo,
    11466  VmaAllocation allocation)
    11467 {
    11468  CallParams callParams;
    11469  GetBasicParams(callParams);
    11470 
    11471  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11472  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11473  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11474  vkMemReq.size,
    11475  vkMemReq.alignment,
    11476  vkMemReq.memoryTypeBits,
    11477  requiresDedicatedAllocation ? 1 : 0,
    11478  prefersDedicatedAllocation ? 1 : 0,
    11479  createInfo.flags,
    11480  createInfo.usage,
    11481  createInfo.requiredFlags,
    11482  createInfo.preferredFlags,
    11483  createInfo.memoryTypeBits,
    11484  createInfo.pool,
    11485  allocation,
    11486  userDataStr.GetString());
    11487  Flush();
    11488 }
    11489 
    11490 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11491  VmaAllocation allocation)
    11492 {
    11493  CallParams callParams;
    11494  GetBasicParams(callParams);
    11495 
    11496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11497  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11498  allocation);
    11499  Flush();
    11500 }
    11501 
    11502 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11503  VmaAllocation allocation,
    11504  const void* pUserData)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  UserDataString userDataStr(
    11511  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11512  pUserData);
    11513  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11514  allocation,
    11515  userDataStr.GetString());
    11516  Flush();
    11517 }
    11518 
    11519 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11520  VmaAllocation allocation)
    11521 {
    11522  CallParams callParams;
    11523  GetBasicParams(callParams);
    11524 
    11525  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation);
    11528  Flush();
    11529 }
    11530 
    11531 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11532  VmaAllocation allocation)
    11533 {
    11534  CallParams callParams;
    11535  GetBasicParams(callParams);
    11536 
    11537  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11538  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11539  allocation);
    11540  Flush();
    11541 }
    11542 
    11543 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11544  VmaAllocation allocation)
    11545 {
    11546  CallParams callParams;
    11547  GetBasicParams(callParams);
    11548 
    11549  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11550  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11551  allocation);
    11552  Flush();
    11553 }
    11554 
    11555 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11556  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11557 {
    11558  CallParams callParams;
    11559  GetBasicParams(callParams);
    11560 
    11561  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11562  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11563  allocation,
    11564  offset,
    11565  size);
    11566  Flush();
    11567 }
    11568 
    11569 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11570  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11571 {
    11572  CallParams callParams;
    11573  GetBasicParams(callParams);
    11574 
    11575  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11576  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11577  allocation,
    11578  offset,
    11579  size);
    11580  Flush();
    11581 }
    11582 
    11583 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11584  const VkBufferCreateInfo& bufCreateInfo,
    11585  const VmaAllocationCreateInfo& allocCreateInfo,
    11586  VmaAllocation allocation)
    11587 {
    11588  CallParams callParams;
    11589  GetBasicParams(callParams);
    11590 
    11591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11592  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11593  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11594  bufCreateInfo.flags,
    11595  bufCreateInfo.size,
    11596  bufCreateInfo.usage,
    11597  bufCreateInfo.sharingMode,
    11598  allocCreateInfo.flags,
    11599  allocCreateInfo.usage,
    11600  allocCreateInfo.requiredFlags,
    11601  allocCreateInfo.preferredFlags,
    11602  allocCreateInfo.memoryTypeBits,
    11603  allocCreateInfo.pool,
    11604  allocation,
    11605  userDataStr.GetString());
    11606  Flush();
    11607 }
    11608 
    11609 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11610  const VkImageCreateInfo& imageCreateInfo,
    11611  const VmaAllocationCreateInfo& allocCreateInfo,
    11612  VmaAllocation allocation)
    11613 {
    11614  CallParams callParams;
    11615  GetBasicParams(callParams);
    11616 
    11617  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11618  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11619  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11620  imageCreateInfo.flags,
    11621  imageCreateInfo.imageType,
    11622  imageCreateInfo.format,
    11623  imageCreateInfo.extent.width,
    11624  imageCreateInfo.extent.height,
    11625  imageCreateInfo.extent.depth,
    11626  imageCreateInfo.mipLevels,
    11627  imageCreateInfo.arrayLayers,
    11628  imageCreateInfo.samples,
    11629  imageCreateInfo.tiling,
    11630  imageCreateInfo.usage,
    11631  imageCreateInfo.sharingMode,
    11632  imageCreateInfo.initialLayout,
    11633  allocCreateInfo.flags,
    11634  allocCreateInfo.usage,
    11635  allocCreateInfo.requiredFlags,
    11636  allocCreateInfo.preferredFlags,
    11637  allocCreateInfo.memoryTypeBits,
    11638  allocCreateInfo.pool,
    11639  allocation,
    11640  userDataStr.GetString());
    11641  Flush();
    11642 }
    11643 
    11644 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11645  VmaAllocation allocation)
    11646 {
    11647  CallParams callParams;
    11648  GetBasicParams(callParams);
    11649 
    11650  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11651  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11652  allocation);
    11653  Flush();
    11654 }
    11655 
    11656 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11657  VmaAllocation allocation)
    11658 {
    11659  CallParams callParams;
    11660  GetBasicParams(callParams);
    11661 
    11662  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11663  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11664  allocation);
    11665  Flush();
    11666 }
    11667 
    11668 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11669  VmaAllocation allocation)
    11670 {
    11671  CallParams callParams;
    11672  GetBasicParams(callParams);
    11673 
    11674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11675  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11676  allocation);
    11677  Flush();
    11678 }
    11679 
    11680 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11681  VmaAllocation allocation)
    11682 {
    11683  CallParams callParams;
    11684  GetBasicParams(callParams);
    11685 
    11686  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11687  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11688  allocation);
    11689  Flush();
    11690 }
    11691 
    11692 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11693  VmaPool pool)
    11694 {
    11695  CallParams callParams;
    11696  GetBasicParams(callParams);
    11697 
    11698  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11699  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11700  pool);
    11701  Flush();
    11702 }
    11703 
    11704 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11705 {
    11706  if(pUserData != VMA_NULL)
    11707  {
    11708  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11709  {
    11710  m_Str = (const char*)pUserData;
    11711  }
    11712  else
    11713  {
    11714  sprintf_s(m_PtrStr, "%p", pUserData);
    11715  m_Str = m_PtrStr;
    11716  }
    11717  }
    11718  else
    11719  {
    11720  m_Str = "";
    11721  }
    11722 }
    11723 
    11724 void VmaRecorder::WriteConfiguration(
    11725  const VkPhysicalDeviceProperties& devProps,
    11726  const VkPhysicalDeviceMemoryProperties& memProps,
    11727  bool dedicatedAllocationExtensionEnabled)
    11728 {
    11729  fprintf(m_File, "Config,Begin\n");
    11730 
    11731  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    11732  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    11733  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    11734  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    11735  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    11736  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    11737 
    11738  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    11739  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    11740  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    11741 
    11742  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    11743  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    11744  {
    11745  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    11746  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    11747  }
    11748  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    11749  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    11750  {
    11751  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    11752  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    11753  }
    11754 
    11755  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    11756 
    11757  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    11758  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    11759  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    11760  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    11761  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    11762  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    11763  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    11764  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    11765  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11766 
    11767  fprintf(m_File, "Config,End\n");
    11768 }
    11769 
    11770 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11771 {
    11772  outParams.threadId = GetCurrentThreadId();
    11773 
    11774  LARGE_INTEGER counter;
    11775  QueryPerformanceCounter(&counter);
    11776  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11777 }
    11778 
    11779 void VmaRecorder::Flush()
    11780 {
    11781  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11782  {
    11783  fflush(m_File);
    11784  }
    11785 }
    11786 
    11787 #endif // #if VMA_RECORDING_ENABLED
    11788 
    11790 // VmaAllocator_T
    11791 
// Constructor: copies user-supplied configuration, imports Vulkan function
// pointers, queries device/memory properties, applies optional per-heap size
// limits, and creates one default VmaBlockVector plus one dedicated-allocation
// list per memory type. Fallible setup (recording) is done later in Init().
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    m_NextPoolId(0)
    // NOTE(review): an "#if VMA_RECORDING_ENABLED" guard line appears to be
    // missing from this listing; the "#endif" below closes it — verify
    // against the original vk_mem_alloc.h.
    ,m_pRecorder(VMA_NULL)
#endif
{
    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

#if !(VMA_DEDICATED_ALLOCATION)
    // NOTE(review): the condition line guarding this block (presumably a
    // check of the KHR-dedicated-allocation create flag) appears to be
    // missing from this listing — verify against the original source.
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    }
#endif

    // Zero out members filled in below / lazily.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // Default: no per-heap size limit.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    // Resolve Vulkan function pointers (static linkage and/or user-supplied).
    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // Alignment/granularity values must be powers of two for the bit tricks
    // used elsewhere in the allocator.
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Apply user-requested heap size limits, also clamping the reported heap
    // sizes so block-size heuristics see the limited value.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default (non-custom) block vector and one dedicated-allocation
    // vector per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false, // isCustomPool
            false, // explicitBlockSize
            false); // linearAlgorithm
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // becase minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));

    }
}
    11891 
    11892 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11893 {
    11894  VkResult res = VK_SUCCESS;
    11895 
    11896  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11897  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11898  {
    11899 #if VMA_RECORDING_ENABLED
    11900  m_pRecorder = vma_new(this, VmaRecorder)();
    11901  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11902  if(res != VK_SUCCESS)
    11903  {
    11904  return res;
    11905  }
    11906  m_pRecorder->WriteConfiguration(
    11907  m_PhysicalDeviceProperties,
    11908  m_MemProps,
    11909  m_UseKhrDedicatedAllocation);
    11910  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11911 #else
    11912  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11913  return VK_ERROR_FEATURE_NOT_PRESENT;
    11914 #endif
    11915  }
    11916 
    11917  return res;
    11918 }
    11919 
    11920 VmaAllocator_T::~VmaAllocator_T()
    11921 {
    11922 #if VMA_RECORDING_ENABLED
    11923  if(m_pRecorder != VMA_NULL)
    11924  {
    11925  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11926  vma_delete(this, m_pRecorder);
    11927  }
    11928 #endif
    11929 
    11930  VMA_ASSERT(m_Pools.empty());
    11931 
    11932  for(size_t i = GetMemoryTypeCount(); i--; )
    11933  {
    11934  vma_delete(this, m_pDedicatedAllocations[i]);
    11935  vma_delete(this, m_pBlockVectors[i]);
    11936  }
    11937 }
    11938 
// Fills m_VulkanFunctions with the Vulkan entry points the allocator needs.
// Two sources, applied in order:
//  1. When VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of the statically
//     linked Vulkan functions (the KHR dedicated-allocation entry points are
//     fetched via vkGetDeviceProcAddr instead).
//  2. Any non-null pointers in the user-supplied pVulkanFunctions, which
//     override the static ones.
// Finally asserts that every required pointer is non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are not statically exported - resolve them at
    // runtime, but only when the user enabled the extension.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer over the current one, but only when the
// user actually provided it (non-null), so user pointers take precedence.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12024 
    12025 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12026 {
    12027  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12028  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12029  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12030  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12031 }
    12032 
// Allocates memory of a specific, already-chosen memory type. Strategy:
//  - If the request (or a heuristic: size > preferredBlockSize/2, or the
//    VMA_DEBUG_ALWAYS_DEDICATED_MEMORY macro) calls for dedicated memory and
//    NEVER_ALLOCATE is not set, dedicated allocation is preferred.
//  - With DEDICATED_MEMORY_BIT set: allocate dedicated memory directly
//    (fails with VK_ERROR_OUT_OF_DEVICE_MEMORY if NEVER_ALLOCATE is also set).
//  - Otherwise: try the default block vector for this memory type first,
//    then fall back to dedicated memory unless NEVER_ALLOCATE forbids it.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    VkDeviceSize size,
    VkDeviceSize alignment,
    bool dedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation != VMA_NULL);
    // NOTE(review): "vkMemReq" is not a parameter of this function (the size
    // parameter is "size"); this only compiles because VMA_DEBUG_LOG is a
    // no-op by default - verify against upstream.
    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    VmaAllocationCreateInfo finalCreateInfo = createInfo;

    // If memory type is not HOST_VISIBLE, disable MAPPED.
    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    {
        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    VMA_ASSERT(blockVector);

    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    bool preferDedicatedMemory =
        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
        dedicatedAllocation ||
        // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
        size > preferredBlockSize / 2;

    if(preferDedicatedMemory &&
        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
        finalCreateInfo.pool == VK_NULL_HANDLE)
    {
        // NOTE(review): the statement inside this block appears to be missing
        // from this listing - presumably it sets
        // VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT on finalCreateInfo.flags;
        // verify against the original vk_mem_alloc.h.
    }

    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    {
        // Dedicated memory requested, but NEVER_ALLOCATE forbids any new
        // VkDeviceMemory allocation: contradictory flags, fail.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            return AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
        }
    }
    else
    {
        // First attempt: suballocate from the default block vector.
        VkResult res = blockVector->Allocate(
            VK_NULL_HANDLE, // hCurrentPool
            m_CurrentFrameIndex.load(),
            size,
            alignment,
            finalCreateInfo,
            suballocType,
            pAllocation);
        if(res == VK_SUCCESS)
        {
            return res;
        }

        // 5. Try dedicated memory.
        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            res = AllocateDedicatedMemory(
                size,
                suballocType,
                memTypeIndex,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
                finalCreateInfo.pUserData,
                dedicatedBuffer,
                dedicatedImage,
                pAllocation);
            if(res == VK_SUCCESS)
            {
                // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
                return VK_SUCCESS;
            }
            else
            {
                // Everything failed: Return error code.
                VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
                return res;
            }
        }
    }
}
    12140 
    12141 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    12142  VkDeviceSize size,
    12143  VmaSuballocationType suballocType,
    12144  uint32_t memTypeIndex,
    12145  bool map,
    12146  bool isUserDataString,
    12147  void* pUserData,
    12148  VkBuffer dedicatedBuffer,
    12149  VkImage dedicatedImage,
    12150  VmaAllocation* pAllocation)
    12151 {
    12152  VMA_ASSERT(pAllocation);
    12153 
    12154  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12155  allocInfo.memoryTypeIndex = memTypeIndex;
    12156  allocInfo.allocationSize = size;
    12157 
    12158 #if VMA_DEDICATED_ALLOCATION
    12159  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    12160  if(m_UseKhrDedicatedAllocation)
    12161  {
    12162  if(dedicatedBuffer != VK_NULL_HANDLE)
    12163  {
    12164  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    12165  dedicatedAllocInfo.buffer = dedicatedBuffer;
    12166  allocInfo.pNext = &dedicatedAllocInfo;
    12167  }
    12168  else if(dedicatedImage != VK_NULL_HANDLE)
    12169  {
    12170  dedicatedAllocInfo.image = dedicatedImage;
    12171  allocInfo.pNext = &dedicatedAllocInfo;
    12172  }
    12173  }
    12174 #endif // #if VMA_DEDICATED_ALLOCATION
    12175 
    12176  // Allocate VkDeviceMemory.
    12177  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    12178  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    12179  if(res < 0)
    12180  {
    12181  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12182  return res;
    12183  }
    12184 
    12185  void* pMappedData = VMA_NULL;
    12186  if(map)
    12187  {
    12188  res = (*m_VulkanFunctions.vkMapMemory)(
    12189  m_hDevice,
    12190  hMemory,
    12191  0,
    12192  VK_WHOLE_SIZE,
    12193  0,
    12194  &pMappedData);
    12195  if(res < 0)
    12196  {
    12197  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    12198  FreeVulkanMemory(memTypeIndex, size, hMemory);
    12199  return res;
    12200  }
    12201  }
    12202 
    12203  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    12204  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    12205  (*pAllocation)->SetUserData(this, pUserData);
    12206  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12207  {
    12208  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12209  }
    12210 
    12211  // Register it in m_pDedicatedAllocations.
    12212  {
    12213  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12214  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    12215  VMA_ASSERT(pDedicatedAllocations);
    12216  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    12217  }
    12218 
    12219  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    12220 
    12221  return VK_SUCCESS;
    12222 }
    12223 
    12224 void VmaAllocator_T::GetBufferMemoryRequirements(
    12225  VkBuffer hBuffer,
    12226  VkMemoryRequirements& memReq,
    12227  bool& requiresDedicatedAllocation,
    12228  bool& prefersDedicatedAllocation) const
    12229 {
    12230 #if VMA_DEDICATED_ALLOCATION
    12231  if(m_UseKhrDedicatedAllocation)
    12232  {
    12233  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12234  memReqInfo.buffer = hBuffer;
    12235 
    12236  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12237 
    12238  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12239  memReq2.pNext = &memDedicatedReq;
    12240 
    12241  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12242 
    12243  memReq = memReq2.memoryRequirements;
    12244  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12245  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12246  }
    12247  else
    12248 #endif // #if VMA_DEDICATED_ALLOCATION
    12249  {
    12250  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12251  requiresDedicatedAllocation = false;
    12252  prefersDedicatedAllocation = false;
    12253  }
    12254 }
    12255 
    12256 void VmaAllocator_T::GetImageMemoryRequirements(
    12257  VkImage hImage,
    12258  VkMemoryRequirements& memReq,
    12259  bool& requiresDedicatedAllocation,
    12260  bool& prefersDedicatedAllocation) const
    12261 {
    12262 #if VMA_DEDICATED_ALLOCATION
    12263  if(m_UseKhrDedicatedAllocation)
    12264  {
    12265  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12266  memReqInfo.image = hImage;
    12267 
    12268  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12269 
    12270  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12271  memReq2.pNext = &memDedicatedReq;
    12272 
    12273  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12274 
    12275  memReq = memReq2.memoryRequirements;
    12276  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12277  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12278  }
    12279  else
    12280 #endif // #if VMA_DEDICATED_ALLOCATION
    12281  {
    12282  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12283  requiresDedicatedAllocation = false;
    12284  prefersDedicatedAllocation = false;
    12285  }
    12286 }
    12287 
    12288 VkResult VmaAllocator_T::AllocateMemory(
    12289  const VkMemoryRequirements& vkMemReq,
    12290  bool requiresDedicatedAllocation,
    12291  bool prefersDedicatedAllocation,
    12292  VkBuffer dedicatedBuffer,
    12293  VkImage dedicatedImage,
    12294  const VmaAllocationCreateInfo& createInfo,
    12295  VmaSuballocationType suballocType,
    12296  VmaAllocation* pAllocation)
    12297 {
    12298  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12299 
    12300  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12301  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12302  {
    12303  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12304  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12305  }
    12306  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12307  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    12308  {
    12309  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12310  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12311  }
    12312  if(requiresDedicatedAllocation)
    12313  {
    12314  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12315  {
    12316  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12317  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12318  }
    12319  if(createInfo.pool != VK_NULL_HANDLE)
    12320  {
    12321  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12322  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12323  }
    12324  }
    12325  if((createInfo.pool != VK_NULL_HANDLE) &&
    12326  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12327  {
    12328  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331 
    12332  if(createInfo.pool != VK_NULL_HANDLE)
    12333  {
    12334  const VkDeviceSize alignmentForPool = VMA_MAX(
    12335  vkMemReq.alignment,
    12336  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12337  return createInfo.pool->m_BlockVector.Allocate(
    12338  createInfo.pool,
    12339  m_CurrentFrameIndex.load(),
    12340  vkMemReq.size,
    12341  alignmentForPool,
    12342  createInfo,
    12343  suballocType,
    12344  pAllocation);
    12345  }
    12346  else
    12347  {
    12348  // Bit mask of memory Vulkan types acceptable for this allocation.
    12349  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12350  uint32_t memTypeIndex = UINT32_MAX;
    12351  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12352  if(res == VK_SUCCESS)
    12353  {
    12354  VkDeviceSize alignmentForMemType = VMA_MAX(
    12355  vkMemReq.alignment,
    12356  GetMemoryTypeMinAlignment(memTypeIndex));
    12357 
    12358  res = AllocateMemoryOfType(
    12359  vkMemReq.size,
    12360  alignmentForMemType,
    12361  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12362  dedicatedBuffer,
    12363  dedicatedImage,
    12364  createInfo,
    12365  memTypeIndex,
    12366  suballocType,
    12367  pAllocation);
    12368  // Succeeded on first try.
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  return res;
    12372  }
    12373  // Allocation from this memory type failed. Try other compatible memory types.
    12374  else
    12375  {
    12376  for(;;)
    12377  {
    12378  // Remove old memTypeIndex from list of possibilities.
    12379  memoryTypeBits &= ~(1u << memTypeIndex);
    12380  // Find alternative memTypeIndex.
    12381  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12382  if(res == VK_SUCCESS)
    12383  {
    12384  alignmentForMemType = VMA_MAX(
    12385  vkMemReq.alignment,
    12386  GetMemoryTypeMinAlignment(memTypeIndex));
    12387 
    12388  res = AllocateMemoryOfType(
    12389  vkMemReq.size,
    12390  alignmentForMemType,
    12391  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12392  dedicatedBuffer,
    12393  dedicatedImage,
    12394  createInfo,
    12395  memTypeIndex,
    12396  suballocType,
    12397  pAllocation);
    12398  // Allocation from this alternative memory type succeeded.
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  return res;
    12402  }
    12403  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12404  }
    12405  // No other matching memory type index could be found.
    12406  else
    12407  {
    12408  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12409  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12410  }
    12411  }
    12412  }
    12413  }
    12414  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12415  else
    12416  return res;
    12417  }
    12418 }
    12419 
    12420 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12421 {
    12422  VMA_ASSERT(allocation);
    12423 
    12424  if(TouchAllocation(allocation))
    12425  {
    12426  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12427  {
    12428  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12429  }
    12430 
    12431  switch(allocation->GetType())
    12432  {
    12433  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12434  {
    12435  VmaBlockVector* pBlockVector = VMA_NULL;
    12436  VmaPool hPool = allocation->GetPool();
    12437  if(hPool != VK_NULL_HANDLE)
    12438  {
    12439  pBlockVector = &hPool->m_BlockVector;
    12440  }
    12441  else
    12442  {
    12443  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12444  pBlockVector = m_pBlockVectors[memTypeIndex];
    12445  }
    12446  pBlockVector->Free(allocation);
    12447  }
    12448  break;
    12449  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12450  FreeDedicatedMemory(allocation);
    12451  break;
    12452  default:
    12453  VMA_ASSERT(0);
    12454  }
    12455  }
    12456 
    12457  allocation->SetUserData(this, VMA_NULL);
    12458  vma_delete(this, allocation);
    12459 }
    12460 
    12461 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12462 {
    12463  // Initialize.
    12464  InitStatInfo(pStats->total);
    12465  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12466  InitStatInfo(pStats->memoryType[i]);
    12467  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12468  InitStatInfo(pStats->memoryHeap[i]);
    12469 
    12470  // Process default pools.
    12471  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12472  {
    12473  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12474  VMA_ASSERT(pBlockVector);
    12475  pBlockVector->AddStats(pStats);
    12476  }
    12477 
    12478  // Process custom pools.
    12479  {
    12480  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12481  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12482  {
    12483  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12484  }
    12485  }
    12486 
    12487  // Process dedicated allocations.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12491  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12492  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12493  VMA_ASSERT(pDedicatedAllocVector);
    12494  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12495  {
    12496  VmaStatInfo allocationStatInfo;
    12497  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12498  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12499  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12500  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12501  }
    12502  }
    12503 
    12504  // Postprocess.
    12505  VmaPostprocessCalcStatInfo(pStats->total);
    12506  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12507  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12508  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12509  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12510 }
    12511 
    12512 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12513 
    12514 VkResult VmaAllocator_T::Defragment(
    12515  VmaAllocation* pAllocations,
    12516  size_t allocationCount,
    12517  VkBool32* pAllocationsChanged,
    12518  const VmaDefragmentationInfo* pDefragmentationInfo,
    12519  VmaDefragmentationStats* pDefragmentationStats)
    12520 {
    12521  if(pAllocationsChanged != VMA_NULL)
    12522  {
    12523  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12524  }
    12525  if(pDefragmentationStats != VMA_NULL)
    12526  {
    12527  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12528  }
    12529 
    12530  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12531 
    12532  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12533 
    12534  const size_t poolCount = m_Pools.size();
    12535 
    12536  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12537  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12538  {
    12539  VmaAllocation hAlloc = pAllocations[allocIndex];
    12540  VMA_ASSERT(hAlloc);
    12541  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12542  // DedicatedAlloc cannot be defragmented.
    12543  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12544  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12545  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12546  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12547  // Lost allocation cannot be defragmented.
    12548  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12549  {
    12550  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12551 
    12552  const VmaPool hAllocPool = hAlloc->GetPool();
    12553  // This allocation belongs to custom pool.
    12554  if(hAllocPool != VK_NULL_HANDLE)
    12555  {
    12556  // Pools with linear or buddy algorithm are not defragmented.
    12557  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12558  {
    12559  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12560  }
    12561  }
    12562  // This allocation belongs to general pool.
    12563  else
    12564  {
    12565  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12566  }
    12567 
    12568  if(pAllocBlockVector != VMA_NULL)
    12569  {
    12570  VmaDefragmentator* const pDefragmentator =
    12571  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12572  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12573  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12574  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12575  }
    12576  }
    12577  }
    12578 
    12579  VkResult result = VK_SUCCESS;
    12580 
    12581  // ======== Main processing.
    12582 
    12583  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12584  uint32_t maxAllocationsToMove = UINT32_MAX;
    12585  if(pDefragmentationInfo != VMA_NULL)
    12586  {
    12587  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12588  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12589  }
    12590 
    12591  // Process standard memory.
    12592  for(uint32_t memTypeIndex = 0;
    12593  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12594  ++memTypeIndex)
    12595  {
    12596  // Only HOST_VISIBLE memory types can be defragmented.
    12597  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12598  {
    12599  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12600  pDefragmentationStats,
    12601  maxBytesToMove,
    12602  maxAllocationsToMove);
    12603  }
    12604  }
    12605 
    12606  // Process custom pools.
    12607  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12608  {
    12609  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12610  pDefragmentationStats,
    12611  maxBytesToMove,
    12612  maxAllocationsToMove);
    12613  }
    12614 
    12615  // ======== Destroy defragmentators.
    12616 
    12617  // Process custom pools.
    12618  for(size_t poolIndex = poolCount; poolIndex--; )
    12619  {
    12620  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12621  }
    12622 
    12623  // Process standard memory.
    12624  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12625  {
    12626  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12627  {
    12628  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12629  }
    12630  }
    12631 
    12632  return result;
    12633 }
    12634 
    12635 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    12636 {
    12637  if(hAllocation->CanBecomeLost())
    12638  {
    12639  /*
    12640  Warning: This is a carefully designed algorithm.
    12641  Do not modify unless you really know what you're doing :)
    12642  */
    12643  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12644  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12645  for(;;)
    12646  {
    12647  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12648  {
    12649  pAllocationInfo->memoryType = UINT32_MAX;
    12650  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    12651  pAllocationInfo->offset = 0;
    12652  pAllocationInfo->size = hAllocation->GetSize();
    12653  pAllocationInfo->pMappedData = VMA_NULL;
    12654  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12655  return;
    12656  }
    12657  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12658  {
    12659  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12660  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12661  pAllocationInfo->offset = hAllocation->GetOffset();
    12662  pAllocationInfo->size = hAllocation->GetSize();
    12663  pAllocationInfo->pMappedData = VMA_NULL;
    12664  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12665  return;
    12666  }
    12667  else // Last use time earlier than current time.
    12668  {
    12669  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12670  {
    12671  localLastUseFrameIndex = localCurrFrameIndex;
    12672  }
    12673  }
    12674  }
    12675  }
    12676  else
    12677  {
    12678 #if VMA_STATS_STRING_ENABLED
    12679  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12680  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12681  for(;;)
    12682  {
    12683  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    12684  if(localLastUseFrameIndex == localCurrFrameIndex)
    12685  {
    12686  break;
    12687  }
    12688  else // Last use time earlier than current time.
    12689  {
    12690  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12691  {
    12692  localLastUseFrameIndex = localCurrFrameIndex;
    12693  }
    12694  }
    12695  }
    12696 #endif
    12697 
    12698  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12699  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12700  pAllocationInfo->offset = hAllocation->GetOffset();
    12701  pAllocationInfo->size = hAllocation->GetSize();
    12702  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    12703  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12704  }
    12705 }
    12706 
    12707 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    12708 {
    12709  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    12710  if(hAllocation->CanBecomeLost())
    12711  {
    12712  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12713  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12714  for(;;)
    12715  {
    12716  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12717  {
    12718  return false;
    12719  }
    12720  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12721  {
    12722  return true;
    12723  }
    12724  else // Last use time earlier than current time.
    12725  {
    12726  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12727  {
    12728  localLastUseFrameIndex = localCurrFrameIndex;
    12729  }
    12730  }
    12731  }
    12732  }
    12733  else
    12734  {
    12735 #if VMA_STATS_STRING_ENABLED
    12736  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12737  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12738  for(;;)
    12739  {
    12740  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    12741  if(localLastUseFrameIndex == localCurrFrameIndex)
    12742  {
    12743  break;
    12744  }
    12745  else // Last use time earlier than current time.
    12746  {
    12747  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12748  {
    12749  localLastUseFrameIndex = localCurrFrameIndex;
    12750  }
    12751  }
    12752  }
    12753 #endif
    12754 
    12755  return true;
    12756  }
    12757 }
    12758 
    12759 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12760 {
    12761  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12762 
    12763  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12764 
    12765  if(newCreateInfo.maxBlockCount == 0)
    12766  {
    12767  newCreateInfo.maxBlockCount = SIZE_MAX;
    12768  }
    12769  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12770  {
    12771  return VK_ERROR_INITIALIZATION_FAILED;
    12772  }
    12773 
    12774  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12775 
    12776  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12777 
    12778  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12779  if(res != VK_SUCCESS)
    12780  {
    12781  vma_delete(this, *pPool);
    12782  *pPool = VMA_NULL;
    12783  return res;
    12784  }
    12785 
    12786  // Add to m_Pools.
    12787  {
    12788  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12789  (*pPool)->SetId(m_NextPoolId++);
    12790  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12791  }
    12792 
    12793  return VK_SUCCESS;
    12794 }
    12795 
    12796 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12797 {
    12798  // Remove from m_Pools.
    12799  {
    12800  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12801  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12802  VMA_ASSERT(success && "Pool not found in Allocator.");
    12803  }
    12804 
    12805  vma_delete(this, pool);
    12806 }
    12807 
    12808 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    12809 {
    12810  pool->m_BlockVector.GetPoolStats(pPoolStats);
    12811 }
    12812 
    12813 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    12814 {
    12815  m_CurrentFrameIndex.store(frameIndex);
    12816 }
    12817 
    12818 void VmaAllocator_T::MakePoolAllocationsLost(
    12819  VmaPool hPool,
    12820  size_t* pLostAllocationCount)
    12821 {
    12822  hPool->m_BlockVector.MakePoolAllocationsLost(
    12823  m_CurrentFrameIndex.load(),
    12824  pLostAllocationCount);
    12825 }
    12826 
    12827 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    12828 {
    12829  return hPool->m_BlockVector.CheckCorruption();
    12830 }
    12831 
    12832 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12833 {
    12834  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12835 
    12836  // Process default pools.
    12837  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12838  {
    12839  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12840  {
    12841  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12842  VMA_ASSERT(pBlockVector);
    12843  VkResult localRes = pBlockVector->CheckCorruption();
    12844  switch(localRes)
    12845  {
    12846  case VK_ERROR_FEATURE_NOT_PRESENT:
    12847  break;
    12848  case VK_SUCCESS:
    12849  finalRes = VK_SUCCESS;
    12850  break;
    12851  default:
    12852  return localRes;
    12853  }
    12854  }
    12855  }
    12856 
    12857  // Process custom pools.
    12858  {
    12859  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12860  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12861  {
    12862  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12863  {
    12864  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12865  switch(localRes)
    12866  {
    12867  case VK_ERROR_FEATURE_NOT_PRESENT:
    12868  break;
    12869  case VK_SUCCESS:
    12870  finalRes = VK_SUCCESS;
    12871  break;
    12872  default:
    12873  return localRes;
    12874  }
    12875  }
    12876  }
    12877  }
    12878 
    12879  return finalRes;
    12880 }
    12881 
    12882 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    12883 {
    12884  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    12885  (*pAllocation)->InitLost();
    12886 }
    12887 
    12888 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12889 {
    12890  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12891 
    12892  VkResult res;
    12893  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12894  {
    12895  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12896  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12897  {
    12898  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12899  if(res == VK_SUCCESS)
    12900  {
    12901  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12902  }
    12903  }
    12904  else
    12905  {
    12906  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12907  }
    12908  }
    12909  else
    12910  {
    12911  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12912  }
    12913 
    12914  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12915  {
    12916  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12917  }
    12918 
    12919  return res;
    12920 }
    12921 
    12922 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    12923 {
    12924  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    12925  {
    12926  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    12927  }
    12928 
    12929  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    12930 
    12931  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    12932  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12933  {
    12934  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12935  m_HeapSizeLimit[heapIndex] += size;
    12936  }
    12937 }
    12938 
    12939 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12940 {
    12941  if(hAllocation->CanBecomeLost())
    12942  {
    12943  return VK_ERROR_MEMORY_MAP_FAILED;
    12944  }
    12945 
    12946  switch(hAllocation->GetType())
    12947  {
    12948  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12949  {
    12950  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12951  char *pBytes = VMA_NULL;
    12952  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12953  if(res == VK_SUCCESS)
    12954  {
    12955  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12956  hAllocation->BlockAllocMap();
    12957  }
    12958  return res;
    12959  }
    12960  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12961  return hAllocation->DedicatedAllocMap(this, ppData);
    12962  default:
    12963  VMA_ASSERT(0);
    12964  return VK_ERROR_MEMORY_MAP_FAILED;
    12965  }
    12966 }
    12967 
    // Undoes a successful Map(): decrements the allocation's map count and
    // unmaps the underlying block / dedicated memory accordingly.
    12968 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12969 {
    12970  switch(hAllocation->GetType())
    12971  {
    12972  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12973  {
    12974  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    // Order mirrors Map(): drop the allocation-level count first, then the
    // block-level mapping reference.
    12975  hAllocation->BlockAllocUnmap();
    12976  pBlock->Unmap(this, 1);
    12977  }
    12978  break;
    12979  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12980  hAllocation->DedicatedAllocUnmap(this);
    12981  break;
    12982  default:
    12983  VMA_ASSERT(0);
    12984  }
    12985 }
    12986 
    // Binds hBuffer to the allocation's memory. Dedicated allocations bind at
    // offset 0; block allocations delegate to the block, which applies the
    // allocation's offset within the shared VkDeviceMemory.
    12987 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12988 {
    12989  VkResult res = VK_SUCCESS;
    12990  switch(hAllocation->GetType())
    12991  {
    12992  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12993  res = GetVulkanFunctions().vkBindBufferMemory(
    12994  m_hDevice,
    12995  hBuffer,
    12996  hAllocation->GetMemory(),
    12997  0); //memoryOffset
    12998  break;
    12999  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13000  {
    13001  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13002  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13003  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13004  break;
    13005  }
    13006  default:
    13007  VMA_ASSERT(0);
    13008  }
    13009  return res;
    13010 }
    13011 
    // Image counterpart of BindBufferMemory: binds hImage to the allocation's
    // memory, at offset 0 for dedicated allocations or via the owning block
    // for sub-allocations.
    13012 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13013 {
    13014  VkResult res = VK_SUCCESS;
    13015  switch(hAllocation->GetType())
    13016  {
    13017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13018  res = GetVulkanFunctions().vkBindImageMemory(
    13019  m_hDevice,
    13020  hImage,
    13021  hAllocation->GetMemory(),
    13022  0); //memoryOffset
    13023  break;
    13024  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13025  {
    13026  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13027  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13028  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13029  break;
    13030  }
    13031  default:
    13032  VMA_ASSERT(0);
    13033  }
    13034  return res;
    13035 }
    13036 
    // Flushes or invalidates a mapped range of the allocation. Only acts on
    // non-coherent memory types and non-empty sizes; the range is expanded to
    // nonCoherentAtomSize boundaries as required by the Vulkan spec for
    // vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges, and clamped
    // to the allocation (dedicated) or the whole block (sub-allocation).
    13037 void VmaAllocator_T::FlushOrInvalidateAllocation(
    13038  VmaAllocation hAllocation,
    13039  VkDeviceSize offset, VkDeviceSize size,
    13040  VMA_CACHE_OPERATION op)
    13041 {
    13042  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    13043  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    13044  {
    13045  const VkDeviceSize allocationSize = hAllocation->GetSize();
    13046  VMA_ASSERT(offset <= allocationSize);
    13047 
    13048  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    13049 
    13050  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    13051  memRange.memory = hAllocation->GetMemory();
    13052 
    13053  switch(hAllocation->GetType())
    13054  {
    13055  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    // Dedicated: align down the start, align up the size, clamp to the
    // allocation end (VK_WHOLE_SIZE means "to the end of the allocation").
    13056  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13057  if(size == VK_WHOLE_SIZE)
    13058  {
    13059  memRange.size = allocationSize - memRange.offset;
    13060  }
    13061  else
    13062  {
    13063  VMA_ASSERT(offset + size <= allocationSize);
    13064  memRange.size = VMA_MIN(
    13065  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    13066  allocationSize - memRange.offset);
    13067  }
    13068  break;
    13069 
    13070  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13071  {
    13072  // 1. Still within this allocation.
    13073  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13074  if(size == VK_WHOLE_SIZE)
    13075  {
    13076  size = allocationSize - offset;
    13077  }
    13078  else
    13079  {
    13080  VMA_ASSERT(offset + size <= allocationSize);
    13081  }
    13082  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    13083 
    13084  // 2. Adjust to whole block.
    13085  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    13086  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    13087  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    13088  memRange.offset += allocationOffset;
    13089  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    13090 
    13091  break;
    13092  }
    13093 
    13094  default:
    13095  VMA_ASSERT(0);
    13096  }
    13097 
    13098  switch(op)
    13099  {
    13100  case VMA_CACHE_FLUSH:
    13101  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13102  break;
    13103  case VMA_CACHE_INVALIDATE:
    13104  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13105  break;
    13106  default:
    13107  VMA_ASSERT(0);
    13108  }
    13109  }
    13110  // else: Just ignore this call.
    13111 }
    13112 
    // Frees a dedicated (non-block) allocation: removes it from the per-type
    // dedicated-allocation registry (under its mutex), then releases the
    // underlying VkDeviceMemory via FreeVulkanMemory.
    13113 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13114 {
    13115  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13116 
    13117  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    // Scope the lock to just the registry update; the vkFreeMemory call below
    // does not need it.
    13118  {
    13119  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13120  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13121  VMA_ASSERT(pDedicatedAllocations);
    13122  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13123  VMA_ASSERT(success);
    13124  }
    13125 
    13126  VkDeviceMemory hMemory = allocation->GetMemory();
    13127 
    13128  /*
    13129  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13130  before vkFreeMemory.
    13131 
    13132  if(allocation->GetMappedData() != VMA_NULL)
    13133  {
    13134  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13135  }
    13136  */
    13137 
    13138  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13139 
    13140  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13141 }
    13142 
    // Debug helper: fills the allocation's memory with `pattern` when
    // VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled. Only host-visible,
    // non-lost allocations are filled; the write is flushed afterwards so it
    // lands in memory even on non-coherent types.
    13143 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13144 {
    13145  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13146  !hAllocation->CanBecomeLost() &&
    13147  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13148  {
    13149  void* pData = VMA_NULL;
    13150  VkResult res = Map(hAllocation, &pData);
    13151  if(res == VK_SUCCESS)
    13152  {
    13153  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13154  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13155  Unmap(hAllocation);
    13156  }
    13157  else
    13158  {
    13159  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13160  }
    13161  }
    13162 }
    13163 
    13164 #if VMA_STATS_STRING_ENABLED
    13165 
    // Writes a detailed JSON map of all memory owned by the allocator:
    // per-type dedicated allocations ("DedicatedAllocations"), the default
    // block vectors ("DefaultPools"), and user-created custom pools ("Pools").
    // Keys for the first two sections are emitted lazily so empty sections
    // produce no output at all.
    13166 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    13167 {
    13168  bool dedicatedAllocationsStarted = false;
    13169  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13170  {
    13171  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13172  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    13173  VMA_ASSERT(pDedicatedAllocVector);
    13174  if(pDedicatedAllocVector->empty() == false)
    13175  {
    // Open the "DedicatedAllocations" object only when the first non-empty
    // vector is found.
    13176  if(dedicatedAllocationsStarted == false)
    13177  {
    13178  dedicatedAllocationsStarted = true;
    13179  json.WriteString("DedicatedAllocations");
    13180  json.BeginObject();
    13181  }
    13182 
    13183  json.BeginString("Type ");
    13184  json.ContinueString(memTypeIndex);
    13185  json.EndString();
    13186 
    13187  json.BeginArray();
    13188 
    13189  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    13190  {
    13191  json.BeginObject(true);
    13192  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    13193  hAlloc->PrintParameters(json);
    13194  json.EndObject();
    13195  }
    13196 
    13197  json.EndArray();
    13198  }
    13199  }
    13200  if(dedicatedAllocationsStarted)
    13201  {
    13202  json.EndObject();
    13203  }
    13204 
    // Default (non-custom-pool) block vectors, one per memory type.
    13205  {
    13206  bool allocationsStarted = false;
    13207  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13208  {
    13209  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    13210  {
    13211  if(allocationsStarted == false)
    13212  {
    13213  allocationsStarted = true;
    13214  json.WriteString("DefaultPools");
    13215  json.BeginObject();
    13216  }
    13217 
    13218  json.BeginString("Type ");
    13219  json.ContinueString(memTypeIndex);
    13220  json.EndString();
    13221 
    13222  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    13223  }
    13224  }
    13225  if(allocationsStarted)
    13226  {
    13227  json.EndObject();
    13228  }
    13229  }
    13230 
    13231  // Custom pools
    13232  {
    13233  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13234  const size_t poolCount = m_Pools.size();
    13235  if(poolCount > 0)
    13236  {
    13237  json.WriteString("Pools");
    13238  json.BeginObject();
    13239  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13240  {
    // Each custom pool is keyed by its numeric id.
    13241  json.BeginString();
    13242  json.ContinueString(m_Pools[poolIndex]->GetId());
    13243  json.EndString();
    13244 
    13245  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    13246  }
    13247  json.EndObject();
    13248  }
    13249  }
    13250 }
    13251 
    13252 #endif // #if VMA_STATS_STRING_ENABLED
    13253 
    13255 // Public interface
    13256 
    // Public API: creates a VmaAllocator object using the user's allocation
    // callbacks, then runs its Init(). The new handle is stored in *pAllocator
    // even if Init() fails; the caller checks the returned VkResult.
    13257 VkResult vmaCreateAllocator(
    13258  const VmaAllocatorCreateInfo* pCreateInfo,
    13259  VmaAllocator* pAllocator)
    13260 {
    13261  VMA_ASSERT(pCreateInfo && pAllocator);
    13262  VMA_DEBUG_LOG("vmaCreateAllocator");
    13263  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13264  return (*pAllocator)->Init(pCreateInfo);
    13265 }
    13266 
    // Public API: destroys the allocator. Safe to call with VK_NULL_HANDLE.
    // The callbacks are copied to a local before vma_delete because the
    // original copy lives inside the object being destroyed.
    13267 void vmaDestroyAllocator(
    13268  VmaAllocator allocator)
    13269 {
    13270  if(allocator != VK_NULL_HANDLE)
    13271  {
    13272  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13273  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13274  vma_delete(&allocationCallbacks, allocator);
    13275  }
    13276 }
    13277 
    13279  VmaAllocator allocator,
    13280  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13281 {
    13282  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13283  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13284 }
    13285 
    13287  VmaAllocator allocator,
    13288  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13289 {
    13290  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13291  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13292 }
    13293 
    13295  VmaAllocator allocator,
    13296  uint32_t memoryTypeIndex,
    13297  VkMemoryPropertyFlags* pFlags)
    13298 {
    13299  VMA_ASSERT(allocator && pFlags);
    13300  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13301  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13302 }
    13303 
    13305  VmaAllocator allocator,
    13306  uint32_t frameIndex)
    13307 {
    13308  VMA_ASSERT(allocator);
    13309  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13310 
    13311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13312 
    13313  allocator->SetCurrentFrameIndex(frameIndex);
    13314 }
    13315 
    // Public API: fills *pStats with aggregate statistics for the whole
    // allocator (thin forwarding wrapper over VmaAllocator_T::CalculateStats).
    13316 void vmaCalculateStats(
    13317  VmaAllocator allocator,
    13318  VmaStats* pStats)
    13319 {
    13320  VMA_ASSERT(allocator && pStats);
    13321  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13322  allocator->CalculateStats(pStats);
    13323 }
    13324 
    13325 #if VMA_STATS_STRING_ENABLED
    13326 
    // Public API: builds a NUL-terminated JSON statistics string and returns
    // it in *ppStatsString. The string is allocated with the allocator's
    // callbacks and must be released with vmaFreeStatsString. When
    // detailedMap is VK_TRUE the full per-allocation map is appended.
    13327 void vmaBuildStatsString(
    13328  VmaAllocator allocator,
    13329  char** ppStatsString,
    13330  VkBool32 detailedMap)
    13331 {
    13332  VMA_ASSERT(allocator && ppStatsString);
    13333  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13334 
    13335  VmaStringBuilder sb(allocator);
    // Inner scope so the JSON writer flushes into sb before sb is read below.
    13336  {
    13337  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    13338  json.BeginObject();
    13339 
    13340  VmaStats stats;
    13341  allocator->CalculateStats(&stats);
    13342 
    13343  json.WriteString("Total");
    13344  VmaPrintStatInfo(json, stats.total);
    13345 
    // Per-heap section: size, heap flags, stats, and the memory types that
    // live in this heap.
    13346  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    13347  {
    13348  json.BeginString("Heap ");
    13349  json.ContinueString(heapIndex);
    13350  json.EndString();
    13351  json.BeginObject();
    13352 
    13353  json.WriteString("Size");
    13354  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    13355 
    13356  json.WriteString("Flags");
    13357  json.BeginArray(true);
    13358  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    13359  {
    13360  json.WriteString("DEVICE_LOCAL");
    13361  }
    13362  json.EndArray();
    13363 
    13364  if(stats.memoryHeap[heapIndex].blockCount > 0)
    13365  {
    13366  json.WriteString("Stats");
    13367  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    13368  }
    13369 
    13370  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    13371  {
    13372  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    13373  {
    13374  json.BeginString("Type ");
    13375  json.ContinueString(typeIndex);
    13376  json.EndString();
    13377 
    13378  json.BeginObject();
    13379 
    // Decode the memory-property bits into human-readable flag names.
    13380  json.WriteString("Flags");
    13381  json.BeginArray(true);
    13382  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    13383  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    13384  {
    13385  json.WriteString("DEVICE_LOCAL");
    13386  }
    13387  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13388  {
    13389  json.WriteString("HOST_VISIBLE");
    13390  }
    13391  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    13392  {
    13393  json.WriteString("HOST_COHERENT");
    13394  }
    13395  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    13396  {
    13397  json.WriteString("HOST_CACHED");
    13398  }
    13399  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    13400  {
    13401  json.WriteString("LAZILY_ALLOCATED");
    13402  }
    13403  json.EndArray();
    13404 
    13405  if(stats.memoryType[typeIndex].blockCount > 0)
    13406  {
    13407  json.WriteString("Stats");
    13408  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    13409  }
    13410 
    13411  json.EndObject();
    13412  }
    13413  }
    13414 
    13415  json.EndObject();
    13416  }
    13417  if(detailedMap == VK_TRUE)
    13418  {
    13419  allocator->PrintDetailedMap(json);
    13420  }
    13421 
    13422  json.EndObject();
    13423  }
    13424 
    // Copy the builder's buffer into a heap string owned by the caller.
    13425  const size_t len = sb.GetLength();
    13426  char* const pChars = vma_new_array(allocator, char, len + 1);
    13427  if(len > 0)
    13428  {
    13429  memcpy(pChars, sb.GetData(), len);
    13430  }
    13431  pChars[len] = '\0';
    13432  *ppStatsString = pChars;
    13433 }
    13434 
    // Public API: frees a string previously returned by vmaBuildStatsString.
    // The array length passed to vma_delete_array must match the original
    // allocation: strlen + 1 for the terminating NUL.
    13435 void vmaFreeStatsString(
    13436  VmaAllocator allocator,
    13437  char* pStatsString)
    13438 {
    13439  if(pStatsString != VMA_NULL)
    13440  {
    13441  VMA_ASSERT(allocator);
    13442  size_t len = strlen(pStatsString);
    13443  vma_delete_array(allocator, pStatsString, len + 1);
    13444  }
    13445 }
    13446 
    13447 #endif // #if VMA_STATS_STRING_ENABLED
    13448 
    13449 /*
    13450 This function is not protected by any mutex because it just reads immutable data.
    13451 */
    // Public API: chooses the best memory type index matching memoryTypeBits
    // and the requirements in pAllocationCreateInfo. Usage enum is first
    // translated into required/preferred property flags, then the type with
    // the fewest missing preferred flags wins (cost = popcount of missing
    // preferred bits; cost 0 short-circuits).
    // NOTE(review): the `case VMA_MEMORY_USAGE_*:` labels inside the switch
    // below were hyperlinks in the Doxygen HTML and are missing from this
    // extracted text — consult the original vk_mem_alloc.h for them.
    13452 VkResult vmaFindMemoryTypeIndex(
    13453  VmaAllocator allocator,
    13454  uint32_t memoryTypeBits,
    13455  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13456  uint32_t* pMemoryTypeIndex)
    13457 {
    13458  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13459  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13460  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13461 
    // An explicit memoryTypeBits in the create info further restricts the
    // candidate set.
    13462  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13463  {
    13464  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13465  }
    13466 
    13467  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13468  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13469 
    // MAPPED_BIT implies the memory should be host-visible.
    13470  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13471  if(mapped)
    13472  {
    13473  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13474  }
    13475 
    13476  // Convert usage to requiredFlags and preferredFlags.
    13477  switch(pAllocationCreateInfo->usage)
    13478  {
    13480  break;
    13482  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13483  {
    13484  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13485  }
    13486  break;
    13488  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13489  break;
    13491  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13492  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13493  {
    13494  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13495  }
    13496  break;
    13498  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13499  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13500  break;
    13501  default:
    13502  break;
    13503  }
    13504 
    13505  *pMemoryTypeIndex = UINT32_MAX;
    13506  uint32_t minCost = UINT32_MAX;
    13507  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13508  memTypeIndex < allocator->GetMemoryTypeCount();
    13509  ++memTypeIndex, memTypeBit <<= 1)
    13510  {
    13511  // This memory type is acceptable according to memoryTypeBits bitmask.
    13512  if((memTypeBit & memoryTypeBits) != 0)
    13513  {
    13514  const VkMemoryPropertyFlags currFlags =
    13515  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13516  // This memory type contains requiredFlags.
    13517  if((requiredFlags & ~currFlags) == 0)
    13518  {
    13519  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13520  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13521  // Remember memory type with lowest cost.
    13522  if(currCost < minCost)
    13523  {
    13524  *pMemoryTypeIndex = memTypeIndex;
    13525  if(currCost == 0)
    13526  {
    13527  return VK_SUCCESS;
    13528  }
    13529  minCost = currCost;
    13530  }
    13531  }
    13532  }
    13533  }
    13534  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13535 }
    13536 
    13538  VmaAllocator allocator,
    13539  const VkBufferCreateInfo* pBufferCreateInfo,
    13540  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13541  uint32_t* pMemoryTypeIndex)
    13542 {
    13543  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13544  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13545  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13546  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13547 
    13548  const VkDevice hDev = allocator->m_hDevice;
    13549  VkBuffer hBuffer = VK_NULL_HANDLE;
    13550  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13551  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13552  if(res == VK_SUCCESS)
    13553  {
    13554  VkMemoryRequirements memReq = {};
    13555  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13556  hDev, hBuffer, &memReq);
    13557 
    13558  res = vmaFindMemoryTypeIndex(
    13559  allocator,
    13560  memReq.memoryTypeBits,
    13561  pAllocationCreateInfo,
    13562  pMemoryTypeIndex);
    13563 
    13564  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13565  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13566  }
    13567  return res;
    13568 }
    13569 
    13571  VmaAllocator allocator,
    13572  const VkImageCreateInfo* pImageCreateInfo,
    13573  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13574  uint32_t* pMemoryTypeIndex)
    13575 {
    13576  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13577  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13578  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13579  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13580 
    13581  const VkDevice hDev = allocator->m_hDevice;
    13582  VkImage hImage = VK_NULL_HANDLE;
    13583  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13584  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13585  if(res == VK_SUCCESS)
    13586  {
    13587  VkMemoryRequirements memReq = {};
    13588  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13589  hDev, hImage, &memReq);
    13590 
    13591  res = vmaFindMemoryTypeIndex(
    13592  allocator,
    13593  memReq.memoryTypeBits,
    13594  pAllocationCreateInfo,
    13595  pMemoryTypeIndex);
    13596 
    13597  allocator->GetVulkanFunctions().vkDestroyImage(
    13598  hDev, hImage, allocator->GetAllocationCallbacks());
    13599  }
    13600  return res;
    13601 }
    13602 
    // Public API: creates a custom memory pool and optionally records the
    // call when VMA_RECORDING_ENABLED and a recorder is active.
    13603 VkResult vmaCreatePool(
    13604  VmaAllocator allocator,
    13605  const VmaPoolCreateInfo* pCreateInfo,
    13606  VmaPool* pPool)
    13607 {
    13608  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13609 
    13610  VMA_DEBUG_LOG("vmaCreatePool");
    13611 
    13612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13613 
    13614  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13615 
    13616 #if VMA_RECORDING_ENABLED
    13617  if(allocator->GetRecorder() != VMA_NULL)
    13618  {
    13619  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13620  }
    13621 #endif
    13622 
    13623  return res;
    13624 }
    13625 
    // Public API: destroys a custom pool. A VK_NULL_HANDLE pool is a no-op.
    // The destruction is recorded (if recording is enabled) before the pool
    // is actually destroyed, while the handle is still valid.
    13626 void vmaDestroyPool(
    13627  VmaAllocator allocator,
    13628  VmaPool pool)
    13629 {
    13630  VMA_ASSERT(allocator);
    13631 
    13632  if(pool == VK_NULL_HANDLE)
    13633  {
    13634  return;
    13635  }
    13636 
    13637  VMA_DEBUG_LOG("vmaDestroyPool");
    13638 
    13639  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13640 
    13641 #if VMA_RECORDING_ENABLED
    13642  if(allocator->GetRecorder() != VMA_NULL)
    13643  {
    13644  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13645  }
    13646 #endif
    13647 
    13648  allocator->DestroyPool(pool);
    13649 }
    13650 
    // Public API: fills *pPoolStats with statistics of a single custom pool
    // (thin forwarding wrapper over VmaAllocator_T::GetPoolStats).
    13651 void vmaGetPoolStats(
    13652  VmaAllocator allocator,
    13653  VmaPool pool,
    13654  VmaPoolStats* pPoolStats)
    13655 {
    13656  VMA_ASSERT(allocator && pool && pPoolStats);
    13657 
    13658  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13659 
    13660  allocator->GetPoolStats(pool, pPoolStats);
    13661 }
    13662 
    13664  VmaAllocator allocator,
    13665  VmaPool pool,
    13666  size_t* pLostAllocationCount)
    13667 {
    13668  VMA_ASSERT(allocator && pool);
    13669 
    13670  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13671 
    13672 #if VMA_RECORDING_ENABLED
    13673  if(allocator->GetRecorder() != VMA_NULL)
    13674  {
    13675  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13676  }
    13677 #endif
    13678 
    13679  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13680 }
    13681 
    // Public API: validates corruption-detection margins of all allocations in
    // the given pool (forwards to VmaAllocator_T::CheckPoolCorruption).
    13682 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13683 {
    13684  VMA_ASSERT(allocator && pool);
    13685 
    13686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13687 
    13688  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13689 
    13690  return allocator->CheckPoolCorruption(pool);
    13691 }
    13692 
    // Public API: allocates memory for externally-provided VkMemoryRequirements
    // (no associated buffer/image, so no dedicated-allocation hints). On
    // success, optionally fills *pAllocationInfo.
    13693 VkResult vmaAllocateMemory(
    13694  VmaAllocator allocator,
    13695  const VkMemoryRequirements* pVkMemoryRequirements,
    13696  const VmaAllocationCreateInfo* pCreateInfo,
    13697  VmaAllocation* pAllocation,
    13698  VmaAllocationInfo* pAllocationInfo)
    13699 {
    13700  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13701 
    13702  VMA_DEBUG_LOG("vmaAllocateMemory");
    13703 
    13704  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13705 
    13706  VkResult result = allocator->AllocateMemory(
    13707  *pVkMemoryRequirements,
    13708  false, // requiresDedicatedAllocation
    13709  false, // prefersDedicatedAllocation
    13710  VK_NULL_HANDLE, // dedicatedBuffer
    13711  VK_NULL_HANDLE, // dedicatedImage
    13712  *pCreateInfo,
    13713  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13714  pAllocation);
    13715 
    13716 #if VMA_RECORDING_ENABLED
    13717  if(allocator->GetRecorder() != VMA_NULL)
    13718  {
    13719  allocator->GetRecorder()->RecordAllocateMemory(
    13720  allocator->GetCurrentFrameIndex(),
    13721  *pVkMemoryRequirements,
    13722  *pCreateInfo,
    13723  *pAllocation);
    13724  }
    13725 #endif
    13726 
    13727  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13728  {
    13729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13730  }
    13731 
    13732  return result;
    13733 }
    13734 
    13736  VmaAllocator allocator,
    13737  VkBuffer buffer,
    13738  const VmaAllocationCreateInfo* pCreateInfo,
    13739  VmaAllocation* pAllocation,
    13740  VmaAllocationInfo* pAllocationInfo)
    13741 {
    13742  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13743 
    13744  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13745 
    13746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13747 
    13748  VkMemoryRequirements vkMemReq = {};
    13749  bool requiresDedicatedAllocation = false;
    13750  bool prefersDedicatedAllocation = false;
    13751  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13752  requiresDedicatedAllocation,
    13753  prefersDedicatedAllocation);
    13754 
    13755  VkResult result = allocator->AllocateMemory(
    13756  vkMemReq,
    13757  requiresDedicatedAllocation,
    13758  prefersDedicatedAllocation,
    13759  buffer, // dedicatedBuffer
    13760  VK_NULL_HANDLE, // dedicatedImage
    13761  *pCreateInfo,
    13762  VMA_SUBALLOCATION_TYPE_BUFFER,
    13763  pAllocation);
    13764 
    13765 #if VMA_RECORDING_ENABLED
    13766  if(allocator->GetRecorder() != VMA_NULL)
    13767  {
    13768  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13769  allocator->GetCurrentFrameIndex(),
    13770  vkMemReq,
    13771  requiresDedicatedAllocation,
    13772  prefersDedicatedAllocation,
    13773  *pCreateInfo,
    13774  *pAllocation);
    13775  }
    13776 #endif
    13777 
    13778  if(pAllocationInfo && result == VK_SUCCESS)
    13779  {
    13780  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13781  }
    13782 
    13783  return result;
    13784 }
    13785 
    // Public API: allocates memory suitable for an existing VkImage. Queries
    // the image's memory requirements (including dedicated-allocation
    // preference) and passes the image as the dedicatedImage hint. On
    // success, optionally fills *pAllocationInfo.
    13786 VkResult vmaAllocateMemoryForImage(
    13787  VmaAllocator allocator,
    13788  VkImage image,
    13789  const VmaAllocationCreateInfo* pCreateInfo,
    13790  VmaAllocation* pAllocation,
    13791  VmaAllocationInfo* pAllocationInfo)
    13792 {
    13793  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13794 
    13795  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13796 
    13797  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13798 
    13799  VkMemoryRequirements vkMemReq = {};
    13800  bool requiresDedicatedAllocation = false;
    13801  bool prefersDedicatedAllocation = false;
    13802  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13803  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13804 
    13805  VkResult result = allocator->AllocateMemory(
    13806  vkMemReq,
    13807  requiresDedicatedAllocation,
    13808  prefersDedicatedAllocation,
    13809  VK_NULL_HANDLE, // dedicatedBuffer
    13810  image, // dedicatedImage
    13811  *pCreateInfo,
    13812  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13813  pAllocation);
    13814 
    13815 #if VMA_RECORDING_ENABLED
    13816  if(allocator->GetRecorder() != VMA_NULL)
    13817  {
    13818  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13819  allocator->GetCurrentFrameIndex(),
    13820  vkMemReq,
    13821  requiresDedicatedAllocation,
    13822  prefersDedicatedAllocation,
    13823  *pCreateInfo,
    13824  *pAllocation);
    13825  }
    13826 #endif
    13827 
    13828  if(pAllocationInfo && result == VK_SUCCESS)
    13829  {
    13830  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13831  }
    13832 
    13833  return result;
    13834 }
    13835 
    // Public API: frees an allocation. VK_NULL_HANDLE is a no-op. The free is
    // recorded (if recording is enabled) before the allocation is destroyed,
    // while the handle is still valid.
    13836 void vmaFreeMemory(
    13837  VmaAllocator allocator,
    13838  VmaAllocation allocation)
    13839 {
    13840  VMA_ASSERT(allocator);
    13841 
    13842  if(allocation == VK_NULL_HANDLE)
    13843  {
    13844  return;
    13845  }
    13846 
    13847  VMA_DEBUG_LOG("vmaFreeMemory");
    13848 
    13849  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13850 
    13851 #if VMA_RECORDING_ENABLED
    13852  if(allocator->GetRecorder() != VMA_NULL)
    13853  {
    13854  allocator->GetRecorder()->RecordFreeMemory(
    13855  allocator->GetCurrentFrameIndex(),
    13856  allocation);
    13857  }
    13858 #endif
    13859 
    13860  allocator->FreeMemory(allocation);
    13861 }
    13862 
    13864  VmaAllocator allocator,
    13865  VmaAllocation allocation,
    13866  VmaAllocationInfo* pAllocationInfo)
    13867 {
    13868  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13869 
    13870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordGetAllocationInfo(
    13876  allocator->GetCurrentFrameIndex(),
    13877  allocation);
    13878  }
    13879 #endif
    13880 
    13881  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13882 }
    13883 
    // Public API: marks the allocation as used in the current frame and
    // returns whether it is still valid (VK_FALSE if it became lost).
    13884 VkBool32 vmaTouchAllocation(
    13885  VmaAllocator allocator,
    13886  VmaAllocation allocation)
    13887 {
    13888  VMA_ASSERT(allocator && allocation);
    13889 
    13890  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13891 
    13892 #if VMA_RECORDING_ENABLED
    13893  if(allocator->GetRecorder() != VMA_NULL)
    13894  {
    13895  allocator->GetRecorder()->RecordTouchAllocation(
    13896  allocator->GetCurrentFrameIndex(),
    13897  allocation);
    13898  }
    13899 #endif
    13900 
    13901  return allocator->TouchAllocation(allocation);
    13902 }
    13903 
    13905  VmaAllocator allocator,
    13906  VmaAllocation allocation,
    13907  void* pUserData)
    13908 {
    13909  VMA_ASSERT(allocator && allocation);
    13910 
    13911  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13912 
    13913  allocation->SetUserData(allocator, pUserData);
    13914 
    13915 #if VMA_RECORDING_ENABLED
    13916  if(allocator->GetRecorder() != VMA_NULL)
    13917  {
    13918  allocator->GetRecorder()->RecordSetAllocationUserData(
    13919  allocator->GetCurrentFrameIndex(),
    13920  allocation,
    13921  pUserData);
    13922  }
    13923 #endif
    13924 }
    13925 
    13927  VmaAllocator allocator,
    13928  VmaAllocation* pAllocation)
    13929 {
    13930  VMA_ASSERT(allocator && pAllocation);
    13931 
    13932  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13933 
    13934  allocator->CreateLostAllocation(pAllocation);
    13935 
    13936 #if VMA_RECORDING_ENABLED
    13937  if(allocator->GetRecorder() != VMA_NULL)
    13938  {
    13939  allocator->GetRecorder()->RecordCreateLostAllocation(
    13940  allocator->GetCurrentFrameIndex(),
    13941  *pAllocation);
    13942  }
    13943 #endif
    13944 }
    13945 
    13946 VkResult vmaMapMemory(
    13947  VmaAllocator allocator,
    13948  VmaAllocation allocation,
    13949  void** ppData)
    13950 {
    13951  VMA_ASSERT(allocator && allocation && ppData);
    13952 
    13953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13954 
    13955  VkResult res = allocator->Map(allocation, ppData);
    13956 
    13957 #if VMA_RECORDING_ENABLED
    13958  if(allocator->GetRecorder() != VMA_NULL)
    13959  {
    13960  allocator->GetRecorder()->RecordMapMemory(
    13961  allocator->GetCurrentFrameIndex(),
    13962  allocation);
    13963  }
    13964 #endif
    13965 
    13966  return res;
    13967 }
    13968 
    13969 void vmaUnmapMemory(
    13970  VmaAllocator allocator,
    13971  VmaAllocation allocation)
    13972 {
    13973  VMA_ASSERT(allocator && allocation);
    13974 
    13975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13976 
    13977 #if VMA_RECORDING_ENABLED
    13978  if(allocator->GetRecorder() != VMA_NULL)
    13979  {
    13980  allocator->GetRecorder()->RecordUnmapMemory(
    13981  allocator->GetCurrentFrameIndex(),
    13982  allocation);
    13983  }
    13984 #endif
    13985 
    13986  allocator->Unmap(allocation);
    13987 }
    13988 
    13989 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13990 {
    13991  VMA_ASSERT(allocator && allocation);
    13992 
    13993  VMA_DEBUG_LOG("vmaFlushAllocation");
    13994 
    13995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13996 
    13997  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    13998 
    13999 #if VMA_RECORDING_ENABLED
    14000  if(allocator->GetRecorder() != VMA_NULL)
    14001  {
    14002  allocator->GetRecorder()->RecordFlushAllocation(
    14003  allocator->GetCurrentFrameIndex(),
    14004  allocation, offset, size);
    14005  }
    14006 #endif
    14007 }
    14008 
    14009 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14010 {
    14011  VMA_ASSERT(allocator && allocation);
    14012 
    14013  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14014 
    14015  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14016 
    14017  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14018 
    14019 #if VMA_RECORDING_ENABLED
    14020  if(allocator->GetRecorder() != VMA_NULL)
    14021  {
    14022  allocator->GetRecorder()->RecordInvalidateAllocation(
    14023  allocator->GetCurrentFrameIndex(),
    14024  allocation, offset, size);
    14025  }
    14026 #endif
    14027 }
    14028 
    14029 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14030 {
    14031  VMA_ASSERT(allocator);
    14032 
    14033  VMA_DEBUG_LOG("vmaCheckCorruption");
    14034 
    14035  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14036 
    14037  return allocator->CheckCorruption(memoryTypeBits);
    14038 }
    14039 
    14040 VkResult vmaDefragment(
    14041  VmaAllocator allocator,
    14042  VmaAllocation* pAllocations,
    14043  size_t allocationCount,
    14044  VkBool32* pAllocationsChanged,
    14045  const VmaDefragmentationInfo *pDefragmentationInfo,
    14046  VmaDefragmentationStats* pDefragmentationStats)
    14047 {
    14048  VMA_ASSERT(allocator && pAllocations);
    14049 
    14050  VMA_DEBUG_LOG("vmaDefragment");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14055 }
    14056 
    14057 VkResult vmaBindBufferMemory(
    14058  VmaAllocator allocator,
    14059  VmaAllocation allocation,
    14060  VkBuffer buffer)
    14061 {
    14062  VMA_ASSERT(allocator && allocation && buffer);
    14063 
    14064  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14065 
    14066  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14067 
    14068  return allocator->BindBufferMemory(allocation, buffer);
    14069 }
    14070 
    14071 VkResult vmaBindImageMemory(
    14072  VmaAllocator allocator,
    14073  VmaAllocation allocation,
    14074  VkImage image)
    14075 {
    14076  VMA_ASSERT(allocator && allocation && image);
    14077 
    14078  VMA_DEBUG_LOG("vmaBindImageMemory");
    14079 
    14080  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14081 
    14082  return allocator->BindImageMemory(allocation, image);
    14083 }
    14084 
    14085 VkResult vmaCreateBuffer(
    14086  VmaAllocator allocator,
    14087  const VkBufferCreateInfo* pBufferCreateInfo,
    14088  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14089  VkBuffer* pBuffer,
    14090  VmaAllocation* pAllocation,
    14091  VmaAllocationInfo* pAllocationInfo)
    14092 {
    14093  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14094 
    14095  VMA_DEBUG_LOG("vmaCreateBuffer");
    14096 
    14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14098 
    14099  *pBuffer = VK_NULL_HANDLE;
    14100  *pAllocation = VK_NULL_HANDLE;
    14101 
    14102  // 1. Create VkBuffer.
    14103  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14104  allocator->m_hDevice,
    14105  pBufferCreateInfo,
    14106  allocator->GetAllocationCallbacks(),
    14107  pBuffer);
    14108  if(res >= 0)
    14109  {
    14110  // 2. vkGetBufferMemoryRequirements.
    14111  VkMemoryRequirements vkMemReq = {};
    14112  bool requiresDedicatedAllocation = false;
    14113  bool prefersDedicatedAllocation = false;
    14114  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14115  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14116 
    14117  // Make sure alignment requirements for specific buffer usages reported
    14118  // in Physical Device Properties are included in alignment reported by memory requirements.
    14119  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14120  {
    14121  VMA_ASSERT(vkMemReq.alignment %
    14122  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14123  }
    14124  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14125  {
    14126  VMA_ASSERT(vkMemReq.alignment %
    14127  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14128  }
    14129  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14130  {
    14131  VMA_ASSERT(vkMemReq.alignment %
    14132  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14133  }
    14134 
    14135  // 3. Allocate memory using allocator.
    14136  res = allocator->AllocateMemory(
    14137  vkMemReq,
    14138  requiresDedicatedAllocation,
    14139  prefersDedicatedAllocation,
    14140  *pBuffer, // dedicatedBuffer
    14141  VK_NULL_HANDLE, // dedicatedImage
    14142  *pAllocationCreateInfo,
    14143  VMA_SUBALLOCATION_TYPE_BUFFER,
    14144  pAllocation);
    14145 
    14146 #if VMA_RECORDING_ENABLED
    14147  if(allocator->GetRecorder() != VMA_NULL)
    14148  {
    14149  allocator->GetRecorder()->RecordCreateBuffer(
    14150  allocator->GetCurrentFrameIndex(),
    14151  *pBufferCreateInfo,
    14152  *pAllocationCreateInfo,
    14153  *pAllocation);
    14154  }
    14155 #endif
    14156 
    14157  if(res >= 0)
    14158  {
    14159  // 3. Bind buffer with memory.
    14160  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14161  if(res >= 0)
    14162  {
    14163  // All steps succeeded.
    14164  #if VMA_STATS_STRING_ENABLED
    14165  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14166  #endif
    14167  if(pAllocationInfo != VMA_NULL)
    14168  {
    14169  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14170  }
    14171 
    14172  return VK_SUCCESS;
    14173  }
    14174  allocator->FreeMemory(*pAllocation);
    14175  *pAllocation = VK_NULL_HANDLE;
    14176  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14177  *pBuffer = VK_NULL_HANDLE;
    14178  return res;
    14179  }
    14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14181  *pBuffer = VK_NULL_HANDLE;
    14182  return res;
    14183  }
    14184  return res;
    14185 }
    14186 
    14187 void vmaDestroyBuffer(
    14188  VmaAllocator allocator,
    14189  VkBuffer buffer,
    14190  VmaAllocation allocation)
    14191 {
    14192  VMA_ASSERT(allocator);
    14193 
    14194  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14195  {
    14196  return;
    14197  }
    14198 
    14199  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14200 
    14201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14202 
    14203 #if VMA_RECORDING_ENABLED
    14204  if(allocator->GetRecorder() != VMA_NULL)
    14205  {
    14206  allocator->GetRecorder()->RecordDestroyBuffer(
    14207  allocator->GetCurrentFrameIndex(),
    14208  allocation);
    14209  }
    14210 #endif
    14211 
    14212  if(buffer != VK_NULL_HANDLE)
    14213  {
    14214  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14215  }
    14216 
    14217  if(allocation != VK_NULL_HANDLE)
    14218  {
    14219  allocator->FreeMemory(allocation);
    14220  }
    14221 }
    14222 
    14223 VkResult vmaCreateImage(
    14224  VmaAllocator allocator,
    14225  const VkImageCreateInfo* pImageCreateInfo,
    14226  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14227  VkImage* pImage,
    14228  VmaAllocation* pAllocation,
    14229  VmaAllocationInfo* pAllocationInfo)
    14230 {
    14231  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14232 
    14233  VMA_DEBUG_LOG("vmaCreateImage");
    14234 
    14235  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14236 
    14237  *pImage = VK_NULL_HANDLE;
    14238  *pAllocation = VK_NULL_HANDLE;
    14239 
    14240  // 1. Create VkImage.
    14241  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14242  allocator->m_hDevice,
    14243  pImageCreateInfo,
    14244  allocator->GetAllocationCallbacks(),
    14245  pImage);
    14246  if(res >= 0)
    14247  {
    14248  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14249  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14250  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14251 
    14252  // 2. Allocate memory using allocator.
    14253  VkMemoryRequirements vkMemReq = {};
    14254  bool requiresDedicatedAllocation = false;
    14255  bool prefersDedicatedAllocation = false;
    14256  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14257  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14258 
    14259  res = allocator->AllocateMemory(
    14260  vkMemReq,
    14261  requiresDedicatedAllocation,
    14262  prefersDedicatedAllocation,
    14263  VK_NULL_HANDLE, // dedicatedBuffer
    14264  *pImage, // dedicatedImage
    14265  *pAllocationCreateInfo,
    14266  suballocType,
    14267  pAllocation);
    14268 
    14269 #if VMA_RECORDING_ENABLED
    14270  if(allocator->GetRecorder() != VMA_NULL)
    14271  {
    14272  allocator->GetRecorder()->RecordCreateImage(
    14273  allocator->GetCurrentFrameIndex(),
    14274  *pImageCreateInfo,
    14275  *pAllocationCreateInfo,
    14276  *pAllocation);
    14277  }
    14278 #endif
    14279 
    14280  if(res >= 0)
    14281  {
    14282  // 3. Bind image with memory.
    14283  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14284  if(res >= 0)
    14285  {
    14286  // All steps succeeded.
    14287  #if VMA_STATS_STRING_ENABLED
    14288  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14289  #endif
    14290  if(pAllocationInfo != VMA_NULL)
    14291  {
    14292  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14293  }
    14294 
    14295  return VK_SUCCESS;
    14296  }
    14297  allocator->FreeMemory(*pAllocation);
    14298  *pAllocation = VK_NULL_HANDLE;
    14299  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14300  *pImage = VK_NULL_HANDLE;
    14301  return res;
    14302  }
    14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14304  *pImage = VK_NULL_HANDLE;
    14305  return res;
    14306  }
    14307  return res;
    14308 }
    14309 
    14310 void vmaDestroyImage(
    14311  VmaAllocator allocator,
    14312  VkImage image,
    14313  VmaAllocation allocation)
    14314 {
    14315  VMA_ASSERT(allocator);
    14316 
    14317  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14318  {
    14319  return;
    14320  }
    14321 
    14322  VMA_DEBUG_LOG("vmaDestroyImage");
    14323 
    14324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14325 
    14326 #if VMA_RECORDING_ENABLED
    14327  if(allocator->GetRecorder() != VMA_NULL)
    14328  {
    14329  allocator->GetRecorder()->RecordDestroyImage(
    14330  allocator->GetCurrentFrameIndex(),
    14331  allocation);
    14332  }
    14333 #endif
    14334 
    14335  if(image != VK_NULL_HANDLE)
    14336  {
    14337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14338  }
    14339  if(allocation != VK_NULL_HANDLE)
    14340  {
    14341  allocator->FreeMemory(allocation);
    14342  }
    14343 }
    14344 
    14345 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1571
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1872
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1468 /*
    1469 Define this macro to 0/1 to disable/enable support for recording functionality,
    1470 available through VmaAllocatorCreateInfo::pRecordSettings.
    1471 */
    1472 #ifndef VMA_RECORDING_ENABLED
    1473  #ifdef _WIN32
    1474  #define VMA_RECORDING_ENABLED 1
    1475  #else
    1476  #define VMA_RECORDING_ENABLED 0
    1477  #endif
    1478 #endif
    1479 
    1480 #ifndef NOMINMAX
    1481  #define NOMINMAX // For windows.h
    1482 #endif
    1483 
    1484 #include <vulkan/vulkan.h>
    1485 
    1486 #if VMA_RECORDING_ENABLED
    1487  #include <windows.h>
    1488 #endif
    1489 
    1490 #if !defined(VMA_DEDICATED_ALLOCATION)
    1491  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1492  #define VMA_DEDICATED_ALLOCATION 1
    1493  #else
    1494  #define VMA_DEDICATED_ALLOCATION 0
    1495  #endif
    1496 #endif
    1497 
    1507 VK_DEFINE_HANDLE(VmaAllocator)
    1508 
    1509 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1511  VmaAllocator allocator,
    1512  uint32_t memoryType,
    1513  VkDeviceMemory memory,
    1514  VkDeviceSize size);
    1516 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1517  VmaAllocator allocator,
    1518  uint32_t memoryType,
    1519  VkDeviceMemory memory,
    1520  VkDeviceSize size);
    1521 
    1535 
    1565 
    1568 typedef VkFlags VmaAllocatorCreateFlags;
    1569 
    1574 typedef struct VmaVulkanFunctions {
    1575  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1576  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1577  PFN_vkAllocateMemory vkAllocateMemory;
    1578  PFN_vkFreeMemory vkFreeMemory;
    1579  PFN_vkMapMemory vkMapMemory;
    1580  PFN_vkUnmapMemory vkUnmapMemory;
    1581  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1582  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1583  PFN_vkBindBufferMemory vkBindBufferMemory;
    1584  PFN_vkBindImageMemory vkBindImageMemory;
    1585  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1586  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1587  PFN_vkCreateBuffer vkCreateBuffer;
    1588  PFN_vkDestroyBuffer vkDestroyBuffer;
    1589  PFN_vkCreateImage vkCreateImage;
    1590  PFN_vkDestroyImage vkDestroyImage;
    1591 #if VMA_DEDICATED_ALLOCATION
    1592  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1593  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1594 #endif
    1596 
    1598 typedef enum VmaRecordFlagBits {
    1605 
    1608 typedef VkFlags VmaRecordFlags;
    1609 
    1611 typedef struct VmaRecordSettings
    1612 {
    1622  const char* pFilePath;
    1624 
    1627 {
    1631 
    1632  VkPhysicalDevice physicalDevice;
    1634 
    1635  VkDevice device;
    1637 
    1640 
    1641  const VkAllocationCallbacks* pAllocationCallbacks;
    1643 
    1682  const VkDeviceSize* pHeapSizeLimit;
    1703 
    1705 VkResult vmaCreateAllocator(
    1706  const VmaAllocatorCreateInfo* pCreateInfo,
    1707  VmaAllocator* pAllocator);
    1708 
    1710 void vmaDestroyAllocator(
    1711  VmaAllocator allocator);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1720 
    1726  VmaAllocator allocator,
    1727  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1728 
    1736  VmaAllocator allocator,
    1737  uint32_t memoryTypeIndex,
    1738  VkMemoryPropertyFlags* pFlags);
    1739 
    1749  VmaAllocator allocator,
    1750  uint32_t frameIndex);
    1751 
    1754 typedef struct VmaStatInfo
    1755 {
    1757  uint32_t blockCount;
    1763  VkDeviceSize usedBytes;
    1765  VkDeviceSize unusedBytes;
    1768 } VmaStatInfo;
    1769 
    1771 typedef struct VmaStats
    1772 {
    1773  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1774  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1776 } VmaStats;
    1777 
    1779 void vmaCalculateStats(
    1780  VmaAllocator allocator,
    1781  VmaStats* pStats);
    1782 
    1783 #define VMA_STATS_STRING_ENABLED 1
    1784 
    1785 #if VMA_STATS_STRING_ENABLED
    1786 
    1788 
    1790 void vmaBuildStatsString(
    1791  VmaAllocator allocator,
    1792  char** ppStatsString,
    1793  VkBool32 detailedMap);
    1794 
    1795 void vmaFreeStatsString(
    1796  VmaAllocator allocator,
    1797  char* pStatsString);
    1798 
    1799 #endif // #if VMA_STATS_STRING_ENABLED
    1800 
    1809 VK_DEFINE_HANDLE(VmaPool)
    1810 
    1811 typedef enum VmaMemoryUsage
    1812 {
    1861 } VmaMemoryUsage;
    1862 
    1877 
    1932 
    1945 
    1955 
    1962 
    1966 
    1968 {
    1981  VkMemoryPropertyFlags requiredFlags;
    1986  VkMemoryPropertyFlags preferredFlags;
    1994  uint32_t memoryTypeBits;
    2007  void* pUserData;
    2009 
    2026 VkResult vmaFindMemoryTypeIndex(
    2027  VmaAllocator allocator,
    2028  uint32_t memoryTypeBits,
    2029  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2030  uint32_t* pMemoryTypeIndex);
    2031 
    2045  VmaAllocator allocator,
    2046  const VkBufferCreateInfo* pBufferCreateInfo,
    2047  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2048  uint32_t* pMemoryTypeIndex);
    2049 
    2063  VmaAllocator allocator,
    2064  const VkImageCreateInfo* pImageCreateInfo,
    2065  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2066  uint32_t* pMemoryTypeIndex);
    2067 
    2088 
    2105 
    2116 
    2122 
    2125 typedef VkFlags VmaPoolCreateFlags;
    2126 
    2129 typedef struct VmaPoolCreateInfo {
    2144  VkDeviceSize blockSize;
    2173 
    2176 typedef struct VmaPoolStats {
    2179  VkDeviceSize size;
    2182  VkDeviceSize unusedSize;
    2195  VkDeviceSize unusedRangeSizeMax;
    2198  size_t blockCount;
    2199 } VmaPoolStats;
    2200 
    2207 VkResult vmaCreatePool(
    2208  VmaAllocator allocator,
    2209  const VmaPoolCreateInfo* pCreateInfo,
    2210  VmaPool* pPool);
    2211 
    2214 void vmaDestroyPool(
    2215  VmaAllocator allocator,
    2216  VmaPool pool);
    2217 
    2224 void vmaGetPoolStats(
    2225  VmaAllocator allocator,
    2226  VmaPool pool,
    2227  VmaPoolStats* pPoolStats);
    2228 
    2236  VmaAllocator allocator,
    2237  VmaPool pool,
    2238  size_t* pLostAllocationCount);
    2239 
    2254 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2255 
    2280 VK_DEFINE_HANDLE(VmaAllocation)
    2281 
    2282 
    2284 typedef struct VmaAllocationInfo {
    2289  uint32_t memoryType;
    2298  VkDeviceMemory deviceMemory;
    2303  VkDeviceSize offset;
    2308  VkDeviceSize size;
    2322  void* pUserData;
    2324 
    2335 VkResult vmaAllocateMemory(
    2336  VmaAllocator allocator,
    2337  const VkMemoryRequirements* pVkMemoryRequirements,
    2338  const VmaAllocationCreateInfo* pCreateInfo,
    2339  VmaAllocation* pAllocation,
    2340  VmaAllocationInfo* pAllocationInfo);
    2341 
    2349  VmaAllocator allocator,
    2350  VkBuffer buffer,
    2351  const VmaAllocationCreateInfo* pCreateInfo,
    2352  VmaAllocation* pAllocation,
    2353  VmaAllocationInfo* pAllocationInfo);
    2354 
    2356 VkResult vmaAllocateMemoryForImage(
    2357  VmaAllocator allocator,
    2358  VkImage image,
    2359  const VmaAllocationCreateInfo* pCreateInfo,
    2360  VmaAllocation* pAllocation,
    2361  VmaAllocationInfo* pAllocationInfo);
    2362 
    2364 void vmaFreeMemory(
    2365  VmaAllocator allocator,
    2366  VmaAllocation allocation);
    2367 
    2385  VmaAllocator allocator,
    2386  VmaAllocation allocation,
    2387  VmaAllocationInfo* pAllocationInfo);
    2388 
    2403 VkBool32 vmaTouchAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation);
    2406 
    2421  VmaAllocator allocator,
    2422  VmaAllocation allocation,
    2423  void* pUserData);
    2424 
    2436  VmaAllocator allocator,
    2437  VmaAllocation* pAllocation);
    2438 
    2473 VkResult vmaMapMemory(
    2474  VmaAllocator allocator,
    2475  VmaAllocation allocation,
    2476  void** ppData);
    2477 
    2482 void vmaUnmapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation);
    2485 
    2498 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2499 
    2512 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2513 
    2530 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2531 
    2533 typedef struct VmaDefragmentationInfo {
    2538  VkDeviceSize maxBytesToMove;
    2545 
    2547 typedef struct VmaDefragmentationStats {
    2549  VkDeviceSize bytesMoved;
    2551  VkDeviceSize bytesFreed;
    2557 
    2596 VkResult vmaDefragment(
    2597  VmaAllocator allocator,
    2598  VmaAllocation* pAllocations,
    2599  size_t allocationCount,
    2600  VkBool32* pAllocationsChanged,
    2601  const VmaDefragmentationInfo *pDefragmentationInfo,
    2602  VmaDefragmentationStats* pDefragmentationStats);
    2603 
    2616 VkResult vmaBindBufferMemory(
    2617  VmaAllocator allocator,
    2618  VmaAllocation allocation,
    2619  VkBuffer buffer);
    2620 
    2633 VkResult vmaBindImageMemory(
    2634  VmaAllocator allocator,
    2635  VmaAllocation allocation,
    2636  VkImage image);
    2637 
    2664 VkResult vmaCreateBuffer(
    2665  VmaAllocator allocator,
    2666  const VkBufferCreateInfo* pBufferCreateInfo,
    2667  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2668  VkBuffer* pBuffer,
    2669  VmaAllocation* pAllocation,
    2670  VmaAllocationInfo* pAllocationInfo);
    2671 
    2683 void vmaDestroyBuffer(
    2684  VmaAllocator allocator,
    2685  VkBuffer buffer,
    2686  VmaAllocation allocation);
    2687 
    2689 VkResult vmaCreateImage(
    2690  VmaAllocator allocator,
    2691  const VkImageCreateInfo* pImageCreateInfo,
    2692  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2693  VkImage* pImage,
    2694  VmaAllocation* pAllocation,
    2695  VmaAllocationInfo* pAllocationInfo);
    2696 
    2708 void vmaDestroyImage(
    2709  VmaAllocator allocator,
    2710  VkImage image,
    2711  VmaAllocation allocation);
    2712 
    2713 #ifdef __cplusplus
    2714 }
    2715 #endif
    2716 
    2717 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2718 
    2719 // For Visual Studio IntelliSense.
    2720 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2721 #define VMA_IMPLEMENTATION
    2722 #endif
    2723 
    2724 #ifdef VMA_IMPLEMENTATION
    2725 #undef VMA_IMPLEMENTATION
    2726 
    2727 #include <cstdint>
    2728 #include <cstdlib>
    2729 #include <cstring>
    2730 
    2731 /*******************************************************************************
    2732 CONFIGURATION SECTION
    2733 
    2734 Define some of these macros before each #include of this header or change them
    2735 here if you need other then default behavior depending on your environment.
    2736 */
    2737 
    2738 /*
    2739 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2740 internally, like:
    2741 
    2742  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2743 
    2744 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2745 VmaAllocatorCreateInfo::pVulkanFunctions.
    2746 */
    2747 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2748 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2749 #endif
    2750 
    2751 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2752 //#define VMA_USE_STL_CONTAINERS 1
    2753 
    2754 /* Set this macro to 1 to make the library including and using STL containers:
    2755 std::pair, std::vector, std::list, std::unordered_map.
    2756 
    2757 Set it to 0 or undefined to make the library using its own implementation of
    2758 the containers.
    2759 */
    2760 #if VMA_USE_STL_CONTAINERS
    2761  #define VMA_USE_STL_VECTOR 1
    2762  #define VMA_USE_STL_UNORDERED_MAP 1
    2763  #define VMA_USE_STL_LIST 1
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_VECTOR
    2767  #include <vector>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_UNORDERED_MAP
    2771  #include <unordered_map>
    2772 #endif
    2773 
    2774 #if VMA_USE_STL_LIST
    2775  #include <list>
    2776 #endif
    2777 
    2778 /*
    2779 Following headers are used in this CONFIGURATION section only, so feel free to
    2780 remove them if not needed.
    2781 */
    2782 #include <cassert> // for assert
    2783 #include <algorithm> // for min, max
    2784 #include <mutex> // for std::mutex
    2785 #include <atomic> // for std::atomic
    2786 
    2787 #ifndef VMA_NULL
    2788  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2789  #define VMA_NULL nullptr
    2790 #endif
    2791 
    2792 #if defined(__APPLE__) || defined(__ANDROID__)
    2793 #include <cstdlib>
    2794 void *aligned_alloc(size_t alignment, size_t size)
    2795 {
    2796  // alignment must be >= sizeof(void*)
    2797  if(alignment < sizeof(void*))
    2798  {
    2799  alignment = sizeof(void*);
    2800  }
    2801 
    2802  void *pointer;
    2803  if(posix_memalign(&pointer, alignment, size) == 0)
    2804  return pointer;
    2805  return VMA_NULL;
    2806 }
    2807 #endif
    2808 
    2809 // If your compiler is not compatible with C++11 and definition of
    2810 // aligned_alloc() function is missing, uncommeting following line may help:
    2811 
    2812 //#include <malloc.h>
    2813 
    2814 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2815 #ifndef VMA_ASSERT
    2816  #ifdef _DEBUG
    2817  #define VMA_ASSERT(expr) assert(expr)
    2818  #else
    2819  #define VMA_ASSERT(expr)
    2820  #endif
    2821 #endif
    2822 
    2823 // Assert that will be called very often, like inside data structures e.g. operator[].
    2824 // Making it non-empty can make program slow.
    2825 #ifndef VMA_HEAVY_ASSERT
    2826  #ifdef _DEBUG
    2827  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2828  #else
    2829  #define VMA_HEAVY_ASSERT(expr)
    2830  #endif
    2831 #endif
    2832 
    2833 #ifndef VMA_ALIGN_OF
    2834  #define VMA_ALIGN_OF(type) (__alignof(type))
    2835 #endif
    2836 
    2837 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2838  #if defined(_WIN32)
    2839  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2840  #else
    2841  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2842  #endif
    2843 #endif
    2844 
    2845 #ifndef VMA_SYSTEM_FREE
    2846  #if defined(_WIN32)
    2847  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2848  #else
    2849  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2850  #endif
    2851 #endif
    2852 
    2853 #ifndef VMA_MIN
    2854  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_MAX
    2858  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2859 #endif
    2860 
    2861 #ifndef VMA_SWAP
    2862  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2863 #endif
    2864 
    2865 #ifndef VMA_SORT
    2866  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2867 #endif
    2868 
    2869 #ifndef VMA_DEBUG_LOG
    2870  #define VMA_DEBUG_LOG(format, ...)
    2871  /*
    2872  #define VMA_DEBUG_LOG(format, ...) do { \
    2873  printf(format, __VA_ARGS__); \
    2874  printf("\n"); \
    2875  } while(false)
    2876  */
    2877 #endif
    2878 
    2879 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2880 #if VMA_STATS_STRING_ENABLED
    2881  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2882  {
    2883  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2884  }
    2885  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2886  {
    2887  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2888  }
    2889  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2890  {
    2891  snprintf(outStr, strLen, "%p", ptr);
    2892  }
    2893 #endif
    2894 
    2895 #ifndef VMA_MUTEX
    2896  class VmaMutex
    2897  {
    2898  public:
    2899  VmaMutex() { }
    2900  ~VmaMutex() { }
    2901  void Lock() { m_Mutex.lock(); }
    2902  void Unlock() { m_Mutex.unlock(); }
    2903  private:
    2904  std::mutex m_Mutex;
    2905  };
    2906  #define VMA_MUTEX VmaMutex
    2907 #endif
    2908 
    2909 /*
    2910 If providing your own implementation, you need to implement a subset of std::atomic:
    2911 
    2912 - Constructor(uint32_t desired)
    2913 - uint32_t load() const
    2914 - void store(uint32_t desired)
    2915 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2916 */
    2917 #ifndef VMA_ATOMIC_UINT32
    2918  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2922 
    2926  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2927 #endif
    2928 
    2929 #ifndef VMA_DEBUG_ALIGNMENT
    2930 
    2934  #define VMA_DEBUG_ALIGNMENT (1)
    2935 #endif
    2936 
    2937 #ifndef VMA_DEBUG_MARGIN
    2938 
    2942  #define VMA_DEBUG_MARGIN (0)
    2943 #endif
    2944 
    2945 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2946 
    2950  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2951 #endif
    2952 
    2953 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2954 
    2959  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2963 
    2967  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2968 #endif
    2969 
    2970 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2971 
    2975  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2976 #endif
    2977 
    2978 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2979  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2984  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2986 #endif
    2987 
    2988 #ifndef VMA_CLASS_NO_COPY
    2989  #define VMA_CLASS_NO_COPY(className) \
    2990  private: \
    2991  className(const className&) = delete; \
    2992  className& operator=(const className&) = delete;
    2993 #endif
    2994 
    2995 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2996 
    2997 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2998 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2999 
    3000 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3001 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3002 
    3003 /*******************************************************************************
    3004 END OF CONFIGURATION
    3005 */
    3006 
    3007 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3008  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3009 
    3010 // Returns number of bits set to 1 in (v).
    3011 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3012 {
    3013  uint32_t c = v - ((v >> 1) & 0x55555555);
    3014  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3015  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3016  c = ((c >> 8) + c) & 0x00FF00FF;
    3017  c = ((c >> 16) + c) & 0x0000FFFF;
    3018  return c;
    3019 }
    3020 
    3021 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3022 // Use types like uint32_t, uint64_t as T.
    3023 template <typename T>
    3024 static inline T VmaAlignUp(T val, T align)
    3025 {
    3026  return (val + align - 1) / align * align;
    3027 }
    3028 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3029 // Use types like uint32_t, uint64_t as T.
    3030 template <typename T>
    3031 static inline T VmaAlignDown(T val, T align)
    3032 {
    3033  return val / align * align;
    3034 }
    3035 
    3036 // Division with mathematical rounding to nearest number.
    3037 template <typename T>
    3038 static inline T VmaRoundDiv(T x, T y)
    3039 {
    3040  return (x + (y / (T)2)) / y;
    3041 }
    3042 
    3043 /*
    3044 Returns true if given number is a power of two.
    3045 T must be unsigned integer number or signed integer but always nonnegative.
    3046 For 0 returns true.
    3047 */
    3048 template <typename T>
    3049 inline bool VmaIsPow2(T x)
    3050 {
    3051  return (x & (x-1)) == 0;
    3052 }
    3053 
    3054 // Returns smallest power of 2 greater or equal to v.
    3055 static inline uint32_t VmaNextPow2(uint32_t v)
    3056 {
    3057  v--;
    3058  v |= v >> 1;
    3059  v |= v >> 2;
    3060  v |= v >> 4;
    3061  v |= v >> 8;
    3062  v |= v >> 16;
    3063  v++;
    3064  return v;
    3065 }
    3066 static inline uint64_t VmaNextPow2(uint64_t v)
    3067 {
    3068  v--;
    3069  v |= v >> 1;
    3070  v |= v >> 2;
    3071  v |= v >> 4;
    3072  v |= v >> 8;
    3073  v |= v >> 16;
    3074  v |= v >> 32;
    3075  v++;
    3076  return v;
    3077 }
    3078 
    3079 // Returns largest power of 2 less or equal to v.
    3080 static inline uint32_t VmaPrevPow2(uint32_t v)
    3081 {
    3082  v |= v >> 1;
    3083  v |= v >> 2;
    3084  v |= v >> 4;
    3085  v |= v >> 8;
    3086  v |= v >> 16;
    3087  v = v ^ (v >> 1);
    3088  return v;
    3089 }
    3090 static inline uint64_t VmaPrevPow2(uint64_t v)
    3091 {
    3092  v |= v >> 1;
    3093  v |= v >> 2;
    3094  v |= v >> 4;
    3095  v |= v >> 8;
    3096  v |= v >> 16;
    3097  v |= v >> 32;
    3098  v = v ^ (v >> 1);
    3099  return v;
    3100 }
    3101 
    3102 static inline bool VmaStrIsEmpty(const char* pStr)
    3103 {
    3104  return pStr == VMA_NULL || *pStr == '\0';
    3105 }
    3106 
    3107 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3108 {
    3109  switch(algorithm)
    3110  {
    3112  return "Linear";
    3114  return "Buddy";
    3115  case 0:
    3116  return "Default";
    3117  default:
    3118  VMA_ASSERT(0);
    3119  return "";
    3120  }
    3121 }
    3122 
    3123 #ifndef VMA_SORT
    3124 
    3125 template<typename Iterator, typename Compare>
    3126 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3127 {
    3128  Iterator centerValue = end; --centerValue;
    3129  Iterator insertIndex = beg;
    3130  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3131  {
    3132  if(cmp(*memTypeIndex, *centerValue))
    3133  {
    3134  if(insertIndex != memTypeIndex)
    3135  {
    3136  VMA_SWAP(*memTypeIndex, *insertIndex);
    3137  }
    3138  ++insertIndex;
    3139  }
    3140  }
    3141  if(insertIndex != centerValue)
    3142  {
    3143  VMA_SWAP(*insertIndex, *centerValue);
    3144  }
    3145  return insertIndex;
    3146 }
    3147 
    3148 template<typename Iterator, typename Compare>
    3149 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3150 {
    3151  if(beg < end)
    3152  {
    3153  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3154  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3155  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3156  }
    3157 }
    3158 
    3159 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3160 
    3161 #endif // #ifndef VMA_SORT
    3162 
    3163 /*
    3164 Returns true if two memory blocks occupy overlapping pages.
    3165 ResourceA must be in less memory offset than ResourceB.
    3166 
    3167 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3168 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3169 */
    3170 static inline bool VmaBlocksOnSamePage(
    3171  VkDeviceSize resourceAOffset,
    3172  VkDeviceSize resourceASize,
    3173  VkDeviceSize resourceBOffset,
    3174  VkDeviceSize pageSize)
    3175 {
    3176  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3177  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3178  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3179  VkDeviceSize resourceBStart = resourceBOffset;
    3180  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3181  return resourceAEndPage == resourceBStartPage;
    3182 }
    3183 
    3184 enum VmaSuballocationType
    3185 {
    3186  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3187  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3188  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3189  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3190  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3191  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3192  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3193 };
    3194 
    3195 /*
    3196 Returns true if given suballocation types could conflict and must respect
    3197 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3198 or linear image and another one is optimal image. If type is unknown, behave
    3199 conservatively.
    3200 */
    3201 static inline bool VmaIsBufferImageGranularityConflict(
    3202  VmaSuballocationType suballocType1,
    3203  VmaSuballocationType suballocType2)
    3204 {
    3205  if(suballocType1 > suballocType2)
    3206  {
    3207  VMA_SWAP(suballocType1, suballocType2);
    3208  }
    3209 
    3210  switch(suballocType1)
    3211  {
    3212  case VMA_SUBALLOCATION_TYPE_FREE:
    3213  return false;
    3214  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3215  return true;
    3216  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3217  return
    3218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3220  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3221  return
    3222  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3224  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3225  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3226  return
    3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3228  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3229  return false;
    3230  default:
    3231  VMA_ASSERT(0);
    3232  return true;
    3233  }
    3234 }
    3235 
    3236 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3237 {
    3238  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3239  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3240  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3241  {
    3242  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3243  }
    3244 }
    3245 
    3246 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3247 {
    3248  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3249  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3250  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3251  {
    3252  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3253  {
    3254  return false;
    3255  }
    3256  }
    3257  return true;
    3258 }
    3259 
    3260 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3261 struct VmaMutexLock
    3262 {
    3263  VMA_CLASS_NO_COPY(VmaMutexLock)
    3264 public:
    3265  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3266  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3267  {
    3268  if(m_pMutex)
    3269  {
    3270  m_pMutex->Lock();
    3271  }
    3272  }
    3273 
    3274  ~VmaMutexLock()
    3275  {
    3276  if(m_pMutex)
    3277  {
    3278  m_pMutex->Unlock();
    3279  }
    3280  }
    3281 
    3282 private:
    3283  VMA_MUTEX* m_pMutex;
    3284 };
    3285 
    3286 #if VMA_DEBUG_GLOBAL_MUTEX
    3287  static VMA_MUTEX gDebugGlobalMutex;
    3288  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3289 #else
    3290  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3291 #endif
    3292 
    3293 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3294 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3295 
    3296 /*
    3297 Performs binary search and returns iterator to first element that is greater or
    3298 equal to (key), according to comparison (cmp).
    3299 
    3300 Cmp should return true if first argument is less than second argument.
    3301 
    3302 Returned value is the found element, if present in the collection or place where
    3303 new element with value (key) should be inserted.
    3304 */
    3305 template <typename CmpLess, typename IterT, typename KeyT>
    3306 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3307 {
    3308  size_t down = 0, up = (end - beg);
    3309  while(down < up)
    3310  {
    3311  const size_t mid = (down + up) / 2;
    3312  if(cmp(*(beg+mid), key))
    3313  {
    3314  down = mid + 1;
    3315  }
    3316  else
    3317  {
    3318  up = mid;
    3319  }
    3320  }
    3321  return beg + down;
    3322 }
    3323 
    3325 // Memory allocation
    3326 
    3327 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3328 {
    3329  if((pAllocationCallbacks != VMA_NULL) &&
    3330  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3331  {
    3332  return (*pAllocationCallbacks->pfnAllocation)(
    3333  pAllocationCallbacks->pUserData,
    3334  size,
    3335  alignment,
    3336  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3337  }
    3338  else
    3339  {
    3340  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3341  }
    3342 }
    3343 
    3344 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3345 {
    3346  if((pAllocationCallbacks != VMA_NULL) &&
    3347  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3348  {
    3349  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3350  }
    3351  else
    3352  {
    3353  VMA_SYSTEM_FREE(ptr);
    3354  }
    3355 }
    3356 
    3357 template<typename T>
    3358 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3359 {
    3360  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3361 }
    3362 
    3363 template<typename T>
    3364 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3365 {
    3366  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3367 }
    3368 
    3369 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3370 
    3371 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3372 
    3373 template<typename T>
    3374 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3375 {
    3376  ptr->~T();
    3377  VmaFree(pAllocationCallbacks, ptr);
    3378 }
    3379 
    3380 template<typename T>
    3381 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3382 {
    3383  if(ptr != VMA_NULL)
    3384  {
    3385  for(size_t i = count; i--; )
    3386  {
    3387  ptr[i].~T();
    3388  }
    3389  VmaFree(pAllocationCallbacks, ptr);
    3390  }
    3391 }
    3392 
    3393 // STL-compatible allocator.
    3394 template<typename T>
    3395 class VmaStlAllocator
    3396 {
    3397 public:
    3398  const VkAllocationCallbacks* const m_pCallbacks;
    3399  typedef T value_type;
    3400 
    3401  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3402  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3403 
    3404  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3405  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3406 
    3407  template<typename U>
    3408  bool operator==(const VmaStlAllocator<U>& rhs) const
    3409  {
    3410  return m_pCallbacks == rhs.m_pCallbacks;
    3411  }
    3412  template<typename U>
    3413  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3414  {
    3415  return m_pCallbacks != rhs.m_pCallbacks;
    3416  }
    3417 
    3418  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3419 };
    3420 
    3421 #if VMA_USE_STL_VECTOR
    3422 
    3423 #define VmaVector std::vector
    3424 
    3425 template<typename T, typename allocatorT>
    3426 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3427 {
    3428  vec.insert(vec.begin() + index, item);
    3429 }
    3430 
    3431 template<typename T, typename allocatorT>
    3432 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3433 {
    3434  vec.erase(vec.begin() + index);
    3435 }
    3436 
    3437 #else // #if VMA_USE_STL_VECTOR
    3438 
    3439 /* Class with interface compatible with subset of std::vector.
    3440 T must be POD because constructors and destructors are not called and memcpy is
    3441 used for these objects. */
    3442 template<typename T, typename AllocatorT>
    3443 class VmaVector
    3444 {
    3445 public:
    3446  typedef T value_type;
    3447 
    3448  VmaVector(const AllocatorT& allocator) :
    3449  m_Allocator(allocator),
    3450  m_pArray(VMA_NULL),
    3451  m_Count(0),
    3452  m_Capacity(0)
    3453  {
    3454  }
    3455 
    3456  VmaVector(size_t count, const AllocatorT& allocator) :
    3457  m_Allocator(allocator),
    3458  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3459  m_Count(count),
    3460  m_Capacity(count)
    3461  {
    3462  }
    3463 
    3464  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3465  m_Allocator(src.m_Allocator),
    3466  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3467  m_Count(src.m_Count),
    3468  m_Capacity(src.m_Count)
    3469  {
    3470  if(m_Count != 0)
    3471  {
    3472  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3473  }
    3474  }
    3475 
    3476  ~VmaVector()
    3477  {
    3478  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3479  }
    3480 
    3481  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3482  {
    3483  if(&rhs != this)
    3484  {
    3485  resize(rhs.m_Count);
    3486  if(m_Count != 0)
    3487  {
    3488  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3489  }
    3490  }
    3491  return *this;
    3492  }
    3493 
    3494  bool empty() const { return m_Count == 0; }
    3495  size_t size() const { return m_Count; }
    3496  T* data() { return m_pArray; }
    3497  const T* data() const { return m_pArray; }
    3498 
    3499  T& operator[](size_t index)
    3500  {
    3501  VMA_HEAVY_ASSERT(index < m_Count);
    3502  return m_pArray[index];
    3503  }
    3504  const T& operator[](size_t index) const
    3505  {
    3506  VMA_HEAVY_ASSERT(index < m_Count);
    3507  return m_pArray[index];
    3508  }
    3509 
    3510  T& front()
    3511  {
    3512  VMA_HEAVY_ASSERT(m_Count > 0);
    3513  return m_pArray[0];
    3514  }
    3515  const T& front() const
    3516  {
    3517  VMA_HEAVY_ASSERT(m_Count > 0);
    3518  return m_pArray[0];
    3519  }
    3520  T& back()
    3521  {
    3522  VMA_HEAVY_ASSERT(m_Count > 0);
    3523  return m_pArray[m_Count - 1];
    3524  }
    3525  const T& back() const
    3526  {
    3527  VMA_HEAVY_ASSERT(m_Count > 0);
    3528  return m_pArray[m_Count - 1];
    3529  }
    3530 
    3531  void reserve(size_t newCapacity, bool freeMemory = false)
    3532  {
    3533  newCapacity = VMA_MAX(newCapacity, m_Count);
    3534 
    3535  if((newCapacity < m_Capacity) && !freeMemory)
    3536  {
    3537  newCapacity = m_Capacity;
    3538  }
    3539 
    3540  if(newCapacity != m_Capacity)
    3541  {
    3542  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3543  if(m_Count != 0)
    3544  {
    3545  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3546  }
    3547  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3548  m_Capacity = newCapacity;
    3549  m_pArray = newArray;
    3550  }
    3551  }
    3552 
    3553  void resize(size_t newCount, bool freeMemory = false)
    3554  {
    3555  size_t newCapacity = m_Capacity;
    3556  if(newCount > m_Capacity)
    3557  {
    3558  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3559  }
    3560  else if(freeMemory)
    3561  {
    3562  newCapacity = newCount;
    3563  }
    3564 
    3565  if(newCapacity != m_Capacity)
    3566  {
    3567  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3568  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3569  if(elementsToCopy != 0)
    3570  {
    3571  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3572  }
    3573  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3574  m_Capacity = newCapacity;
    3575  m_pArray = newArray;
    3576  }
    3577 
    3578  m_Count = newCount;
    3579  }
    3580 
    3581  void clear(bool freeMemory = false)
    3582  {
    3583  resize(0, freeMemory);
    3584  }
    3585 
    3586  void insert(size_t index, const T& src)
    3587  {
    3588  VMA_HEAVY_ASSERT(index <= m_Count);
    3589  const size_t oldCount = size();
    3590  resize(oldCount + 1);
    3591  if(index < oldCount)
    3592  {
    3593  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3594  }
    3595  m_pArray[index] = src;
    3596  }
    3597 
    3598  void remove(size_t index)
    3599  {
    3600  VMA_HEAVY_ASSERT(index < m_Count);
    3601  const size_t oldCount = size();
    3602  if(index < oldCount - 1)
    3603  {
    3604  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3605  }
    3606  resize(oldCount - 1);
    3607  }
    3608 
    3609  void push_back(const T& src)
    3610  {
    3611  const size_t newIndex = size();
    3612  resize(newIndex + 1);
    3613  m_pArray[newIndex] = src;
    3614  }
    3615 
    3616  void pop_back()
    3617  {
    3618  VMA_HEAVY_ASSERT(m_Count > 0);
    3619  resize(size() - 1);
    3620  }
    3621 
    3622  void push_front(const T& src)
    3623  {
    3624  insert(0, src);
    3625  }
    3626 
    3627  void pop_front()
    3628  {
    3629  VMA_HEAVY_ASSERT(m_Count > 0);
    3630  remove(0);
    3631  }
    3632 
    3633  typedef T* iterator;
    3634 
    3635  iterator begin() { return m_pArray; }
    3636  iterator end() { return m_pArray + m_Count; }
    3637 
    3638 private:
    3639  AllocatorT m_Allocator;
    3640  T* m_pArray;
    3641  size_t m_Count;
    3642  size_t m_Capacity;
    3643 };
    3644 
    3645 template<typename T, typename allocatorT>
    3646 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3647 {
    3648  vec.insert(index, item);
    3649 }
    3650 
    3651 template<typename T, typename allocatorT>
    3652 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3653 {
    3654  vec.remove(index);
    3655 }
    3656 
    3657 #endif // #if VMA_USE_STL_VECTOR
    3658 
    3659 template<typename CmpLess, typename VectorT>
    3660 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3661 {
    3662  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3663  vector.data(),
    3664  vector.data() + vector.size(),
    3665  value,
    3666  CmpLess()) - vector.data();
    3667  VmaVectorInsert(vector, indexToInsert, value);
    3668  return indexToInsert;
    3669 }
    3670 
    3671 template<typename CmpLess, typename VectorT>
    3672 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3673 {
    3674  CmpLess comparator;
    3675  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3676  vector.begin(),
    3677  vector.end(),
    3678  value,
    3679  comparator);
    3680  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3681  {
    3682  size_t indexToRemove = it - vector.begin();
    3683  VmaVectorRemove(vector, indexToRemove);
    3684  return true;
    3685  }
    3686  return false;
    3687 }
    3688 
    3689 template<typename CmpLess, typename IterT, typename KeyT>
    3690 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3691 {
    3692  CmpLess comparator;
    3693  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3694  beg, end, value, comparator);
    3695  if(it == end ||
    3696  (!comparator(*it, value) && !comparator(value, *it)))
    3697  {
    3698  return it;
    3699  }
    3700  return end;
    3701 }
    3702 
    3704 // class VmaPoolAllocator
    3705 
    3706 /*
    3707 Allocator for objects of type T using a list of arrays (pools) to speed up
    3708 allocation. Number of elements that can be allocated is not bounded because
    3709 allocator can create multiple blocks.
    3710 */
    3711 template<typename T>
    3712 class VmaPoolAllocator
    3713 {
    3714  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    3715 public:
    3716  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    3717  ~VmaPoolAllocator();
    3718  void Clear();
    3719  T* Alloc();
    3720  void Free(T* ptr);
    3721 
    3722 private:
    3723  union Item
    3724  {
    3725  uint32_t NextFreeIndex;
    3726  T Value;
    3727  };
    3728 
    3729  struct ItemBlock
    3730  {
    3731  Item* pItems;
    3732  uint32_t FirstFreeIndex;
    3733  };
    3734 
    3735  const VkAllocationCallbacks* m_pAllocationCallbacks;
    3736  size_t m_ItemsPerBlock;
    3737  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    3738 
    3739  ItemBlock& CreateNewBlock();
    3740 };
    3741 
    3742 template<typename T>
    3743 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    3744  m_pAllocationCallbacks(pAllocationCallbacks),
    3745  m_ItemsPerBlock(itemsPerBlock),
    3746  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    3747 {
    3748  VMA_ASSERT(itemsPerBlock > 0);
    3749 }
    3750 
    3751 template<typename T>
    3752 VmaPoolAllocator<T>::~VmaPoolAllocator()
    3753 {
    3754  Clear();
    3755 }
    3756 
    3757 template<typename T>
    3758 void VmaPoolAllocator<T>::Clear()
    3759 {
    3760  for(size_t i = m_ItemBlocks.size(); i--; )
    3761  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3762  m_ItemBlocks.clear();
    3763 }
    3764 
    3765 template<typename T>
    3766 T* VmaPoolAllocator<T>::Alloc()
    3767 {
    3768  for(size_t i = m_ItemBlocks.size(); i--; )
    3769  {
    3770  ItemBlock& block = m_ItemBlocks[i];
    3771  // This block has some free items: Use first one.
    3772  if(block.FirstFreeIndex != UINT32_MAX)
    3773  {
    3774  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3775  block.FirstFreeIndex = pItem->NextFreeIndex;
    3776  return &pItem->Value;
    3777  }
    3778  }
    3779 
    3780  // No block has free item: Create new one and use it.
    3781  ItemBlock& newBlock = CreateNewBlock();
    3782  Item* const pItem = &newBlock.pItems[0];
    3783  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3784  return &pItem->Value;
    3785 }
    3786 
    3787 template<typename T>
    3788 void VmaPoolAllocator<T>::Free(T* ptr)
    3789 {
    3790  // Search all memory blocks to find ptr.
    3791  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3792  {
    3793  ItemBlock& block = m_ItemBlocks[i];
    3794 
    3795  // Casting to union.
    3796  Item* pItemPtr;
    3797  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3798 
    3799  // Check if pItemPtr is in address range of this block.
    3800  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3801  {
    3802  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3803  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3804  block.FirstFreeIndex = index;
    3805  return;
    3806  }
    3807  }
    3808  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3809 }
    3810 
    3811 template<typename T>
    3812 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3813 {
    3814  ItemBlock newBlock = {
    3815  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3816 
    3817  m_ItemBlocks.push_back(newBlock);
    3818 
    3819  // Setup singly-linked list of all free items in this block.
    3820  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3821  newBlock.pItems[i].NextFreeIndex = i + 1;
    3822  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3823  return m_ItemBlocks.back();
    3824 }
    3825 
    3827 // class VmaRawList, VmaList
    3828 
    3829 #if VMA_USE_STL_LIST
    3830 
    3831 #define VmaList std::list
    3832 
    3833 #else // #if VMA_USE_STL_LIST
    3834 
    3835 template<typename T>
    3836 struct VmaListItem
    3837 {
    3838  VmaListItem* pPrev;
    3839  VmaListItem* pNext;
    3840  T Value;
    3841 };
    3842 
    3843 // Doubly linked list.
// Low-level doubly linked list of T, used when VMA_USE_STL_LIST is disabled.
// Nodes come from an internal VmaPoolAllocator (128 items per pool block),
// which in turn uses the given VkAllocationCallbacks. Exposes raw
// VmaListItem<T> nodes; VmaList below wraps it with an STL-like interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Return the first/last node pointer; null when the list is empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new node's Value member untouched.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns its node to the pool allocator.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool of ItemType nodes.
    ItemType* m_pFront; // First node or null when empty.
    ItemType* m_pBack;  // Last node or null when empty.
    size_t m_Count;     // Number of items currently stored.
};
    3887 
// Constructs an empty list. All node allocations go through the internal
// pool allocator (128 items per block) using pAllocationCallbacks.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3897 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // Node memory is presumably reclaimed wholesale when m_ItemAllocator
    // itself is destroyed - see VmaPoolAllocator.
}
    3904 
    3905 template<typename T>
    3906 void VmaRawList<T>::Clear()
    3907 {
    3908  if(IsEmpty() == false)
    3909  {
    3910  ItemType* pItem = m_pBack;
    3911  while(pItem != VMA_NULL)
    3912  {
    3913  ItemType* const pPrevItem = pItem->pPrev;
    3914  m_ItemAllocator.Free(pItem);
    3915  pItem = pPrevItem;
    3916  }
    3917  m_pFront = VMA_NULL;
    3918  m_pBack = VMA_NULL;
    3919  m_Count = 0;
    3920  }
    3921 }
    3922 
    3923 template<typename T>
    3924 VmaListItem<T>* VmaRawList<T>::PushBack()
    3925 {
    3926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3927  pNewItem->pNext = VMA_NULL;
    3928  if(IsEmpty())
    3929  {
    3930  pNewItem->pPrev = VMA_NULL;
    3931  m_pFront = pNewItem;
    3932  m_pBack = pNewItem;
    3933  m_Count = 1;
    3934  }
    3935  else
    3936  {
    3937  pNewItem->pPrev = m_pBack;
    3938  m_pBack->pNext = pNewItem;
    3939  m_pBack = pNewItem;
    3940  ++m_Count;
    3941  }
    3942  return pNewItem;
    3943 }
    3944 
    3945 template<typename T>
    3946 VmaListItem<T>* VmaRawList<T>::PushFront()
    3947 {
    3948  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3949  pNewItem->pPrev = VMA_NULL;
    3950  if(IsEmpty())
    3951  {
    3952  pNewItem->pNext = VMA_NULL;
    3953  m_pFront = pNewItem;
    3954  m_pBack = pNewItem;
    3955  m_Count = 1;
    3956  }
    3957  else
    3958  {
    3959  pNewItem->pNext = m_pFront;
    3960  m_pFront->pPrev = pNewItem;
    3961  m_pFront = pNewItem;
    3962  ++m_Count;
    3963  }
    3964  return pNewItem;
    3965 }
    3966 
    3967 template<typename T>
    3968 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3969 {
    3970  ItemType* const pNewItem = PushBack();
    3971  pNewItem->Value = value;
    3972  return pNewItem;
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3977 {
    3978  ItemType* const pNewItem = PushFront();
    3979  pNewItem->Value = value;
    3980  return pNewItem;
    3981 }
    3982 
    3983 template<typename T>
    3984 void VmaRawList<T>::PopBack()
    3985 {
    3986  VMA_HEAVY_ASSERT(m_Count > 0);
    3987  ItemType* const pBackItem = m_pBack;
    3988  ItemType* const pPrevItem = pBackItem->pPrev;
    3989  if(pPrevItem != VMA_NULL)
    3990  {
    3991  pPrevItem->pNext = VMA_NULL;
    3992  }
    3993  m_pBack = pPrevItem;
    3994  m_ItemAllocator.Free(pBackItem);
    3995  --m_Count;
    3996 }
    3997 
    3998 template<typename T>
    3999 void VmaRawList<T>::PopFront()
    4000 {
    4001  VMA_HEAVY_ASSERT(m_Count > 0);
    4002  ItemType* const pFrontItem = m_pFront;
    4003  ItemType* const pNextItem = pFrontItem->pNext;
    4004  if(pNextItem != VMA_NULL)
    4005  {
    4006  pNextItem->pPrev = VMA_NULL;
    4007  }
    4008  m_pFront = pNextItem;
    4009  m_ItemAllocator.Free(pFrontItem);
    4010  --m_Count;
    4011 }
    4012 
    4013 template<typename T>
    4014 void VmaRawList<T>::Remove(ItemType* pItem)
    4015 {
    4016  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4017  VMA_HEAVY_ASSERT(m_Count > 0);
    4018 
    4019  if(pItem->pPrev != VMA_NULL)
    4020  {
    4021  pItem->pPrev->pNext = pItem->pNext;
    4022  }
    4023  else
    4024  {
    4025  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4026  m_pFront = pItem->pNext;
    4027  }
    4028 
    4029  if(pItem->pNext != VMA_NULL)
    4030  {
    4031  pItem->pNext->pPrev = pItem->pPrev;
    4032  }
    4033  else
    4034  {
    4035  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4036  m_pBack = pItem->pPrev;
    4037  }
    4038 
    4039  m_ItemAllocator.Free(pItem);
    4040  --m_Count;
    4041 }
    4042 
    4043 template<typename T>
    4044 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4045 {
    4046  if(pItem != VMA_NULL)
    4047  {
    4048  ItemType* const prevItem = pItem->pPrev;
    4049  ItemType* const newItem = m_ItemAllocator.Alloc();
    4050  newItem->pPrev = prevItem;
    4051  newItem->pNext = pItem;
    4052  pItem->pPrev = newItem;
    4053  if(prevItem != VMA_NULL)
    4054  {
    4055  prevItem->pNext = newItem;
    4056  }
    4057  else
    4058  {
    4059  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4060  m_pFront = newItem;
    4061  }
    4062  ++m_Count;
    4063  return newItem;
    4064  }
    4065  else
    4066  return PushBack();
    4067 }
    4068 
    4069 template<typename T>
    4070 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4071 {
    4072  if(pItem != VMA_NULL)
    4073  {
    4074  ItemType* const nextItem = pItem->pNext;
    4075  ItemType* const newItem = m_ItemAllocator.Alloc();
    4076  newItem->pNext = nextItem;
    4077  newItem->pPrev = pItem;
    4078  pItem->pNext = newItem;
    4079  if(nextItem != VMA_NULL)
    4080  {
    4081  nextItem->pPrev = newItem;
    4082  }
    4083  else
    4084  {
    4085  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4086  m_pBack = newItem;
    4087  }
    4088  ++m_Count;
    4089  return newItem;
    4090  }
    4091  else
    4092  return PushFront();
    4093 }
    4094 
    4095 template<typename T>
    4096 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4097 {
    4098  ItemType* const newItem = InsertBefore(pItem);
    4099  newItem->Value = value;
    4100  return newItem;
    4101 }
    4102 
    4103 template<typename T>
    4104 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4105 {
    4106  ItemType* const newItem = InsertAfter(pItem);
    4107  newItem->Value = value;
    4108  return newItem;
    4109 }
    4110 
// STL-flavored wrapper over VmaRawList, providing the subset of the std::list
// interface used by this library (iterators, begin/end, push_back, insert,
// erase, clear). AllocatorT is expected to expose m_pCallbacks
// (a VkAllocationCallbacks pointer), as VmaStlAllocator does.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional mutable iterator. The end() position is represented by
    // m_pItem == null while m_pList stays set, so operator-- on end() works.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            // Must not be end().
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons are valid only between iterators of the same list.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;   // Owning list, needed for --end().
        VmaListItem<T>* m_pItem;  // Current node; null means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            // Must not be cend().
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons are valid only between iterators of the same list.
        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;  // Owning list, needed for --cend().
        const VmaListItem<T>* m_pItem; // Current node; null means cend().

        friend class VmaList<T, AllocatorT>;
    };

    // Only the allocator's callbacks are used; the allocator itself is not stored.
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before it (it may be end()); returns iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4295 
    4296 #endif // #if VMA_USE_STL_LIST
    4297 
    4299 // class VmaMap
    4300 
// Unused in this version - the whole VmaMap implementation below is compiled
// out with #if 0 and kept only for reference.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

// Minimal std::pair substitute used when the STL map is not used.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally a sorted vector searched with binary search, not a hash table.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by first (key) so find() can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

// Orders pairs by key; the second overload allows comparing pair vs bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

// Inserts at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

// Binary-searches for key; returns end() when not found.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
    4399 
    4401 
class VmaDeviceMemoryBlock;

// Direction of a cache-maintenance operation on mapped memory:
// flush (make host writes visible) or invalidate (pick up device writes).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4405 
/*
Internal state of a single allocation - what the public opaque handle
VmaAllocation points to. An allocation is either a suballocation of a larger
VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or owns its own VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the type-specific data lives in the union below.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit set in m_MapCount when the allocation was created persistently
    // mapped; the low 7 bits are the vmaMapMemory/vmaUnmapMemory ref count.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData is a string owned by this allocation (must be freed
        // via FreeUserDataString before destruction).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Not initialized yet (only constructed).
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Has its own VkDeviceMemory.
    };

    // Constructs in ALLOCATION_TYPE_NONE state; one of the Init* methods
    // must be called afterwards.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes as a suballocation of the given block. Must be in
    // ALLOCATION_TYPE_NONE state. hPool is null for default-pool blocks.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an allocation that is already lost (no backing block).
    // Requires m_LastUseFrameIndex to already hold VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves the allocation to another block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; weak variant, callers loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4622 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the start of the block, in bytes.
    VkDeviceSize size;   // Size of this region, in bytes.
    VmaAllocation hAllocation; // NOTE(review): presumably null for free regions - verify at uses.
    VmaSuballocationType type; // VmaSuballocationType; distinguishes free vs. used regions.
};
    4634 
    4635 // Comparator for offsets.
    4636 struct VmaSuballocationOffsetLess
    4637 {
    4638  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4639  {
    4640  return lhs.offset < rhs.offset;
    4641  }
    4642 };
    4643 struct VmaSuballocationOffsetGreater
    4644 {
    4645  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4646  {
    4647  return lhs.offset > rhs.offset;
    4648  }
    4649 };
    4650 
// List of suballocations describing the contents of one VkDeviceMemory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost to weigh making allocations lost.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4655 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Offset inside the block where allocation would be placed.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque data for the metadata implementation that produced the request.

    // Cost metric of fulfilling this request: bytes wasted plus a fixed
    // per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4683 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms (e.g. VmaBlockMetadata_Generic)
implement the pure virtual interface below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Called once after construction with the total block size, in bytes.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for implementing PrintDetailedMap in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block, in bytes.
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4769 
// Helper for Validate() implementations: if cond is false, asserts with the
// stringized condition and returns false from the enclosing function.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4774 
// Default block-metadata algorithm: keeps all suballocations (used and free)
// in a VmaSuballocationList, plus iterators to the larger free ones in
// m_FreeSuballocationsBySize, sorted by size ascending, for fast search.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;       // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes in free suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4865 
    4866 /*
    4867 Allocations and their references in internal data structure look like this:
    4868 
    4869 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4870 
    4871  0 +-------+
    4872  | |
    4873  | |
    4874  | |
    4875  +-------+
    4876  | Alloc | 1st[m_1stNullItemsBeginCount]
    4877  +-------+
    4878  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4879  +-------+
    4880  | ... |
    4881  +-------+
    4882  | Alloc | 1st[1st.size() - 1]
    4883  +-------+
    4884  | |
    4885  | |
    4886  | |
    4887 GetSize() +-------+
    4888 
    4889 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4890 
    4891  0 +-------+
    4892  | Alloc | 2nd[0]
    4893  +-------+
    4894  | Alloc | 2nd[1]
    4895  +-------+
    4896  | ... |
    4897  +-------+
    4898  | Alloc | 2nd[2nd.size() - 1]
    4899  +-------+
    4900  | |
    4901  | |
    4902  | |
    4903  +-------+
    4904  | Alloc | 1st[m_1stNullItemsBeginCount]
    4905  +-------+
    4906  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4907  +-------+
    4908  | ... |
    4909  +-------+
    4910  | Alloc | 1st[1st.size() - 1]
    4911  +-------+
    4912  | |
    4913 GetSize() +-------+
    4914 
    4915 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4916 
    4917  0 +-------+
    4918  | |
    4919  | |
    4920  | |
    4921  +-------+
    4922  | Alloc | 1st[m_1stNullItemsBeginCount]
    4923  +-------+
    4924  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4925  +-------+
    4926  | ... |
    4927  +-------+
    4928  | Alloc | 1st[1st.size() - 1]
    4929  +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 2nd[2nd.size() - 1]
    4935  +-------+
    4936  | ... |
    4937  +-------+
    4938  | Alloc | 2nd[1]
    4939  +-------+
    4940  | Alloc | 2nd[0]
    4941 GetSize() +-------+
    4942 
    4943 */
 // Block metadata implementation that manages suballocations linearly, using
 // two suballocation vectors (1st/2nd) in ping-pong fashion; the 2nd vector can
 // act as a ring buffer or as the upper side of a double stack (see
 // SECOND_VECTOR_MODE below and the diagram comment preceding this class).
    4944 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    4945 {
    4946  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    4947 public:
    4948  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    4949  virtual ~VmaBlockMetadata_Linear();
    4950  virtual void Init(VkDeviceSize size);
    4951 
    4952  virtual bool Validate() const;
    4953  virtual size_t GetAllocationCount() const;
    4954  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    4955  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    4956  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    4957 
    4958  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    4959  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    4960 
    4961 #if VMA_STATS_STRING_ENABLED
    4962  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    4963 #endif
    4964 
    4965  virtual bool CreateAllocationRequest(
    4966  uint32_t currentFrameIndex,
    4967  uint32_t frameInUseCount,
    4968  VkDeviceSize bufferImageGranularity,
    4969  VkDeviceSize allocSize,
    4970  VkDeviceSize allocAlignment,
    4971  bool upperAddress,
    4972  VmaSuballocationType allocType,
    4973  bool canMakeOtherLost,
    4974  uint32_t strategy,
    4975  VmaAllocationRequest* pAllocationRequest);
    4976 
    4977  virtual bool MakeRequestedAllocationsLost(
    4978  uint32_t currentFrameIndex,
    4979  uint32_t frameInUseCount,
    4980  VmaAllocationRequest* pAllocationRequest);
    4981 
    4982  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4983 
    4984  virtual VkResult CheckCorruption(const void* pBlockData);
    4985 
    4986  virtual void Alloc(
    4987  const VmaAllocationRequest& request,
    4988  VmaSuballocationType type,
    4989  VkDeviceSize allocSize,
    4990  bool upperAddress,
    4991  VmaAllocation hAllocation);
    4992 
    4993  virtual void Free(const VmaAllocation allocation);
    4994  virtual void FreeAtOffset(VkDeviceSize offset);
    4995 
    4996 private:
    4997  /*
    4998  There are two suballocation vectors, used in ping-pong way.
    4999  The one with index m_1stVectorIndex is called 1st.
    5000  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5001  2nd can be non-empty only when 1st is not empty.
    5002  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5003  */
    5004  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5005 
    5006  enum SECOND_VECTOR_MODE
    5007  {
    5008  SECOND_VECTOR_EMPTY,
    5009  /*
    5010  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5011  all have smaller offset.
    5012  */
    5013  SECOND_VECTOR_RING_BUFFER,
    5014  /*
    5015  Suballocations in 2nd vector are upper side of double stack.
    5016  They all have offsets higher than those in 1st vector.
    5017  Top of this stack means smaller offsets, but higher indices in this vector.
    5018  */
    5019  SECOND_VECTOR_DOUBLE_STACK,
    5020  };
    5021 
 // Cached value returned by GetSumFreeSize().
    5022  VkDeviceSize m_SumFreeSize;
    5023  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5024  uint32_t m_1stVectorIndex;
    5025  SECOND_VECTOR_MODE m_2ndVectorMode;
    5026 
 // Accessors resolving the ping-pong: which of the two vectors currently plays
 // the role of 1st vs 2nd, based on m_1stVectorIndex.
    5027  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5028  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5029  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5030  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5031 
    5032  // Number of items in 1st vector with hAllocation = null at the beginning.
    5033  size_t m_1stNullItemsBeginCount;
    5034  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5035  size_t m_1stNullItemsMiddleCount;
    5036  // Number of items in 2nd vector with hAllocation = null.
    5037  size_t m_2ndNullItemsCount;
    5038 
    5039  bool ShouldCompact1st() const;
    5040  void CleanupAfterFree();
    5041 };
    5042 
    5043 /*
    5044 - GetSize() is the original size of allocated memory block.
    5045 - m_UsableSize is this size aligned down to a power of two.
    5046  All allocations and calculations happen relative to m_UsableSize.
    5047 - GetUnusableSize() is the difference between them.
    5048  It is reported as separate, unused range, not available for allocations.
    5049 
    5050 Node at level 0 has size = m_UsableSize.
    5051 Each next level contains nodes with size 2 times smaller than current level.
    5052 m_LevelCount is the maximum number of levels to use in the current object.
    5053 */
 // Buddy-allocator block metadata: a binary tree of nodes whose sizes halve at
 // each level, with per-level free lists (see the comment preceding this class
 // for the m_UsableSize / GetUnusableSize() relationship).
    5054 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5055 {
    5056  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5057 public:
    5058  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5059  virtual ~VmaBlockMetadata_Buddy();
    5060  virtual void Init(VkDeviceSize size);
    5061 
    5062  virtual bool Validate() const;
    5063  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
 // Unusable tail (size not covered by the power-of-two m_UsableSize) is
 // counted as free here.
    5064  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5065  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5066  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5067 
    5068  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5069  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5070 
    5071 #if VMA_STATS_STRING_ENABLED
    5072  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5073 #endif
    5074 
    5075  virtual bool CreateAllocationRequest(
    5076  uint32_t currentFrameIndex,
    5077  uint32_t frameInUseCount,
    5078  VkDeviceSize bufferImageGranularity,
    5079  VkDeviceSize allocSize,
    5080  VkDeviceSize allocAlignment,
    5081  bool upperAddress,
    5082  VmaSuballocationType allocType,
    5083  bool canMakeOtherLost,
    5084  uint32_t strategy,
    5085  VmaAllocationRequest* pAllocationRequest);
    5086 
    5087  virtual bool MakeRequestedAllocationsLost(
    5088  uint32_t currentFrameIndex,
    5089  uint32_t frameInUseCount,
    5090  VmaAllocationRequest* pAllocationRequest);
    5091 
    5092  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5093 
 // Corruption detection is not supported by this algorithm.
    5094  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5095 
    5096  virtual void Alloc(
    5097  const VmaAllocationRequest& request,
    5098  VmaSuballocationType type,
    5099  VkDeviceSize allocSize,
    5100  bool upperAddress,
    5101  VmaAllocation hAllocation);
    5102 
 // Both Free overloads delegate to the private FreeAtOffset(alloc, offset).
    5103  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5104  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5105 
    5106 private:
    5107  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5108  static const size_t MAX_LEVELS = 30;
    5109 
 // Accumulators filled by ValidateNode() and compared against the cached
 // counters during Validate().
    5110  struct ValidationContext
    5111  {
    5112  size_t calculatedAllocationCount;
    5113  size_t calculatedFreeCount;
    5114  VkDeviceSize calculatedSumFreeSize;
    5115 
    5116  ValidationContext() :
    5117  calculatedAllocationCount(0),
    5118  calculatedFreeCount(0),
    5119  calculatedSumFreeSize(0) { }
    5120  };
    5121 
 // Tree node. The union's active member is selected by `type`:
 // free -> intrusive free-list links, allocation -> handle, split -> children.
    5122  struct Node
    5123  {
    5124  VkDeviceSize offset;
    5125  enum TYPE
    5126  {
    5127  TYPE_FREE,
    5128  TYPE_ALLOCATION,
    5129  TYPE_SPLIT,
    5130  TYPE_COUNT
    5131  } type;
    5132  Node* parent;
    5133  Node* buddy;
    5134 
    5135  union
    5136  {
    5137  struct
    5138  {
    5139  Node* prev;
    5140  Node* next;
    5141  } free;
    5142  struct
    5143  {
    5144  VmaAllocation alloc;
    5145  } allocation;
    5146  struct
    5147  {
    5148  Node* leftChild;
    5149  } split;
    5150  };
    5151  };
    5152 
    5153  // Size of the memory block aligned down to a power of two.
    5154  VkDeviceSize m_UsableSize;
    5155  uint32_t m_LevelCount;
    5156 
    5157  Node* m_Root;
 // Per-level doubly-linked list of free nodes (linked through Node::free).
    5158  struct {
    5159  Node* front;
    5160  Node* back;
    5161  } m_FreeList[MAX_LEVELS];
    5162  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5163  size_t m_AllocationCount;
    5164  // Number of nodes in the tree with type == TYPE_FREE.
    5165  size_t m_FreeCount;
    5166  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5167  VkDeviceSize m_SumFreeSize;
    5168 
    5169  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5170  void DeleteNode(Node* node);
    5171  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5172  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5173  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5174  // Alloc passed just for validation. Can be null.
    5175  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5176  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5177  // Adds node to the front of FreeList at given level.
    5178  // node->type must be FREE.
    5179  // node->free.prev, next can be undefined.
    5180  void AddToFreeListFront(uint32_t level, Node* node);
    5181  // Removes node from FreeList at given level.
    5182  // node->type must be FREE.
    5183  // node->free.prev, next stay untouched.
    5184  void RemoveFromFreeList(uint32_t level, Node* node);
    5185 
    5186 #if VMA_STATS_STRING_ENABLED
    5187  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5188 #endif
    5189 };
    5190 
    5191 /*
    5192 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5193 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5194 
    5195 Thread-safety: This class must be externally synchronized.
    5196 */
    5197 class VmaDeviceMemoryBlock
    5198 {
    5199  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5200 public:
 // Suballocation bookkeeping for this block (ownership: created in Init(),
 // presumably destroyed in Destroy() -- confirm in implementation).
    5201  VmaBlockMetadata* m_pMetadata;
    5202 
    5203  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5204 
 // Destructor only asserts proper teardown; actual release happens in Destroy().
    5205  ~VmaDeviceMemoryBlock()
    5206  {
    5207  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5208  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5209  }
    5210 
    5211  // Always call after construction.
    5212  void Init(
    5213  VmaAllocator hAllocator,
    5214  uint32_t newMemoryTypeIndex,
    5215  VkDeviceMemory newMemory,
    5216  VkDeviceSize newSize,
    5217  uint32_t id,
    5218  uint32_t algorithm);
    5219  // Always call before destruction.
    5220  void Destroy(VmaAllocator allocator);
    5221 
    5222  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5223  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5224  uint32_t GetId() const { return m_Id; }
    5225  void* GetMappedData() const { return m_pMappedData; }
    5226 
    5227  // Validates all data structures inside this object. If not valid, returns false.
    5228  bool Validate() const;
    5229 
    5230  VkResult CheckCorruption(VmaAllocator hAllocator);
    5231 
    5232  // ppData can be null.
 // `count` lets a single call represent multiple map/unmap references --
 // see m_MapCount below.
    5233  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5234  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5235 
    5236  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5237  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5238 
    5239  VkResult BindBufferMemory(
    5240  const VmaAllocator hAllocator,
    5241  const VmaAllocation hAllocation,
    5242  VkBuffer hBuffer);
    5243  VkResult BindImageMemory(
    5244  const VmaAllocator hAllocator,
    5245  const VmaAllocation hAllocation,
    5246  VkImage hImage);
    5247 
    5248 private:
    5249  uint32_t m_MemoryTypeIndex;
    5250  uint32_t m_Id;
    5251  VkDeviceMemory m_hMemory;
    5252 
    5253  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5254  // Also protects m_MapCount, m_pMappedData.
    5255  VMA_MUTEX m_Mutex;
    5256  uint32_t m_MapCount;
    5257  void* m_pMappedData;
    5258 };
    5259 
 // Comparator ordering arbitrary pointers by address, for use in sorted
 // containers of handles.
 // NOTE(review): relational comparison of pointers to unrelated objects is
 // unspecified behavior in standard C++; std::less<const void*> guarantees a
 // strict total order -- confirm whether supported platforms rely on raw `<`.
    5260 struct VmaPointerLess
    5261 {
    5262  bool operator()(const void* lhs, const void* rhs) const
    5263  {
    5264  return lhs < rhs;
    5265  }
    5266 };
    5267 
    5268 class VmaDefragmentator;
    5269 
    5270 /*
    5271 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5272 Vulkan memory type.
    5273 
    5274 Synchronized internally with a mutex.
    5275 */
    5276 struct VmaBlockVector
    5277 {
    5278  VMA_CLASS_NO_COPY(VmaBlockVector)
    5279 public:
    5280  VmaBlockVector(
    5281  VmaAllocator hAllocator,
    5282  uint32_t memoryTypeIndex,
    5283  VkDeviceSize preferredBlockSize,
    5284  size_t minBlockCount,
    5285  size_t maxBlockCount,
    5286  VkDeviceSize bufferImageGranularity,
    5287  uint32_t frameInUseCount,
    5288  bool isCustomPool,
    5289  bool explicitBlockSize,
    5290  uint32_t algorithm);
    5291  ~VmaBlockVector();
    5292 
 // Pre-creates m_MinBlockCount blocks -- TODO confirm against implementation.
    5293  VkResult CreateMinBlocks();
    5294 
    5295  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5296  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5297  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5298  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5299  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5300 
    5301  void GetPoolStats(VmaPoolStats* pStats);
    5302 
    5303  bool IsEmpty() const { return m_Blocks.empty(); }
    5304  bool IsCorruptionDetectionEnabled() const;
    5305 
    5306  VkResult Allocate(
    5307  VmaPool hCurrentPool,
    5308  uint32_t currentFrameIndex,
    5309  VkDeviceSize size,
    5310  VkDeviceSize alignment,
    5311  const VmaAllocationCreateInfo& createInfo,
    5312  VmaSuballocationType suballocType,
    5313  VmaAllocation* pAllocation);
    5314 
    5315  void Free(
    5316  VmaAllocation hAllocation);
    5317 
    5318  // Adds statistics of this BlockVector to pStats.
    5319  void AddStats(VmaStats* pStats);
    5320 
    5321 #if VMA_STATS_STRING_ENABLED
    5322  void PrintDetailedMap(class VmaJsonWriter& json);
    5323 #endif
    5324 
    5325  void MakePoolAllocationsLost(
    5326  uint32_t currentFrameIndex,
    5327  size_t* pLostAllocationCount);
    5328  VkResult CheckCorruption();
    5329 
 // Lazily creates m_pDefragmentator if it does not exist yet -- TODO confirm.
    5330  VmaDefragmentator* EnsureDefragmentator(
    5331  VmaAllocator hAllocator,
    5332  uint32_t currentFrameIndex);
    5333 
 // maxBytesToMove / maxAllocationsToMove are in-out budget counters.
    5334  VkResult Defragment(
    5335  VmaDefragmentationStats* pDefragmentationStats,
    5336  VkDeviceSize& maxBytesToMove,
    5337  uint32_t& maxAllocationsToMove);
    5338 
    5339  void DestroyDefragmentator();
    5340 
    5341 private:
    5342  friend class VmaDefragmentator;
    5343 
    5344  const VmaAllocator m_hAllocator;
    5345  const uint32_t m_MemoryTypeIndex;
    5346  const VkDeviceSize m_PreferredBlockSize;
    5347  const size_t m_MinBlockCount;
    5348  const size_t m_MaxBlockCount;
    5349  const VkDeviceSize m_BufferImageGranularity;
    5350  const uint32_t m_FrameInUseCount;
    5351  const bool m_IsCustomPool;
    5352  const bool m_ExplicitBlockSize;
    5353  const uint32_t m_Algorithm;
 // See the hysteresis note below (at most one completely empty block is kept).
    5354  bool m_HasEmptyBlock;
    5355  VMA_MUTEX m_Mutex;
    5356  // Incrementally sorted by sumFreeSize, ascending.
    5357  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5358  /* There can be at most one allocation that is completely empty - a
    5359  hysteresis to avoid pessimistic case of alternating creation and destruction
    5360  of a VkDeviceMemory. */
    5361  VmaDefragmentator* m_pDefragmentator;
    5362  uint32_t m_NextBlockId;
    5363 
    5364  VkDeviceSize CalcMaxBlockSize() const;
    5365 
    5366  // Finds and removes given block from vector.
    5367  void Remove(VmaDeviceMemoryBlock* pBlock);
    5368 
    5369  // Performs single step in sorting m_Blocks. They may not be fully sorted
    5370  // after this call.
    5371  void IncrementallySortBlocks();
    5372 
    5373  // To be used only without CAN_MAKE_OTHER_LOST flag.
    5374  VkResult AllocateFromBlock(
    5375  VmaDeviceMemoryBlock* pBlock,
    5376  VmaPool hCurrentPool,
    5377  uint32_t currentFrameIndex,
    5378  VkDeviceSize size,
    5379  VkDeviceSize alignment,
    5380  VmaAllocationCreateFlags allocFlags,
    5381  void* pUserData,
    5382  VmaSuballocationType suballocType,
    5383  uint32_t strategy,
    5384  VmaAllocation* pAllocation);
    5385 
    5386  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5387 };
    5388 
 // Implementation of the public VmaPool handle: a custom memory pool wrapping
 // one VmaBlockVector.
    5389 struct VmaPool_T
    5390 {
    5391  VMA_CLASS_NO_COPY(VmaPool_T)
    5392 public:
    5393  VmaBlockVector m_BlockVector;
    5394 
    5395  VmaPool_T(
    5396  VmaAllocator hAllocator,
    5397  const VmaPoolCreateInfo& createInfo,
    5398  VkDeviceSize preferredBlockSize);
    5399  ~VmaPool_T();
    5400 
    5401  uint32_t GetId() const { return m_Id; }
 // Id may be set only once, after construction (asserted to still be 0).
    5402  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    5403 
    5404 #if VMA_STATS_STRING_ENABLED
    5405  //void PrintDetailedMap(class VmaStringBuilder& sb);
    5406 #endif
    5407 
    5408 private:
    5409  uint32_t m_Id;
    5410 };
    5411 
 // Moves allocations between blocks of one VmaBlockVector to reduce
 // fragmentation, within the maxBytesToMove / maxAllocationsToMove budget
 // passed to Defragment().
    5412 class VmaDefragmentator
    5413 {
    5414  VMA_CLASS_NO_COPY(VmaDefragmentator)
    5415 private:
    5416  const VmaAllocator m_hAllocator;
    5417  VmaBlockVector* const m_pBlockVector;
    5418  uint32_t m_CurrentFrameIndex;
    5419  VkDeviceSize m_BytesMoved;
    5420  uint32_t m_AllocationsMoved;
    5421 
 // One allocation registered for defragmentation; m_pChanged (optional)
 // reports back to the caller whether the allocation was moved.
    5422  struct AllocationInfo
    5423  {
    5424  VmaAllocation m_hAllocation;
    5425  VkBool32* m_pChanged;
    5426 
    5427  AllocationInfo() :
    5428  m_hAllocation(VK_NULL_HANDLE),
    5429  m_pChanged(VMA_NULL)
    5430  {
    5431  }
    5432  };
    5433 
 // Sorts AllocationInfo descending by allocation size.
    5434  struct AllocationInfoSizeGreater
    5435  {
    5436  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    5437  {
    5438  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    5439  }
    5440  };
    5441 
    5442  // Used between AddAllocation and Defragment.
    5443  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    5444 
    5445  struct BlockInfo
    5446  {
    5447  VmaDeviceMemoryBlock* m_pBlock;
    5448  bool m_HasNonMovableAllocations;
    5449  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    5450 
    5451  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    5452  m_pBlock(VMA_NULL),
    5453  m_HasNonMovableAllocations(true),
    5454  m_Allocations(pAllocationCallbacks),
    5455  m_pMappedDataForDefragmentation(VMA_NULL)
    5456  {
    5457  }
    5458 
 // A block has non-movable allocations when it contains more allocations
 // than were registered for defragmentation.
    5459  void CalcHasNonMovableAllocations()
    5460  {
    5461  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    5462  const size_t defragmentAllocCount = m_Allocations.size();
    5463  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    5464  }
    5465 
 // NOTE(review): "Descecnding" is a typo for "Descending"; renaming would
 // touch call sites, so it is left as-is here.
    5466  void SortAllocationsBySizeDescecnding()
    5467  {
    5468  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    5469  }
    5470 
    5471  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
    5472  void Unmap(VmaAllocator hAllocator);
    5473 
    5474  private:
    5475  // Not null if mapped for defragmentation only, not originally mapped.
    5476  void* m_pMappedDataForDefragmentation;
    5477  };
    5478 
 // Heterogeneous comparator: orders BlockInfo entries by their m_pBlock
 // pointer, also comparable directly against a block pointer.
    5479  struct BlockPointerLess
    5480  {
    5481  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    5482  {
    5483  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    5484  }
    5485  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    5486  {
    5487  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    5488  }
    5489  };
    5490 
    5491  // 1. Blocks with some non-movable allocations go first.
    5492  // 2. Blocks with smaller sumFreeSize go first.
    5493  struct BlockInfoCompareMoveDestination
    5494  {
    5495  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    5496  {
    5497  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    5498  {
    5499  return true;
    5500  }
    5501  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    5502  {
    5503  return false;
    5504  }
    5505  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    5506  {
    5507  return true;
    5508  }
    5509  return false;
    5510  }
    5511  };
    5512 
    5513  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    5514  BlockInfoVector m_Blocks;
    5515 
    5516  VkResult DefragmentRound(
    5517  VkDeviceSize maxBytesToMove,
    5518  uint32_t maxAllocationsToMove);
    5519 
    5520  static bool MoveMakesSense(
    5521  size_t dstBlockIndex, VkDeviceSize dstOffset,
    5522  size_t srcBlockIndex, VkDeviceSize srcOffset);
    5523 
    5524 public:
    5525  VmaDefragmentator(
    5526  VmaAllocator hAllocator,
    5527  VmaBlockVector* pBlockVector,
    5528  uint32_t currentFrameIndex);
    5529 
    5530  ~VmaDefragmentator();
    5531 
    5532  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    5533  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    5534 
 // pChanged can be null; if not, it receives whether the allocation moved.
    5535  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    5536 
    5537  VkResult Defragment(
    5538  VkDeviceSize maxBytesToMove,
    5539  uint32_t maxAllocationsToMove);
    5540 };
    5541 
    5542 #if VMA_RECORDING_ENABLED
    5543 
 // Records allocator API calls to a file (m_File) for later replay; compiled
 // only when VMA_RECORDING_ENABLED.
    5544 class VmaRecorder
    5545 {
    5546 public:
    5547  VmaRecorder();
    5548  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    5549  void WriteConfiguration(
    5550  const VkPhysicalDeviceProperties& devProps,
    5551  const VkPhysicalDeviceMemoryProperties& memProps,
    5552  bool dedicatedAllocationExtensionEnabled);
    5553  ~VmaRecorder();
    5554 
 // One Record* method per public allocator entry point being traced.
    5555  void RecordCreateAllocator(uint32_t frameIndex);
    5556  void RecordDestroyAllocator(uint32_t frameIndex);
    5557  void RecordCreatePool(uint32_t frameIndex,
    5558  const VmaPoolCreateInfo& createInfo,
    5559  VmaPool pool);
    5560  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    5561  void RecordAllocateMemory(uint32_t frameIndex,
    5562  const VkMemoryRequirements& vkMemReq,
    5563  const VmaAllocationCreateInfo& createInfo,
    5564  VmaAllocation allocation);
    5565  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    5566  const VkMemoryRequirements& vkMemReq,
    5567  bool requiresDedicatedAllocation,
    5568  bool prefersDedicatedAllocation,
    5569  const VmaAllocationCreateInfo& createInfo,
    5570  VmaAllocation allocation);
    5571  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    5572  const VkMemoryRequirements& vkMemReq,
    5573  bool requiresDedicatedAllocation,
    5574  bool prefersDedicatedAllocation,
    5575  const VmaAllocationCreateInfo& createInfo,
    5576  VmaAllocation allocation);
    5577  void RecordFreeMemory(uint32_t frameIndex,
    5578  VmaAllocation allocation);
    5579  void RecordSetAllocationUserData(uint32_t frameIndex,
    5580  VmaAllocation allocation,
    5581  const void* pUserData);
    5582  void RecordCreateLostAllocation(uint32_t frameIndex,
    5583  VmaAllocation allocation);
    5584  void RecordMapMemory(uint32_t frameIndex,
    5585  VmaAllocation allocation);
    5586  void RecordUnmapMemory(uint32_t frameIndex,
    5587  VmaAllocation allocation);
    5588  void RecordFlushAllocation(uint32_t frameIndex,
    5589  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    5590  void RecordInvalidateAllocation(uint32_t frameIndex,
    5591  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    5592  void RecordCreateBuffer(uint32_t frameIndex,
    5593  const VkBufferCreateInfo& bufCreateInfo,
    5594  const VmaAllocationCreateInfo& allocCreateInfo,
    5595  VmaAllocation allocation);
    5596  void RecordCreateImage(uint32_t frameIndex,
    5597  const VkImageCreateInfo& imageCreateInfo,
    5598  const VmaAllocationCreateInfo& allocCreateInfo,
    5599  VmaAllocation allocation);
    5600  void RecordDestroyBuffer(uint32_t frameIndex,
    5601  VmaAllocation allocation);
    5602  void RecordDestroyImage(uint32_t frameIndex,
    5603  VmaAllocation allocation);
    5604  void RecordTouchAllocation(uint32_t frameIndex,
    5605  VmaAllocation allocation);
    5606  void RecordGetAllocationInfo(uint32_t frameIndex,
    5607  VmaAllocation allocation);
    5608  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    5609  VmaPool pool);
    5610 
    5611 private:
 // Per-call context written with every record entry.
    5612  struct CallParams
    5613  {
    5614  uint32_t threadId;
    5615  double time;
    5616  };
    5617 
 // Helper turning a user-data pointer into a printable string; m_PtrStr is a
 // local buffer for the pointer-as-hex case (16 hex digits + NUL).
    5618  class UserDataString
    5619  {
    5620  public:
    5621  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    5622  const char* GetString() const { return m_Str; }
    5623 
    5624  private:
    5625  char m_PtrStr[17];
    5626  const char* m_Str;
    5627  };
    5628 
    5629  bool m_UseMutex;
    5630  VmaRecordFlags m_Flags;
    5631  FILE* m_File;
 // Serializes writes to m_File when m_UseMutex is set -- TODO confirm.
    5632  VMA_MUTEX m_FileMutex;
 // Timer frequency and start counter used to compute CallParams::time.
    5633  int64_t m_Freq;
    5634  int64_t m_StartCounter;
    5635 
    5636  void GetBasicParams(CallParams& outParams);
    5637  void Flush();
    5638 };
    5639 
    5640 #endif // #if VMA_RECORDING_ENABLED
    5641 
// Main allocator object.
// One instance corresponds to one VmaAllocator handle. Owns the default block
// vectors (one per Vulkan memory type), the registry of dedicated allocations,
// and all custom pools. Public data members are accessed directly by the
// free functions and *_T classes in this file.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null so Vulkan uses its defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug override macro.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize for flush/invalidate.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also apply heap
    // size limits and invoke user device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5839 
    5841 // Memory allocation #2 after VmaAllocator_T definition
    5842 
    5843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5844 {
    5845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5846 }
    5847 
    5848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5849 {
    5850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5851 }
    5852 
    5853 template<typename T>
    5854 static T* VmaAllocate(VmaAllocator hAllocator)
    5855 {
    5856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5857 }
    5858 
    5859 template<typename T>
    5860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5861 {
    5862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5863 }
    5864 
    5865 template<typename T>
    5866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5867 {
    5868  if(ptr != VMA_NULL)
    5869  {
    5870  ptr->~T();
    5871  VmaFree(hAllocator, ptr);
    5872  }
    5873 }
    5874 
    5875 template<typename T>
    5876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5877 {
    5878  if(ptr != VMA_NULL)
    5879  {
    5880  for(size_t i = count; i--; )
    5881  ptr[i].~T();
    5882  VmaFree(hAllocator, ptr);
    5883  }
    5884 }
    5885 
    5887 // VmaStringBuilder
    5888 
    5889 #if VMA_STATS_STRING_ENABLED
    5890 
// Simple append-only string builder backed by a VmaVector<char>.
// Note: m_Data is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Appends the decimal representation of the number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer value formatted by VmaPtrToStr.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5908 
    5909 void VmaStringBuilder::Add(const char* pStr)
    5910 {
    5911  const size_t strLen = strlen(pStr);
    5912  if(strLen > 0)
    5913  {
    5914  const size_t oldCount = m_Data.size();
    5915  m_Data.resize(oldCount + strLen);
    5916  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5917  }
    5918 }
    5919 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // uint32_t has at most 10 decimal digits + terminating null.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5926 
// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // uint64_t has at most 20 decimal digits + terminating null.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5933 
// Appends a pointer value formatted by VmaPtrToStr.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    // Sized for a 64-bit value in the format produced by VmaPtrToStr.
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    5940 
    5941 #endif // #if VMA_STATS_STRING_ENABLED
    5942 
    5944 // VmaJsonWriter
    5945 
    5946 #if VMA_STATS_STRING_ENABLED
    5947 
// Streaming JSON writer used to build the stats string.
// Maintains a stack of open objects/arrays to emit separators and indentation;
// inside an object, values alternate key (string) / value, enforced by asserts.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine = true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building one string value from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of items written so far; in objects, even = next is a key.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5996 
    5997 const char* const VmaJsonWriter::INDENT = " ";
    5998 
// Binds the writer to an output string builder; pAllocationCallbacks is used
// only for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6005 
// Destructor asserts the document is well-formed: no unterminated string and
// every BeginObject/BeginArray matched by an End call.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6011 
    6012 void VmaJsonWriter::BeginObject(bool singleLine)
    6013 {
    6014  VMA_ASSERT(!m_InsideString);
    6015 
    6016  BeginValue(false);
    6017  m_SB.Add('{');
    6018 
    6019  StackItem item;
    6020  item.type = COLLECTION_TYPE_OBJECT;
    6021  item.valueCount = 0;
    6022  item.singleLineMode = singleLine;
    6023  m_Stack.push_back(item);
    6024 }
    6025 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace sits one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6036 
    6037 void VmaJsonWriter::BeginArray(bool singleLine)
    6038 {
    6039  VMA_ASSERT(!m_InsideString);
    6040 
    6041  BeginValue(false);
    6042  m_SB.Add('[');
    6043 
    6044  StackItem item;
    6045  item.type = COLLECTION_TYPE_ARRAY;
    6046  item.valueCount = 0;
    6047  item.singleLineMode = singleLine;
    6048  m_Stack.push_back(item);
    6049 }
    6050 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket sits one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6061 
// Writes a complete quoted, escaped string value (or object key).
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6067 
// Opens a string value: emits the opening quote and optionally the first
// fragment. Must be closed with EndString().
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
    6080 
    6081 void VmaJsonWriter::ContinueString(const char* pStr)
    6082 {
    6083  VMA_ASSERT(m_InsideString);
    6084 
    6085  const size_t strLen = strlen(pStr);
    6086  for(size_t i = 0; i < strLen; ++i)
    6087  {
    6088  char ch = pStr[i];
    6089  if(ch == '\\')
    6090  {
    6091  m_SB.Add("\\\\");
    6092  }
    6093  else if(ch == '"')
    6094  {
    6095  m_SB.Add("\\\"");
    6096  }
    6097  else if(ch >= 32)
    6098  {
    6099  m_SB.Add(ch);
    6100  }
    6101  else switch(ch)
    6102  {
    6103  case '\b':
    6104  m_SB.Add("\\b");
    6105  break;
    6106  case '\f':
    6107  m_SB.Add("\\f");
    6108  break;
    6109  case '\n':
    6110  m_SB.Add("\\n");
    6111  break;
    6112  case '\r':
    6113  m_SB.Add("\\r");
    6114  break;
    6115  case '\t':
    6116  m_SB.Add("\\t");
    6117  break;
    6118  default:
    6119  VMA_ASSERT(0 && "Character not currently supported.");
    6120  break;
    6121  }
    6122  }
    6123 }
    6124 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6130 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6136 
// Appends a formatted pointer value to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6142 
// Closes the currently open string, optionally appending a final fragment
// before the closing quote.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    6153 
// Writes a standalone (unquoted) number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6160 
// Writes a standalone (unquoted) number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6167 
    6168 void VmaJsonWriter::WriteBool(bool b)
    6169 {
    6170  VMA_ASSERT(!m_InsideString);
    6171  BeginValue(false);
    6172  m_SB.Add(b ? "true" : "false");
    6173 }
    6174 
// Writes the JSON null literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6181 
    6182 void VmaJsonWriter::BeginValue(bool isString)
    6183 {
    6184  if(!m_Stack.empty())
    6185  {
    6186  StackItem& currItem = m_Stack.back();
    6187  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6188  currItem.valueCount % 2 == 0)
    6189  {
    6190  VMA_ASSERT(isString);
    6191  }
    6192 
    6193  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6194  currItem.valueCount % 2 != 0)
    6195  {
    6196  m_SB.Add(": ");
    6197  }
    6198  else if(currItem.valueCount > 0)
    6199  {
    6200  m_SB.Add(", ");
    6201  WriteIndent();
    6202  }
    6203  else
    6204  {
    6205  WriteIndent();
    6206  }
    6207  ++currItem.valueCount;
    6208  }
    6209 }
    6210 
    6211 void VmaJsonWriter::WriteIndent(bool oneLess)
    6212 {
    6213  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6214  {
    6215  m_SB.AddNewLine();
    6216 
    6217  size_t count = m_Stack.size();
    6218  if(count > 0 && oneLess)
    6219  {
    6220  --count;
    6221  }
    6222  for(size_t i = 0; i < count; ++i)
    6223  {
    6224  m_SB.Add(INDENT);
    6225  }
    6226  }
    6227 }
    6228 
    6229 #endif // #if VMA_STATS_STRING_ENABLED
    6230 
    6232 
// Sets the allocation's pUserData. If the allocation was created with the
// user-data-copy-string flag, pUserData is treated as a null-terminated
// string: the previously stored copy is freed and a fresh heap copy is made.
// Otherwise the raw pointer is stored as-is (caller retains ownership).
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing back the pointer currently stored would become
        // use-after-free once the old string is released below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            // +1 copies the terminating null as well.
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6255 
// Rebinds this block allocation to a different memory block/offset.
// Any outstanding map references (including the persistent-map bit counted as
// one reference) are transferred from the old block to the new one so each
// block's map reference count stays balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6277 
    6278 VkDeviceSize VmaAllocation_T::GetOffset() const
    6279 {
    6280  switch(m_Type)
    6281  {
    6282  case ALLOCATION_TYPE_BLOCK:
    6283  return m_BlockAllocation.m_Offset;
    6284  case ALLOCATION_TYPE_DEDICATED:
    6285  return 0;
    6286  default:
    6287  VMA_ASSERT(0);
    6288  return 0;
    6289  }
    6290 }
    6291 
    6292 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6293 {
    6294  switch(m_Type)
    6295  {
    6296  case ALLOCATION_TYPE_BLOCK:
    6297  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6298  case ALLOCATION_TYPE_DEDICATED:
    6299  return m_DedicatedAllocation.m_hMemory;
    6300  default:
    6301  VMA_ASSERT(0);
    6302  return VK_NULL_HANDLE;
    6303  }
    6304 }
    6305 
    6306 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6307 {
    6308  switch(m_Type)
    6309  {
    6310  case ALLOCATION_TYPE_BLOCK:
    6311  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6312  case ALLOCATION_TYPE_DEDICATED:
    6313  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6314  default:
    6315  VMA_ASSERT(0);
    6316  return UINT32_MAX;
    6317  }
    6318 }
    6319 
// Returns the CPU pointer to the allocation's data if currently mapped,
// or VMA_NULL for an unmapped block allocation.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            // The owning block is mapped as a whole; add this allocation's
            // offset inside the block.
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Cached pointer must agree with the map reference count.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
    6344 
    6345 bool VmaAllocation_T::CanBecomeLost() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_CanBecomeLost;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return false;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return false;
    6356  }
    6357 }
    6358 
// Returns the custom pool the allocation belongs to (VK_NULL_HANDLE-style
// value for default pools). Valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6364 
// Atomically marks the allocation as lost if it has not been used within the
// last frameInUseCount frames relative to currentFrameIndex.
// Returns true on success; false if the allocation is still in use.
// Implemented as a compare-exchange retry loop on the last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread touched the index; re-evaluate with
            // the updated value (localLastUseFrameIndex was refreshed).
        }
    }
}
    6396 
    6397 #if VMA_STATS_STRING_ENABLED
    6398 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing the detailed map, so the
// order must match the enum declaration exactly.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6408 
// Writes this allocation's key/value pairs into an already-open JSON object
// (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy — print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer — print its value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6444 
    6445 #endif
    6446 
    6447 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6448 {
    6449  VMA_ASSERT(IsUserDataString());
    6450  if(m_pUserData != VMA_NULL)
    6451  {
    6452  char* const oldStr = (char*)m_pUserData;
    6453  const size_t oldStrLen = strlen(oldStr);
    6454  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6455  m_pUserData = VMA_NULL;
    6456  }
    6457 }
    6458 
// Increments the map reference count of a block allocation.
// The persistent-map flag bit is masked out; the plain count is capped at
// 0x7F to avoid overflowing into the flag bit.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}
    6472 
// Decrements the map reference count of a block allocation.
// Asserts on unbalanced unmap (count already zero, ignoring the
// persistent-map flag bit).
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
    6486 
// Maps a dedicated allocation with reference counting: the first call performs
// the actual vkMapMemory of the whole range; subsequent calls return the
// cached pointer and bump the counter (capped at 0x7F).
// Returns vkMapMemory's result, or VK_ERROR_MEMORY_MAP_FAILED on counter
// overflow.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped — just hand out the cached pointer.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6523 
// Decrements the map reference count of a dedicated allocation and performs
// the actual vkUnmapMemory when the count (including the persistent-map flag
// bit) drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6544 
    6545 #if VMA_STATS_STRING_ENABLED
    6546 
// Writes one VmaStatInfo as a complete JSON object.
// The Min/Avg/Max sub-objects are emitted only when there is more than one
// allocation / unused range.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6594 
    6595 #endif // #if VMA_STATS_STRING_ENABLED
    6596 
// Comparator ordering suballocation-list iterators by suballocation size.
// The second overload compares against a plain VkDeviceSize key, enabling
// binary search in m_FreeSuballocationsBySize.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6612 
    6613 
    6615 // class VmaBlockMetadata
    6616 
// Base metadata: stores block size and allocation callbacks; size stays 0
// until Init() is called.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6622 
    6623 #if VMA_STATS_STRING_ENABLED
    6624 
// Opens the JSON object for one memory block: writes summary counters and
// begins the "Suballocations" array. Must be paired with
// PrintDetailedMap_End(), with the individual entries emitted in between.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6647 
// Emits one used suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type/Size/UserData/etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6661 
// Emits one free range as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6679 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6685 
    6686 #endif // #if VMA_STATS_STRING_ENABLED
    6687 
    6689 // class VmaBlockMetadata_Generic
    6690 
// Generic (free-list based) metadata. Starts empty; call Init() to set up the
// initial whole-block free range.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6699 
// Members clean themselves up; nothing else to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6703 
    6704 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6705 {
    6706  VmaBlockMetadata::Init(size);
    6707 
    6708  m_FreeCount = 1;
    6709  m_SumFreeSize = size;
    6710 
    6711  VmaSuballocation suballoc = {};
    6712  suballoc.offset = 0;
    6713  suballoc.size = size;
    6714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6715  suballoc.hAllocation = VK_NULL_HANDLE;
    6716 
    6717  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6718  m_Suballocations.push_back(suballoc);
    6719  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6720  --suballocItem;
    6721  m_FreeSuballocationsBySize.push_back(suballocItem);
    6722 }
    6723 
// Checks internal consistency of this block's metadata. Each VMA_VALIDATE
// reports failure (returns false / asserts, depending on configuration) on
// the first violated invariant; reaching the end means everything is valid.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free if and only if it carries no allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // A used suballocation must agree with its allocation object.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size vector must contain only free items, sorted ascending by size.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6805 
    6806 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6807 {
    6808  if(!m_FreeSuballocationsBySize.empty())
    6809  {
    6810  return m_FreeSuballocationsBySize.back()->size;
    6811  }
    6812  else
    6813  {
    6814  return 0;
    6815  }
    6816 }
    6817 
    6818 bool VmaBlockMetadata_Generic::IsEmpty() const
    6819 {
    6820  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6821 }
    6822 
    6823 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6824 {
    6825  outInfo.blockCount = 1;
    6826 
    6827  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6828  outInfo.allocationCount = rangeCount - m_FreeCount;
    6829  outInfo.unusedRangeCount = m_FreeCount;
    6830 
    6831  outInfo.unusedBytes = m_SumFreeSize;
    6832  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6833 
    6834  outInfo.allocationSizeMin = UINT64_MAX;
    6835  outInfo.allocationSizeMax = 0;
    6836  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6837  outInfo.unusedRangeSizeMax = 0;
    6838 
    6839  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6840  suballocItem != m_Suballocations.cend();
    6841  ++suballocItem)
    6842  {
    6843  const VmaSuballocation& suballoc = *suballocItem;
    6844  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6845  {
    6846  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6847  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6848  }
    6849  else
    6850  {
    6851  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6852  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6853  }
    6854  }
    6855 }
    6856 
    6857 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6858 {
    6859  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6860 
    6861  inoutStats.size += GetSize();
    6862  inoutStats.unusedSize += m_SumFreeSize;
    6863  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6864  inoutStats.unusedRangeCount += m_FreeCount;
    6865  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6866 }
    6867 
    6868 #if VMA_STATS_STRING_ENABLED
    6869 
    6870 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6871 {
    6872  PrintDetailedMap_Begin(json,
    6873  m_SumFreeSize, // unusedBytes
    6874  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6875  m_FreeCount); // unusedRangeCount
    6876 
    6877  size_t i = 0;
    6878  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6879  suballocItem != m_Suballocations.cend();
    6880  ++suballocItem, ++i)
    6881  {
    6882  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6883  {
    6884  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6885  }
    6886  else
    6887  {
    6888  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6889  }
    6890  }
    6891 
    6892  PrintDetailedMap_End(json);
    6893 }
    6894 
    6895 #endif // #if VMA_STATS_STRING_ENABLED
    6896 
    6897 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6898  uint32_t currentFrameIndex,
    6899  uint32_t frameInUseCount,
    6900  VkDeviceSize bufferImageGranularity,
    6901  VkDeviceSize allocSize,
    6902  VkDeviceSize allocAlignment,
    6903  bool upperAddress,
    6904  VmaSuballocationType allocType,
    6905  bool canMakeOtherLost,
    6906  uint32_t strategy,
    6907  VmaAllocationRequest* pAllocationRequest)
    6908 {
    6909  VMA_ASSERT(allocSize > 0);
    6910  VMA_ASSERT(!upperAddress);
    6911  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6912  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6913  VMA_HEAVY_ASSERT(Validate());
    6914 
    6915  // There is not enough total free space in this block to fullfill the request: Early return.
    6916  if(canMakeOtherLost == false &&
    6917  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6918  {
    6919  return false;
    6920  }
    6921 
    6922  // New algorithm, efficiently searching freeSuballocationsBySize.
    6923  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6924  if(freeSuballocCount > 0)
    6925  {
    6927  {
    6928  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6929  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6930  m_FreeSuballocationsBySize.data(),
    6931  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6932  allocSize + 2 * VMA_DEBUG_MARGIN,
    6933  VmaSuballocationItemSizeLess());
    6934  size_t index = it - m_FreeSuballocationsBySize.data();
    6935  for(; index < freeSuballocCount; ++index)
    6936  {
    6937  if(CheckAllocation(
    6938  currentFrameIndex,
    6939  frameInUseCount,
    6940  bufferImageGranularity,
    6941  allocSize,
    6942  allocAlignment,
    6943  allocType,
    6944  m_FreeSuballocationsBySize[index],
    6945  false, // canMakeOtherLost
    6946  &pAllocationRequest->offset,
    6947  &pAllocationRequest->itemsToMakeLostCount,
    6948  &pAllocationRequest->sumFreeSize,
    6949  &pAllocationRequest->sumItemSize))
    6950  {
    6951  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6952  return true;
    6953  }
    6954  }
    6955  }
    6956  else // WORST_FIT, FIRST_FIT
    6957  {
    6958  // Search staring from biggest suballocations.
    6959  for(size_t index = freeSuballocCount; index--; )
    6960  {
    6961  if(CheckAllocation(
    6962  currentFrameIndex,
    6963  frameInUseCount,
    6964  bufferImageGranularity,
    6965  allocSize,
    6966  allocAlignment,
    6967  allocType,
    6968  m_FreeSuballocationsBySize[index],
    6969  false, // canMakeOtherLost
    6970  &pAllocationRequest->offset,
    6971  &pAllocationRequest->itemsToMakeLostCount,
    6972  &pAllocationRequest->sumFreeSize,
    6973  &pAllocationRequest->sumItemSize))
    6974  {
    6975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6976  return true;
    6977  }
    6978  }
    6979  }
    6980  }
    6981 
    6982  if(canMakeOtherLost)
    6983  {
    6984  // Brute-force algorithm. TODO: Come up with something better.
    6985 
    6986  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6987  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6988 
    6989  VmaAllocationRequest tmpAllocRequest = {};
    6990  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6991  suballocIt != m_Suballocations.end();
    6992  ++suballocIt)
    6993  {
    6994  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6995  suballocIt->hAllocation->CanBecomeLost())
    6996  {
    6997  if(CheckAllocation(
    6998  currentFrameIndex,
    6999  frameInUseCount,
    7000  bufferImageGranularity,
    7001  allocSize,
    7002  allocAlignment,
    7003  allocType,
    7004  suballocIt,
    7005  canMakeOtherLost,
    7006  &tmpAllocRequest.offset,
    7007  &tmpAllocRequest.itemsToMakeLostCount,
    7008  &tmpAllocRequest.sumFreeSize,
    7009  &tmpAllocRequest.sumItemSize))
    7010  {
    7011  tmpAllocRequest.item = suballocIt;
    7012 
    7013  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7015  {
    7016  *pAllocationRequest = tmpAllocRequest;
    7017  }
    7018  }
    7019  }
    7020  }
    7021 
    7022  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7023  {
    7024  return true;
    7025  }
    7026  }
    7027 
    7028  return false;
    7029 }
    7030 
    7031 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    7032  uint32_t currentFrameIndex,
    7033  uint32_t frameInUseCount,
    7034  VmaAllocationRequest* pAllocationRequest)
    7035 {
    7036  while(pAllocationRequest->itemsToMakeLostCount > 0)
    7037  {
    7038  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    7039  {
    7040  ++pAllocationRequest->item;
    7041  }
    7042  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7043  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    7044  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    7045  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7046  {
    7047  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    7048  --pAllocationRequest->itemsToMakeLostCount;
    7049  }
    7050  else
    7051  {
    7052  return false;
    7053  }
    7054  }
    7055 
    7056  VMA_HEAVY_ASSERT(Validate());
    7057  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    7058  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7059 
    7060  return true;
    7061 }
    7062 
    7063 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7064 {
    7065  uint32_t lostAllocationCount = 0;
    7066  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7067  it != m_Suballocations.end();
    7068  ++it)
    7069  {
    7070  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7071  it->hAllocation->CanBecomeLost() &&
    7072  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7073  {
    7074  it = FreeSuballocation(it);
    7075  ++lostAllocationCount;
    7076  }
    7077  }
    7078  return lostAllocationCount;
    7079 }
    7080 
    7081 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7082 {
    7083  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7084  it != m_Suballocations.end();
    7085  ++it)
    7086  {
    7087  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7088  {
    7089  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7090  {
    7091  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7092  return VK_ERROR_VALIDATION_FAILED_EXT;
    7093  }
    7094  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7095  {
    7096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7097  return VK_ERROR_VALIDATION_FAILED_EXT;
    7098  }
    7099  }
    7100  }
    7101 
    7102  return VK_SUCCESS;
    7103 }
    7104 
// Commits an allocation previously found by CreateAllocationRequest():
// carves allocSize bytes out of the free suballocation pointed to by
// request.item, inserting new free suballocations for any leftover space
// before and after the allocated region, and updates the totals.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address placement is not supported by this (generic) algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before the fields change below, because the
    // registry is keyed by the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free range is gone; each inserted padding
    // contributes one free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7170 
    7171 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7172 {
    7173  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7174  suballocItem != m_Suballocations.end();
    7175  ++suballocItem)
    7176  {
    7177  VmaSuballocation& suballoc = *suballocItem;
    7178  if(suballoc.hAllocation == allocation)
    7179  {
    7180  FreeSuballocation(suballocItem);
    7181  VMA_HEAVY_ASSERT(Validate());
    7182  return;
    7183  }
    7184  }
    7185  VMA_ASSERT(0 && "Not found!");
    7186 }
    7187 
    7188 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7189 {
    7190  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7191  suballocItem != m_Suballocations.end();
    7192  ++suballocItem)
    7193  {
    7194  VmaSuballocation& suballoc = *suballocItem;
    7195  if(suballoc.offset == offset)
    7196  {
    7197  FreeSuballocation(suballocItem);
    7198  return;
    7199  }
    7200  }
    7201  VMA_ASSERT(0 && "Not found!");
    7202 }
    7203 
    7204 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7205 {
    7206  VkDeviceSize lastSize = 0;
    7207  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7208  {
    7209  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7210 
    7211  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7212  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7213  VMA_VALIDATE(it->size >= lastSize);
    7214  lastSize = it->size;
    7215  }
    7216  return true;
    7217 }
    7218 
// Checks whether an allocation of given size/alignment/type can be placed
// starting at suballocItem. On success fills *pOffset with the final aligned
// offset and returns true.
//
// Two modes:
//  - canMakeOtherLost == false: suballocItem must be a free range large
//    enough by itself (with margins and granularity padding).
//  - canMakeOtherLost == true: the region may span multiple consecutive
//    suballocations; used ones that can become lost are counted in
//    *itemsToMakeLostCount, and *pSumFreeSize / *pSumItemSize accumulate the
//    free vs. to-be-lost bytes for cost comparison by the caller.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting item may be free, or a lost-able allocation old enough
        // (not used within the last frameInUseCount frames) to be evicted.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the candidate range must be a single free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7492 
    7493 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7494 {
    7495  VMA_ASSERT(item != m_Suballocations.end());
    7496  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7497 
    7498  VmaSuballocationList::iterator nextItem = item;
    7499  ++nextItem;
    7500  VMA_ASSERT(nextItem != m_Suballocations.end());
    7501  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7502 
    7503  item->size += nextItem->size;
    7504  --m_FreeCount;
    7505  m_Suballocations.erase(nextItem);
    7506 }
    7507 
// Marks the given suballocation as free, merges it with adjacent free
// neighbors, updates totals and the by-size registry. Returns an iterator
// to the resulting (possibly merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // The absorbed neighbor must be unregistered BEFORE merging, because the
    // by-size registry is keyed by the item's current size and the merge
    // erases / resizes items.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem (and possibly the already-merged next),
        // so it must be re-registered under its new, larger size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7559 
    7560 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7561 {
    7562  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7563  VMA_ASSERT(item->size > 0);
    7564 
    7565  // You may want to enable this validation at the beginning or at the end of
    7566  // this function, depending on what do you want to check.
    7567  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7568 
    7569  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7570  {
    7571  if(m_FreeSuballocationsBySize.empty())
    7572  {
    7573  m_FreeSuballocationsBySize.push_back(item);
    7574  }
    7575  else
    7576  {
    7577  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7578  }
    7579  }
    7580 
    7581  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7582 }
    7583 
    7584 
// Removes `item` from m_FreeSuballocationsBySize, the size-sorted vector of
// registered free suballocations. Counterpart of RegisterFreeSuballocation.
// Asserts (in debug) if a large-enough item is not actually present.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    // Items below the registration threshold were never added, so there is
    // nothing to remove for them.
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search the first entry whose size is not less than item's
        // size, then scan forward through the run of equal-sized entries
        // looking for the exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Stepping past the run of equal sizes without a match means the
            // item is missing from the vector - a bookkeeping error.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7617 
    7619 // class VmaBlockMetadata_Linear
    7620 
// Constructs empty metadata for a linear block. The block size is provided
// later via Init(). Both suballocation vectors start empty; vector index 0
// initially serves as the "1st" vector, and the 2nd vector mode starts as
// SECOND_VECTOR_EMPTY. Vectors allocate through the allocator's callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7633 
// Trivial destructor - member vectors release their storage automatically.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7637 
// Records the size of the managed block. Called once after construction;
// the whole block is initially free, so the cached free-size sum equals the
// full block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7643 
// Exhaustively checks internal consistency of this linear block's metadata:
// vector/mode invariants, null-item counters, per-suballocation invariants
// (monotonic offsets with debug margin, free items have null handles, used
// items' allocation objects match the records), and the cached free-size sum.
// VMA_VALIDATE makes the function return false on the first violated check.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty exactly when the mode says SECOND_VECTOR_EMPTY.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot have items in 2nd while 1st is empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    // Null-item counters can never exceed the vectors they describe.
    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum offset the next visited suballocation must be at or beyond.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies the low offsets, so it
        // is walked first to keep `offset` monotonically increasing.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free items must carry a null handle, used items a non-null one.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Allocation object must agree with the metadata record.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of 1st must really be free entries with null handles.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): `i >= m_1stNullItemsBeginCount` is always true here
        // because the loop starts at that index, so this check is a tautology
        // - possibly a remnant of an earlier loop shape. Verify intent.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows downward from the end of
        // the block, so iterate it in reverse to visit increasing offsets.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Used space must fit in the block and agree with the cached free size.
    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7770 
    7771 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7772 {
    7773  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7774  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7775 }
    7776 
    7777 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7778 {
    7779  const VkDeviceSize size = GetSize();
    7780 
    7781  /*
    7782  We don't consider gaps inside allocation vectors with freed allocations because
    7783  they are not suitable for reuse in linear allocator. We consider only space that
    7784  is available for new allocations.
    7785  */
    7786  if(IsEmpty())
    7787  {
    7788  return size;
    7789  }
    7790 
    7791  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7792 
    7793  switch(m_2ndVectorMode)
    7794  {
    7795  case SECOND_VECTOR_EMPTY:
    7796  /*
    7797  Available space is after end of 1st, as well as before beginning of 1st (which
    7798  whould make it a ring buffer).
    7799  */
    7800  {
    7801  const size_t suballocations1stCount = suballocations1st.size();
    7802  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7803  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7804  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7805  return VMA_MAX(
    7806  firstSuballoc.offset,
    7807  size - (lastSuballoc.offset + lastSuballoc.size));
    7808  }
    7809  break;
    7810 
    7811  case SECOND_VECTOR_RING_BUFFER:
    7812  /*
    7813  Available space is only between end of 2nd and beginning of 1st.
    7814  */
    7815  {
    7816  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7817  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7818  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7819  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7820  }
    7821  break;
    7822 
    7823  case SECOND_VECTOR_DOUBLE_STACK:
    7824  /*
    7825  Available space is only between end of 1st and top of 2nd.
    7826  */
    7827  {
    7828  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7829  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7830  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7831  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7832  }
    7833  break;
    7834 
    7835  default:
    7836  VMA_ASSERT(0);
    7837  return 0;
    7838  }
    7839 }
    7840 
    7841 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7842 {
    7843  const VkDeviceSize size = GetSize();
    7844  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7845  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7846  const size_t suballoc1stCount = suballocations1st.size();
    7847  const size_t suballoc2ndCount = suballocations2nd.size();
    7848 
    7849  outInfo.blockCount = 1;
    7850  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7851  outInfo.unusedRangeCount = 0;
    7852  outInfo.usedBytes = 0;
    7853  outInfo.allocationSizeMin = UINT64_MAX;
    7854  outInfo.allocationSizeMax = 0;
    7855  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7856  outInfo.unusedRangeSizeMax = 0;
    7857 
    7858  VkDeviceSize lastOffset = 0;
    7859 
    7860  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7861  {
    7862  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7863  size_t nextAlloc2ndIndex = 0;
    7864  while(lastOffset < freeSpace2ndTo1stEnd)
    7865  {
    7866  // Find next non-null allocation or move nextAllocIndex to the end.
    7867  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7868  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7869  {
    7870  ++nextAlloc2ndIndex;
    7871  }
    7872 
    7873  // Found non-null allocation.
    7874  if(nextAlloc2ndIndex < suballoc2ndCount)
    7875  {
    7876  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7877 
    7878  // 1. Process free space before this allocation.
    7879  if(lastOffset < suballoc.offset)
    7880  {
    7881  // There is free space from lastOffset to suballoc.offset.
    7882  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7883  ++outInfo.unusedRangeCount;
    7884  outInfo.unusedBytes += unusedRangeSize;
    7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7886  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7887  }
    7888 
    7889  // 2. Process this allocation.
    7890  // There is allocation with suballoc.offset, suballoc.size.
    7891  outInfo.usedBytes += suballoc.size;
    7892  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7893  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7894 
    7895  // 3. Prepare for next iteration.
    7896  lastOffset = suballoc.offset + suballoc.size;
    7897  ++nextAlloc2ndIndex;
    7898  }
    7899  // We are at the end.
    7900  else
    7901  {
    7902  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7903  if(lastOffset < freeSpace2ndTo1stEnd)
    7904  {
    7905  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7906  ++outInfo.unusedRangeCount;
    7907  outInfo.unusedBytes += unusedRangeSize;
    7908  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7909  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7910  }
    7911 
    7912  // End of loop.
    7913  lastOffset = freeSpace2ndTo1stEnd;
    7914  }
    7915  }
    7916  }
    7917 
    7918  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7919  const VkDeviceSize freeSpace1stTo2ndEnd =
    7920  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7921  while(lastOffset < freeSpace1stTo2ndEnd)
    7922  {
    7923  // Find next non-null allocation or move nextAllocIndex to the end.
    7924  while(nextAlloc1stIndex < suballoc1stCount &&
    7925  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7926  {
    7927  ++nextAlloc1stIndex;
    7928  }
    7929 
    7930  // Found non-null allocation.
    7931  if(nextAlloc1stIndex < suballoc1stCount)
    7932  {
    7933  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7934 
    7935  // 1. Process free space before this allocation.
    7936  if(lastOffset < suballoc.offset)
    7937  {
    7938  // There is free space from lastOffset to suballoc.offset.
    7939  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7940  ++outInfo.unusedRangeCount;
    7941  outInfo.unusedBytes += unusedRangeSize;
    7942  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7943  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7944  }
    7945 
    7946  // 2. Process this allocation.
    7947  // There is allocation with suballoc.offset, suballoc.size.
    7948  outInfo.usedBytes += suballoc.size;
    7949  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7950  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7951 
    7952  // 3. Prepare for next iteration.
    7953  lastOffset = suballoc.offset + suballoc.size;
    7954  ++nextAlloc1stIndex;
    7955  }
    7956  // We are at the end.
    7957  else
    7958  {
    7959  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7960  if(lastOffset < freeSpace1stTo2ndEnd)
    7961  {
    7962  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7963  ++outInfo.unusedRangeCount;
    7964  outInfo.unusedBytes += unusedRangeSize;
    7965  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7966  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7967  }
    7968 
    7969  // End of loop.
    7970  lastOffset = freeSpace1stTo2ndEnd;
    7971  }
    7972  }
    7973 
    7974  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7975  {
    7976  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7977  while(lastOffset < size)
    7978  {
    7979  // Find next non-null allocation or move nextAllocIndex to the end.
    7980  while(nextAlloc2ndIndex != SIZE_MAX &&
    7981  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7982  {
    7983  --nextAlloc2ndIndex;
    7984  }
    7985 
    7986  // Found non-null allocation.
    7987  if(nextAlloc2ndIndex != SIZE_MAX)
    7988  {
    7989  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7990 
    7991  // 1. Process free space before this allocation.
    7992  if(lastOffset < suballoc.offset)
    7993  {
    7994  // There is free space from lastOffset to suballoc.offset.
    7995  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7996  ++outInfo.unusedRangeCount;
    7997  outInfo.unusedBytes += unusedRangeSize;
    7998  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7999  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8000  }
    8001 
    8002  // 2. Process this allocation.
    8003  // There is allocation with suballoc.offset, suballoc.size.
    8004  outInfo.usedBytes += suballoc.size;
    8005  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8006  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8007 
    8008  // 3. Prepare for next iteration.
    8009  lastOffset = suballoc.offset + suballoc.size;
    8010  --nextAlloc2ndIndex;
    8011  }
    8012  // We are at the end.
    8013  else
    8014  {
    8015  // There is free space from lastOffset to size.
    8016  if(lastOffset < size)
    8017  {
    8018  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8019  ++outInfo.unusedRangeCount;
    8020  outInfo.unusedBytes += unusedRangeSize;
    8021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8022  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8023  }
    8024 
    8025  // End of loop.
    8026  lastOffset = size;
    8027  }
    8028  }
    8029  }
    8030 
    8031  outInfo.unusedBytes = size - outInfo.usedBytes;
    8032 }
    8033 
    8034 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8035 {
    8036  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8038  const VkDeviceSize size = GetSize();
    8039  const size_t suballoc1stCount = suballocations1st.size();
    8040  const size_t suballoc2ndCount = suballocations2nd.size();
    8041 
    8042  inoutStats.size += size;
    8043 
    8044  VkDeviceSize lastOffset = 0;
    8045 
    8046  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8047  {
    8048  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8049  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8050  while(lastOffset < freeSpace2ndTo1stEnd)
    8051  {
    8052  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8053  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8054  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8055  {
    8056  ++nextAlloc2ndIndex;
    8057  }
    8058 
    8059  // Found non-null allocation.
    8060  if(nextAlloc2ndIndex < suballoc2ndCount)
    8061  {
    8062  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8063 
    8064  // 1. Process free space before this allocation.
    8065  if(lastOffset < suballoc.offset)
    8066  {
    8067  // There is free space from lastOffset to suballoc.offset.
    8068  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8069  inoutStats.unusedSize += unusedRangeSize;
    8070  ++inoutStats.unusedRangeCount;
    8071  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8072  }
    8073 
    8074  // 2. Process this allocation.
    8075  // There is allocation with suballoc.offset, suballoc.size.
    8076  ++inoutStats.allocationCount;
    8077 
    8078  // 3. Prepare for next iteration.
    8079  lastOffset = suballoc.offset + suballoc.size;
    8080  ++nextAlloc2ndIndex;
    8081  }
    8082  // We are at the end.
    8083  else
    8084  {
    8085  if(lastOffset < freeSpace2ndTo1stEnd)
    8086  {
    8087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8088  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8089  inoutStats.unusedSize += unusedRangeSize;
    8090  ++inoutStats.unusedRangeCount;
    8091  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8092  }
    8093 
    8094  // End of loop.
    8095  lastOffset = freeSpace2ndTo1stEnd;
    8096  }
    8097  }
    8098  }
    8099 
    8100  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8101  const VkDeviceSize freeSpace1stTo2ndEnd =
    8102  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8103  while(lastOffset < freeSpace1stTo2ndEnd)
    8104  {
    8105  // Find next non-null allocation or move nextAllocIndex to the end.
    8106  while(nextAlloc1stIndex < suballoc1stCount &&
    8107  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8108  {
    8109  ++nextAlloc1stIndex;
    8110  }
    8111 
    8112  // Found non-null allocation.
    8113  if(nextAlloc1stIndex < suballoc1stCount)
    8114  {
    8115  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8116 
    8117  // 1. Process free space before this allocation.
    8118  if(lastOffset < suballoc.offset)
    8119  {
    8120  // There is free space from lastOffset to suballoc.offset.
    8121  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8122  inoutStats.unusedSize += unusedRangeSize;
    8123  ++inoutStats.unusedRangeCount;
    8124  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8125  }
    8126 
    8127  // 2. Process this allocation.
    8128  // There is allocation with suballoc.offset, suballoc.size.
    8129  ++inoutStats.allocationCount;
    8130 
    8131  // 3. Prepare for next iteration.
    8132  lastOffset = suballoc.offset + suballoc.size;
    8133  ++nextAlloc1stIndex;
    8134  }
    8135  // We are at the end.
    8136  else
    8137  {
    8138  if(lastOffset < freeSpace1stTo2ndEnd)
    8139  {
    8140  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8141  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8142  inoutStats.unusedSize += unusedRangeSize;
    8143  ++inoutStats.unusedRangeCount;
    8144  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8145  }
    8146 
    8147  // End of loop.
    8148  lastOffset = freeSpace1stTo2ndEnd;
    8149  }
    8150  }
    8151 
    8152  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8153  {
    8154  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8155  while(lastOffset < size)
    8156  {
    8157  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8158  while(nextAlloc2ndIndex != SIZE_MAX &&
    8159  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8160  {
    8161  --nextAlloc2ndIndex;
    8162  }
    8163 
    8164  // Found non-null allocation.
    8165  if(nextAlloc2ndIndex != SIZE_MAX)
    8166  {
    8167  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8168 
    8169  // 1. Process free space before this allocation.
    8170  if(lastOffset < suballoc.offset)
    8171  {
    8172  // There is free space from lastOffset to suballoc.offset.
    8173  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8174  inoutStats.unusedSize += unusedRangeSize;
    8175  ++inoutStats.unusedRangeCount;
    8176  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8177  }
    8178 
    8179  // 2. Process this allocation.
    8180  // There is allocation with suballoc.offset, suballoc.size.
    8181  ++inoutStats.allocationCount;
    8182 
    8183  // 3. Prepare for next iteration.
    8184  lastOffset = suballoc.offset + suballoc.size;
    8185  --nextAlloc2ndIndex;
    8186  }
    8187  // We are at the end.
    8188  else
    8189  {
    8190  if(lastOffset < size)
    8191  {
    8192  // There is free space from lastOffset to size.
    8193  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8194  inoutStats.unusedSize += unusedRangeSize;
    8195  ++inoutStats.unusedRangeCount;
    8196  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8197  }
    8198 
    8199  // End of loop.
    8200  lastOffset = size;
    8201  }
    8202  }
    8203  }
    8204 }
    8205 
    8206 #if VMA_STATS_STRING_ENABLED
    8207 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8208 {
    8209  const VkDeviceSize size = GetSize();
    8210  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8211  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8212  const size_t suballoc1stCount = suballocations1st.size();
    8213  const size_t suballoc2ndCount = suballocations2nd.size();
    8214 
    8215  // FIRST PASS
    8216 
    8217  size_t unusedRangeCount = 0;
    8218  VkDeviceSize usedBytes = 0;
    8219 
    8220  VkDeviceSize lastOffset = 0;
    8221 
    8222  size_t alloc2ndCount = 0;
    8223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8224  {
    8225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8226  size_t nextAlloc2ndIndex = 0;
    8227  while(lastOffset < freeSpace2ndTo1stEnd)
    8228  {
    8229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8230  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8232  {
    8233  ++nextAlloc2ndIndex;
    8234  }
    8235 
    8236  // Found non-null allocation.
    8237  if(nextAlloc2ndIndex < suballoc2ndCount)
    8238  {
    8239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8240 
    8241  // 1. Process free space before this allocation.
    8242  if(lastOffset < suballoc.offset)
    8243  {
    8244  // There is free space from lastOffset to suballoc.offset.
    8245  ++unusedRangeCount;
    8246  }
    8247 
    8248  // 2. Process this allocation.
    8249  // There is allocation with suballoc.offset, suballoc.size.
    8250  ++alloc2ndCount;
    8251  usedBytes += suballoc.size;
    8252 
    8253  // 3. Prepare for next iteration.
    8254  lastOffset = suballoc.offset + suballoc.size;
    8255  ++nextAlloc2ndIndex;
    8256  }
    8257  // We are at the end.
    8258  else
    8259  {
    8260  if(lastOffset < freeSpace2ndTo1stEnd)
    8261  {
    8262  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8263  ++unusedRangeCount;
    8264  }
    8265 
    8266  // End of loop.
    8267  lastOffset = freeSpace2ndTo1stEnd;
    8268  }
    8269  }
    8270  }
    8271 
    8272  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8273  size_t alloc1stCount = 0;
    8274  const VkDeviceSize freeSpace1stTo2ndEnd =
    8275  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8276  while(lastOffset < freeSpace1stTo2ndEnd)
    8277  {
    8278  // Find next non-null allocation or move nextAllocIndex to the end.
    8279  while(nextAlloc1stIndex < suballoc1stCount &&
    8280  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8281  {
    8282  ++nextAlloc1stIndex;
    8283  }
    8284 
    8285  // Found non-null allocation.
    8286  if(nextAlloc1stIndex < suballoc1stCount)
    8287  {
    8288  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8289 
    8290  // 1. Process free space before this allocation.
    8291  if(lastOffset < suballoc.offset)
    8292  {
    8293  // There is free space from lastOffset to suballoc.offset.
    8294  ++unusedRangeCount;
    8295  }
    8296 
    8297  // 2. Process this allocation.
    8298  // There is allocation with suballoc.offset, suballoc.size.
    8299  ++alloc1stCount;
    8300  usedBytes += suballoc.size;
    8301 
    8302  // 3. Prepare for next iteration.
    8303  lastOffset = suballoc.offset + suballoc.size;
    8304  ++nextAlloc1stIndex;
    8305  }
    8306  // We are at the end.
    8307  else
    8308  {
    8309  if(lastOffset < size)
    8310  {
    8311  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8312  ++unusedRangeCount;
    8313  }
    8314 
    8315  // End of loop.
    8316  lastOffset = freeSpace1stTo2ndEnd;
    8317  }
    8318  }
    8319 
    8320  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8321  {
    8322  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8323  while(lastOffset < size)
    8324  {
    8325  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8326  while(nextAlloc2ndIndex != SIZE_MAX &&
    8327  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8328  {
    8329  --nextAlloc2ndIndex;
    8330  }
    8331 
    8332  // Found non-null allocation.
    8333  if(nextAlloc2ndIndex != SIZE_MAX)
    8334  {
    8335  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8336 
    8337  // 1. Process free space before this allocation.
    8338  if(lastOffset < suballoc.offset)
    8339  {
    8340  // There is free space from lastOffset to suballoc.offset.
    8341  ++unusedRangeCount;
    8342  }
    8343 
    8344  // 2. Process this allocation.
    8345  // There is allocation with suballoc.offset, suballoc.size.
    8346  ++alloc2ndCount;
    8347  usedBytes += suballoc.size;
    8348 
    8349  // 3. Prepare for next iteration.
    8350  lastOffset = suballoc.offset + suballoc.size;
    8351  --nextAlloc2ndIndex;
    8352  }
    8353  // We are at the end.
    8354  else
    8355  {
    8356  if(lastOffset < size)
    8357  {
    8358  // There is free space from lastOffset to size.
    8359  ++unusedRangeCount;
    8360  }
    8361 
    8362  // End of loop.
    8363  lastOffset = size;
    8364  }
    8365  }
    8366  }
    8367 
    8368  const VkDeviceSize unusedBytes = size - usedBytes;
    8369  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8370 
    8371  // SECOND PASS
    8372  lastOffset = 0;
    8373 
    8374  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8375  {
    8376  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8377  size_t nextAlloc2ndIndex = 0;
    8378  while(lastOffset < freeSpace2ndTo1stEnd)
    8379  {
    8380  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8381  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8382  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8383  {
    8384  ++nextAlloc2ndIndex;
    8385  }
    8386 
    8387  // Found non-null allocation.
    8388  if(nextAlloc2ndIndex < suballoc2ndCount)
    8389  {
    8390  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8391 
    8392  // 1. Process free space before this allocation.
    8393  if(lastOffset < suballoc.offset)
    8394  {
    8395  // There is free space from lastOffset to suballoc.offset.
    8396  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8397  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8398  }
    8399 
    8400  // 2. Process this allocation.
    8401  // There is allocation with suballoc.offset, suballoc.size.
    8402  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8403 
    8404  // 3. Prepare for next iteration.
    8405  lastOffset = suballoc.offset + suballoc.size;
    8406  ++nextAlloc2ndIndex;
    8407  }
    8408  // We are at the end.
    8409  else
    8410  {
    8411  if(lastOffset < freeSpace2ndTo1stEnd)
    8412  {
    8413  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8414  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8415  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8416  }
    8417 
    8418  // End of loop.
    8419  lastOffset = freeSpace2ndTo1stEnd;
    8420  }
    8421  }
    8422  }
    8423 
    8424  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8425  while(lastOffset < freeSpace1stTo2ndEnd)
    8426  {
    8427  // Find next non-null allocation or move nextAllocIndex to the end.
    8428  while(nextAlloc1stIndex < suballoc1stCount &&
    8429  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8430  {
    8431  ++nextAlloc1stIndex;
    8432  }
    8433 
    8434  // Found non-null allocation.
    8435  if(nextAlloc1stIndex < suballoc1stCount)
    8436  {
    8437  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8438 
    8439  // 1. Process free space before this allocation.
    8440  if(lastOffset < suballoc.offset)
    8441  {
    8442  // There is free space from lastOffset to suballoc.offset.
    8443  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8444  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8445  }
    8446 
    8447  // 2. Process this allocation.
    8448  // There is allocation with suballoc.offset, suballoc.size.
    8449  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8450 
    8451  // 3. Prepare for next iteration.
    8452  lastOffset = suballoc.offset + suballoc.size;
    8453  ++nextAlloc1stIndex;
    8454  }
    8455  // We are at the end.
    8456  else
    8457  {
    8458  if(lastOffset < freeSpace1stTo2ndEnd)
    8459  {
    8460  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8461  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8462  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8463  }
    8464 
    8465  // End of loop.
    8466  lastOffset = freeSpace1stTo2ndEnd;
    8467  }
    8468  }
    8469 
    8470  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8471  {
    8472  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8473  while(lastOffset < size)
    8474  {
    8475  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8476  while(nextAlloc2ndIndex != SIZE_MAX &&
    8477  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8478  {
    8479  --nextAlloc2ndIndex;
    8480  }
    8481 
    8482  // Found non-null allocation.
    8483  if(nextAlloc2ndIndex != SIZE_MAX)
    8484  {
    8485  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8486 
    8487  // 1. Process free space before this allocation.
    8488  if(lastOffset < suballoc.offset)
    8489  {
    8490  // There is free space from lastOffset to suballoc.offset.
    8491  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8492  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8493  }
    8494 
    8495  // 2. Process this allocation.
    8496  // There is allocation with suballoc.offset, suballoc.size.
    8497  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8498 
    8499  // 3. Prepare for next iteration.
    8500  lastOffset = suballoc.offset + suballoc.size;
    8501  --nextAlloc2ndIndex;
    8502  }
    8503  // We are at the end.
    8504  else
    8505  {
    8506  if(lastOffset < size)
    8507  {
    8508  // There is free space from lastOffset to size.
    8509  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8510  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8511  }
    8512 
    8513  // End of loop.
    8514  lastOffset = size;
    8515  }
    8516  }
    8517  }
    8518 
    8519  PrintDetailedMap_End(json);
    8520 }
    8521 #endif // #if VMA_STATS_STRING_ENABLED
    8522 
// Tries to find a place for a new allocation of allocSize/allocAlignment inside
// this linear block and, on success, fills *pAllocationRequest and returns true.
// Depending on `upperAddress`, the allocation is placed either at the top of the
// block growing downwards (double stack) or after the last item of the 1st
// vector / wrapped around into the 2nd vector (ring buffer).
// When `canMakeOtherLost` is true, colliding allocations at the beginning of the
// 1st vector may be scheduled to be made lost (counted in itemsToMakeLostCount /
// sumItemSize) instead of failing immediately.
// NOTE(review): the `strategy` parameter is accepted but never read in this
// implementation — presumably the linear algorithm has only one placement
// strategy; confirm against the interface contract.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper address requires double-stack mode; a block already used as a
        // ring buffer cannot also serve as a double stack.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Would underflow below offset 0: not enough room under the stack top.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downwards, so align DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate 2nd vector backwards (items closest in memory first).
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment. Growing upwards, so align UP.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode free space ends at the top of the upper stack;
            // otherwise at the end of the block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            // Wrap-around only makes sense when 1st vector has items to wrap before.
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Scan items at the beginning of 1st vector that collide with the
                // proposed [resultOffset, resultOffset+allocSize+margin) range and
                // count those that can be made lost; fail if any cannot.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8895 
    8896 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8897  uint32_t currentFrameIndex,
    8898  uint32_t frameInUseCount,
    8899  VmaAllocationRequest* pAllocationRequest)
    8900 {
    8901  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8902  {
    8903  return true;
    8904  }
    8905 
    8906  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8907 
    8908  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8909  size_t index1st = m_1stNullItemsBeginCount;
    8910  size_t madeLostCount = 0;
    8911  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8912  {
    8913  VMA_ASSERT(index1st < suballocations1st.size());
    8914  VmaSuballocation& suballoc = suballocations1st[index1st];
    8915  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8916  {
    8917  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8918  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8919  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8920  {
    8921  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8922  suballoc.hAllocation = VK_NULL_HANDLE;
    8923  m_SumFreeSize += suballoc.size;
    8924  ++m_1stNullItemsMiddleCount;
    8925  ++madeLostCount;
    8926  }
    8927  else
    8928  {
    8929  return false;
    8930  }
    8931  }
    8932  ++index1st;
    8933  }
    8934 
    8935  CleanupAfterFree();
    8936  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8937 
    8938  return true;
    8939 }
    8940 
    8941 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8942 {
    8943  uint32_t lostAllocationCount = 0;
    8944 
    8945  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8946  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8947  {
    8948  VmaSuballocation& suballoc = suballocations1st[i];
    8949  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8950  suballoc.hAllocation->CanBecomeLost() &&
    8951  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8952  {
    8953  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8954  suballoc.hAllocation = VK_NULL_HANDLE;
    8955  ++m_1stNullItemsMiddleCount;
    8956  m_SumFreeSize += suballoc.size;
    8957  ++lostAllocationCount;
    8958  }
    8959  }
    8960 
    8961  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8962  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8963  {
    8964  VmaSuballocation& suballoc = suballocations2nd[i];
    8965  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8966  suballoc.hAllocation->CanBecomeLost() &&
    8967  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8968  {
    8969  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8970  suballoc.hAllocation = VK_NULL_HANDLE;
    8971  ++m_2ndNullItemsCount;
    8972  ++lostAllocationCount;
    8973  }
    8974  }
    8975 
    8976  if(lostAllocationCount)
    8977  {
    8978  CleanupAfterFree();
    8979  }
    8980 
    8981  return lostAllocationCount;
    8982 }
    8983 
    8984 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8985 {
    8986  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8987  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8988  {
    8989  const VmaSuballocation& suballoc = suballocations1st[i];
    8990  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8991  {
    8992  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8993  {
    8994  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8995  return VK_ERROR_VALIDATION_FAILED_EXT;
    8996  }
    8997  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8998  {
    8999  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9000  return VK_ERROR_VALIDATION_FAILED_EXT;
    9001  }
    9002  }
    9003  }
    9004 
    9005  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9006  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9007  {
    9008  const VmaSuballocation& suballoc = suballocations2nd[i];
    9009  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9010  {
    9011  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9012  {
    9013  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9014  return VK_ERROR_VALIDATION_FAILED_EXT;
    9015  }
    9016  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9017  {
    9018  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9019  return VK_ERROR_VALIDATION_FAILED_EXT;
    9020  }
    9021  }
    9022  }
    9023 
    9024  return VK_SUCCESS;
    9025 }
    9026 
// Commits an allocation previously validated by CreateAllocationRequest:
// records the new suballocation in the proper vector and updates the
// 2nd-vector mode and free-size bookkeeping. `request.offset` determines
// whether the item goes at the end of the 1st vector or wraps into the 2nd.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocation: push onto the upper stack (2nd vector)
        // and latch the block into double-stack mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Transition the 2nd-vector mode: EMPTY -> RING_BUFFER on the
                // first wrapped allocation; RING_BUFFER stays; DOUBLE_STACK is
                // incompatible with wrapping.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request offset matches neither valid placement - the
                // request was not produced by CreateAllocationRequest.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9096 
    9097 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9098 {
    9099  FreeAtOffset(allocation->GetOffset());
    9100 }
    9101 
// Frees the suballocation that starts at `offset`. Tries the cheap cases first
// (first item of 1st vector, last item of 2nd or 1st vector), then falls back
// to binary search in the middle of each vector. Order of these checks matters:
// each early return also triggers CleanupAfterFree() to normalize the state.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by offset ascending, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as null item in place; CleanupAfterFree compacts later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd sorted ascending by offset; double stack keeps
        // it descending, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9190 
    9191 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9192 {
    9193  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9194  const size_t suballocCount = AccessSuballocations1st().size();
    9195  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9196 }
    9197 
// Normalizes internal state after any free: absorbs new null items into the
// begin/middle counters, trims trailing null items, optionally compacts the
// 1st vector, resets the 2nd-vector mode when it empties, and when the 1st
// vector becomes effectively empty promotes the 2nd (ring-buffer) vector to
// become the new 1st. Statement order here is load-bearing.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block is free: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Middle nulls adjacent to the beginning become begin nulls.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        // Trailing nulls can simply be popped.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: move every non-null item to the front, preserving order,
            // then shrink the vector and zero the null counters.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The 2nd (ring-buffer) vector becomes the new 1st; its null
                // count becomes the new middle count, then leading nulls are
                // re-absorbed into the begin count.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping this index is what actually swaps which vector
                // AccessSuballocations1st()/2nd() return.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9294 
    9295 
    9297 // class VmaBlockMetadata_Buddy
    9298 
// Constructs buddy-allocator metadata in an uninitialized state.
// The node tree and level sizes are set up later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1), // The root node created in Init() counts as one free range.
    m_SumFreeSize(0)
{
    // All per-level free lists start empty.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9308 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively destroys the whole node tree.
    // NOTE(review): m_Root is VMA_NULL until Init() is called and DeleteNode
    // dereferences its argument - confirm Init() always precedes destruction.
    DeleteNode(m_Root);
}
    9313 
// Initializes the buddy metadata for a block of the given size: computes the
// usable (power-of-2) region, the number of levels, and creates the root node.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    // The buddy algorithm can only manage a power-of-2 region. The remainder
    // (size - m_UsableSize) is reported elsewhere as "unusable".
    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount. Each deeper level halves the node size; stop at
    // MAX_LEVELS or when nodes would shrink below MIN_NODE_SIZE.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The whole usable region starts as a single free node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL; // The root is the only node without a buddy.

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9338 
// Consistency check of the whole buddy structure: the node tree, the per-level
// free lists, and the cached counters. Returns true when everything is valid
// (VMA_VALIDATE returns false from this function on the first failure).
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Cached counters must match what the tree walk计 actually found.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list has no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9381 
    9382 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9383 {
    9384  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9385  {
    9386  if(m_FreeList[level].front != VMA_NULL)
    9387  {
    9388  return LevelToNodeSize(level);
    9389  }
    9390  }
    9391  return 0;
    9392 }
    9393 
// Fills outInfo with statistics for this single block by walking the whole
// node tree and then accounting for the unusable (non-power-of-2) tail.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Seed extrema so the first sample always replaces them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Accumulate per-allocation / per-free-range stats over the whole tree.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        // Report the unusable tail of the block as one more unused range.
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9417 
    9418 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9419 {
    9420  const VkDeviceSize unusableSize = GetUnusableSize();
    9421 
    9422  inoutStats.size += GetSize();
    9423  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9424  inoutStats.allocationCount += m_AllocationCount;
    9425  inoutStats.unusedRangeCount += m_FreeCount;
    9426  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9427 
    9428  if(unusableSize > 0)
    9429  {
    9430  ++inoutStats.unusedRangeCount;
    9431  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9432  }
    9433 }
    9434 
    9435 #if VMA_STATS_STRING_ENABLED
    9436 
// Writes a detailed JSON map of this block: aggregate stats first, then every
// allocation / free range, then the unusable tail (if any).
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Aggregate stats are gathered up front because PrintDetailedMap_Begin
    // needs the totals before the individual entries are emitted.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Emit entries in ascending offset order by walking the tree from the root.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        // The non-power-of-2 tail of the block is shown as an unused range.
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9461 
    9462 #endif // #if VMA_STATS_STRING_ENABLED
    9463 
// Searches for a free node that can hold an allocation of allocSize /
// allocAlignment. On success fills *pAllocationRequest (the chosen level is
// passed to Alloc() through customData) and returns true. Lost allocations and
// upper-address allocation are not supported by this algorithm.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan from the deepest level that still fits the allocation up to the
    // root; a larger free node found higher up will be split later in Alloc().
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // NOTE(review): assumes allocAlignment != 0 (modulo below) -
            // confirm callers always pass a nonzero alignment.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // The level is carried to Alloc() through customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9514 
    9515 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9516  uint32_t currentFrameIndex,
    9517  uint32_t frameInUseCount,
    9518  VmaAllocationRequest* pAllocationRequest)
    9519 {
    9520  /*
    9521  Lost allocations are not supported in buddy allocator at the moment.
    9522  Support might be added in the future.
    9523  */
    9524  return pAllocationRequest->itemsToMakeLostCount == 0;
    9525 }
    9526 
    9527 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9528 {
    9529  /*
    9530  Lost allocations are not supported in buddy allocator at the moment.
    9531  Support might be added in the future.
    9532  */
    9533  return 0;
    9534 }
    9535 
// Commits an allocation previously prepared by CreateAllocationRequest():
// finds the chosen free node, splits it down to the target level if needed,
// and converts the final node to an allocation node.
// request.customData carries the level of the free node that was found.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // The left child is pushed last so it becomes the list front below.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9610 
    9611 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9612 {
    9613  if(node->type == Node::TYPE_SPLIT)
    9614  {
    9615  DeleteNode(node->split.leftChild->buddy);
    9616  DeleteNode(node->split.leftChild);
    9617  }
    9618 
    9619  vma_delete(GetAllocationCallbacks(), node);
    9620 }
    9621 
    9622 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9623 {
    9624  VMA_VALIDATE(level < m_LevelCount);
    9625  VMA_VALIDATE(curr->parent == parent);
    9626  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9627  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9628  switch(curr->type)
    9629  {
    9630  case Node::TYPE_FREE:
    9631  // curr->free.prev, next are validated separately.
    9632  ctx.calculatedSumFreeSize += levelNodeSize;
    9633  ++ctx.calculatedFreeCount;
    9634  break;
    9635  case Node::TYPE_ALLOCATION:
    9636  ++ctx.calculatedAllocationCount;
    9637  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9638  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9639  break;
    9640  case Node::TYPE_SPLIT:
    9641  {
    9642  const uint32_t childrenLevel = level + 1;
    9643  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9644  const Node* const leftChild = curr->split.leftChild;
    9645  VMA_VALIDATE(leftChild != VMA_NULL);
    9646  VMA_VALIDATE(leftChild->offset == curr->offset);
    9647  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9648  {
    9649  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9650  }
    9651  const Node* const rightChild = leftChild->buddy;
    9652  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9653  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9654  {
    9655  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9656  }
    9657  }
    9658  break;
    9659  default:
    9660  return false;
    9661  }
    9662 
    9663  return true;
    9664 }
    9665 
    9666 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9667 {
    9668  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9669  uint32_t level = 0;
    9670  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9671  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9672  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9673  {
    9674  ++level;
    9675  currLevelNodeSize = nextLevelNodeSize;
    9676  nextLevelNodeSize = currLevelNodeSize >> 1;
    9677  }
    9678  return level;
    9679 }
    9680 
    9681 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9682 {
    9683  // Find node and level.
    9684  Node* node = m_Root;
    9685  VkDeviceSize nodeOffset = 0;
    9686  uint32_t level = 0;
    9687  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9688  while(node->type == Node::TYPE_SPLIT)
    9689  {
    9690  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9691  if(offset < nodeOffset + nextLevelSize)
    9692  {
    9693  node = node->split.leftChild;
    9694  }
    9695  else
    9696  {
    9697  node = node->split.leftChild->buddy;
    9698  nodeOffset += nextLevelSize;
    9699  }
    9700  ++level;
    9701  levelNodeSize = nextLevelSize;
    9702  }
    9703 
    9704  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9705  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9706 
    9707  ++m_FreeCount;
    9708  --m_AllocationCount;
    9709  m_SumFreeSize += alloc->GetSize();
    9710 
    9711  node->type = Node::TYPE_FREE;
    9712 
    9713  // Join free nodes if possible.
    9714  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9715  {
    9716  RemoveFromFreeList(level, node->buddy);
    9717  Node* const parent = node->parent;
    9718 
    9719  vma_delete(GetAllocationCallbacks(), node->buddy);
    9720  vma_delete(GetAllocationCallbacks(), node);
    9721  parent->type = Node::TYPE_FREE;
    9722 
    9723  node = parent;
    9724  --level;
    9725  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9726  --m_FreeCount;
    9727  }
    9728 
    9729  AddToFreeListFront(level, node);
    9730 }
    9731 
    9732 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9733 {
    9734  switch(node->type)
    9735  {
    9736  case Node::TYPE_FREE:
    9737  ++outInfo.unusedRangeCount;
    9738  outInfo.unusedBytes += levelNodeSize;
    9739  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9740  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9741  break;
    9742  case Node::TYPE_ALLOCATION:
    9743  {
    9744  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9745  ++outInfo.allocationCount;
    9746  outInfo.usedBytes += allocSize;
    9747  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9748  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9749 
    9750  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9751  if(unusedRangeSize > 0)
    9752  {
    9753  ++outInfo.unusedRangeCount;
    9754  outInfo.unusedBytes += unusedRangeSize;
    9755  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9756  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9757  }
    9758  }
    9759  break;
    9760  case Node::TYPE_SPLIT:
    9761  {
    9762  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9763  const Node* const leftChild = node->split.leftChild;
    9764  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9765  const Node* const rightChild = leftChild->buddy;
    9766  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9767  }
    9768  break;
    9769  default:
    9770  VMA_ASSERT(0);
    9771  }
    9772 }
    9773 
    9774 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9775 {
    9776  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9777 
    9778  // List is empty.
    9779  Node* const frontNode = m_FreeList[level].front;
    9780  if(frontNode == VMA_NULL)
    9781  {
    9782  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9783  node->free.prev = node->free.next = VMA_NULL;
    9784  m_FreeList[level].front = m_FreeList[level].back = node;
    9785  }
    9786  else
    9787  {
    9788  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9789  node->free.prev = VMA_NULL;
    9790  node->free.next = frontNode;
    9791  frontNode->free.prev = node;
    9792  m_FreeList[level].front = node;
    9793  }
    9794 }
    9795 
    9796 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9797 {
    9798  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9799 
    9800  // It is at the front.
    9801  if(node->free.prev == VMA_NULL)
    9802  {
    9803  VMA_ASSERT(m_FreeList[level].front == node);
    9804  m_FreeList[level].front = node->free.next;
    9805  }
    9806  else
    9807  {
    9808  Node* const prevFreeNode = node->free.prev;
    9809  VMA_ASSERT(prevFreeNode->free.next == node);
    9810  prevFreeNode->free.next = node->free.next;
    9811  }
    9812 
    9813  // It is at the back.
    9814  if(node->free.next == VMA_NULL)
    9815  {
    9816  VMA_ASSERT(m_FreeList[level].back == node);
    9817  m_FreeList[level].back = node->free.prev;
    9818  }
    9819  else
    9820  {
    9821  Node* const nextFreeNode = node->free.next;
    9822  VMA_ASSERT(nextFreeNode->free.prev == node);
    9823  nextFreeNode->free.prev = node->free.prev;
    9824  }
    9825 }
    9826 
    9827 #if VMA_STATS_STRING_ENABLED
// Recursively emits JSON entries for the subtree rooted at node, in ascending
// offset order (left child before right child).
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            // Internal fragmentation after the allocation is shown as unused.
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    9858 #endif // #if VMA_STATS_STRING_ENABLED
    9859 
    9860 
    9862 // class VmaDeviceMemoryBlock
    9863 
// Constructs the block in an empty state; real initialization (taking
// ownership of a VkDeviceMemory and creating metadata) happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0), // Reference count of Map() calls; see Map()/Unmap().
    m_pMappedData(VMA_NULL)
{
}
    9873 
    9874 void VmaDeviceMemoryBlock::Init(
    9875  VmaAllocator hAllocator,
    9876  uint32_t newMemoryTypeIndex,
    9877  VkDeviceMemory newMemory,
    9878  VkDeviceSize newSize,
    9879  uint32_t id,
    9880  uint32_t algorithm)
    9881 {
    9882  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9883 
    9884  m_MemoryTypeIndex = newMemoryTypeIndex;
    9885  m_Id = id;
    9886  m_hMemory = newMemory;
    9887 
    9888  switch(algorithm)
    9889  {
    9891  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9892  break;
    9894  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9895  break;
    9896  default:
    9897  VMA_ASSERT(0);
    9898  // Fall-through.
    9899  case 0:
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9901  }
    9902  m_pMetadata->Init(newSize);
    9903 }
    9904 
// Releases the block's VkDeviceMemory back to the allocator and destroys the
// metadata. The block must be empty (all allocations freed) at this point.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9918 
    9919 bool VmaDeviceMemoryBlock::Validate() const
    9920 {
    9921  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9922  (m_pMetadata->GetSize() != 0));
    9923 
    9924  return m_pMetadata->Validate();
    9925 }
    9926 
    9927 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9928 {
    9929  void* pData = nullptr;
    9930  VkResult res = Map(hAllocator, 1, &pData);
    9931  if(res != VK_SUCCESS)
    9932  {
    9933  return res;
    9934  }
    9935 
    9936  res = m_pMetadata->CheckCorruption(pData);
    9937 
    9938  Unmap(hAllocator, 1);
    9939 
    9940  return res;
    9941 }
    9942 
// Reference-counted mapping of the whole VkDeviceMemory. vkMapMemory is called
// only on the 0 -> nonzero transition; subsequent calls just bump the counter
// and return the cached pointer. ppData may be null if the caller doesn't
// need the pointer. Thread-safe via m_Mutex (when the allocator uses mutexes).
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just add references and hand out the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    9981 
    9982 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9983 {
    9984  if(count == 0)
    9985  {
    9986  return;
    9987  }
    9988 
    9989  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9990  if(m_MapCount >= count)
    9991  {
    9992  m_MapCount -= count;
    9993  if(m_MapCount == 0)
    9994  {
    9995  m_pMappedData = VMA_NULL;
    9996  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    9997  }
    9998  }
    9999  else
    10000  {
    10001  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10002  }
    10003 }
    10004 
    10005 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10006 {
    10007  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10008  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10009 
    10010  void* pData;
    10011  VkResult res = Map(hAllocator, 1, &pData);
    10012  if(res != VK_SUCCESS)
    10013  {
    10014  return res;
    10015  }
    10016 
    10017  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10018  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10019 
    10020  Unmap(hAllocator, 1);
    10021 
    10022  return VK_SUCCESS;
    10023 }
    10024 
    10025 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10026 {
    10027  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10028  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10029 
    10030  void* pData;
    10031  VkResult res = Map(hAllocator, 1, &pData);
    10032  if(res != VK_SUCCESS)
    10033  {
    10034  return res;
    10035  }
    10036 
    10037  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10038  {
    10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10040  }
    10041  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10042  {
    10043  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10044  }
    10045 
    10046  Unmap(hAllocator, 1);
    10047 
    10048  return VK_SUCCESS;
    10049 }
    10050 
    10051 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10052  const VmaAllocator hAllocator,
    10053  const VmaAllocation hAllocation,
    10054  VkBuffer hBuffer)
    10055 {
    10056  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10057  hAllocation->GetBlock() == this);
    10058  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10059  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10060  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10061  hAllocator->m_hDevice,
    10062  hBuffer,
    10063  m_hMemory,
    10064  hAllocation->GetOffset());
    10065 }
    10066 
    10067 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10068  const VmaAllocator hAllocator,
    10069  const VmaAllocation hAllocation,
    10070  VkImage hImage)
    10071 {
    10072  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10073  hAllocation->GetBlock() == this);
    10074  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10075  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10076  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10077  hAllocator->m_hDevice,
    10078  hImage,
    10079  m_hMemory,
    10080  hAllocation->GetOffset());
    10081 }
    10082 
    10083 static void InitStatInfo(VmaStatInfo& outInfo)
    10084 {
    10085  memset(&outInfo, 0, sizeof(outInfo));
    10086  outInfo.allocationSizeMin = UINT64_MAX;
    10087  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10088 }
    10089 
    10090 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10091 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10092 {
    10093  inoutInfo.blockCount += srcInfo.blockCount;
    10094  inoutInfo.allocationCount += srcInfo.allocationCount;
    10095  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10096  inoutInfo.usedBytes += srcInfo.usedBytes;
    10097  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10098  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10099  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10100  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10101  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10102 }
    10103 
    10104 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10105 {
    10106  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10107  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10108  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10109  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10110 }
    10111 
// Constructs a custom pool: all the real work is done by the embedded
// VmaBlockVector, configured from createInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred block size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10130 
    // Trivial destructor: the embedded m_BlockVector destroys its own blocks.
    10131 VmaPool_T::~VmaPool_T()
    10132 {
    10133 }
    10134 
    10135 #if VMA_STATS_STRING_ENABLED
    10136 
    10137 #endif // #if VMA_STATS_STRING_ENABLED
    10138 
    // VmaBlockVector: a growable sequence of VkDeviceMemory blocks of one
    // memory type, serving suballocations. Used both for the allocator's
    // default pools and for custom pools (isCustomPool).
    10139 VmaBlockVector::VmaBlockVector(
    10140  VmaAllocator hAllocator,
    10141  uint32_t memoryTypeIndex,
    10142  VkDeviceSize preferredBlockSize,
    10143  size_t minBlockCount,
    10144  size_t maxBlockCount,
    10145  VkDeviceSize bufferImageGranularity,
    10146  uint32_t frameInUseCount,
    10147  bool isCustomPool,
    10148  bool explicitBlockSize,
    10149  uint32_t algorithm) :
    10150  m_hAllocator(hAllocator),
    10151  m_MemoryTypeIndex(memoryTypeIndex),
    10152  m_PreferredBlockSize(preferredBlockSize),
    10153  m_MinBlockCount(minBlockCount),
    10154  m_MaxBlockCount(maxBlockCount),
    10155  m_BufferImageGranularity(bufferImageGranularity),
    10156  m_FrameInUseCount(frameInUseCount),
    10157  m_IsCustomPool(isCustomPool),
    10158  m_ExplicitBlockSize(explicitBlockSize),
    10159  m_Algorithm(algorithm),
    10160  m_HasEmptyBlock(false),
    // Block pointers live in a vector that allocates through the user-supplied callbacks.
    10161  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    10162  m_pDefragmentator(VMA_NULL),
    10163  m_NextBlockId(0)
    10164 {
    10165 }
    10166 
    10167 VmaBlockVector::~VmaBlockVector()
    10168 {
    10169  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10170 
    10171  for(size_t i = m_Blocks.size(); i--; )
    10172  {
    10173  m_Blocks[i]->Destroy(m_hAllocator);
    10174  vma_delete(m_hAllocator, m_Blocks[i]);
    10175  }
    10176 }
    10177 
    10178 VkResult VmaBlockVector::CreateMinBlocks()
    10179 {
    10180  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10181  {
    10182  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10183  if(res != VK_SUCCESS)
    10184  {
    10185  return res;
    10186  }
    10187  }
    10188  return VK_SUCCESS;
    10189 }
    10190 
    10191 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10192 {
    10193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10194 
    10195  const size_t blockCount = m_Blocks.size();
    10196 
    10197  pStats->size = 0;
    10198  pStats->unusedSize = 0;
    10199  pStats->allocationCount = 0;
    10200  pStats->unusedRangeCount = 0;
    10201  pStats->unusedRangeSizeMax = 0;
    10202  pStats->blockCount = blockCount;
    10203 
    10204  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10205  {
    10206  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10207  VMA_ASSERT(pBlock);
    10208  VMA_HEAVY_ASSERT(pBlock->Validate());
    10209  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10210  }
    10211 }
    10212 
    10213 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10214 {
    10215  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10216  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10217  (VMA_DEBUG_MARGIN > 0) &&
    10218  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10219 }
    10220 
    // Maximum number of retries of the "make other allocations lost" loop in
    // VmaBlockVector::Allocate() before giving up with VK_ERROR_TOO_MANY_OBJECTS.
    10221 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10222 
    // Core allocation routine of VmaBlockVector. Strategy, in order:
    //   1. Try to allocate from existing blocks without making other allocations lost.
    //   2. Try to create a new block (with adaptive block-size back-off).
    //   3. If allowed, try to make other lost-able allocations lost (retried up to
    //      VMA_ALLOCATION_TRY_COUNT times).
    // NOTE(review): this listing has elided lines (10259, 10261-10263, 10286,
    // 10314-10315, 10456, 10514-10515, 10522 are missing - they carried
    // hyperlinked identifiers), so some conditions below appear truncated.
    // Verify against the original vk_mem_alloc.h before relying on this text.
    10223 VkResult VmaBlockVector::Allocate(
    10224  VmaPool hCurrentPool,
    10225  uint32_t currentFrameIndex,
    10226  VkDeviceSize size,
    10227  VkDeviceSize alignment,
    10228  const VmaAllocationCreateInfo& createInfo,
    10229  VmaSuballocationType suballocType,
    10230  VmaAllocation* pAllocation)
    10231 {
    // Decode per-allocation flags once up front.
    10232  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10233  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10234  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10235  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10236  const bool canCreateNewBlock =
    10237  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10238  (m_Blocks.size() < m_MaxBlockCount);
    10239  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10240 
    10241  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10242  // Which in turn is available only when maxBlockCount = 1.
    10243  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10244  {
    10245  canMakeOtherLost = false;
    10246  }
    10247 
    10248  // Upper address can only be used with linear allocator and within single memory block.
    10249  if(isUpperAddress &&
    10250  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10251  {
    10252  return VK_ERROR_FEATURE_NOT_PRESENT;
    10253  }
    10254 
    10255  // Validate strategy.
    // NOTE(review): the case labels for the non-default strategy bits are
    // missing from this listing; only the break statements remain.
    10256  switch(strategy)
    10257  {
    10258  case 0:
    10260  break;
    10264  break;
    10265  default:
    10266  return VK_ERROR_FEATURE_NOT_PRESENT;
    10267  }
    10268 
    10269  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10270  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10271  {
    10272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10273  }
    10274 
    10275  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10276 
    10277  /*
    10278  Under certain condition, this whole section can be skipped for optimization, so
    10279  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10280  e.g. for custom pools with linear algorithm.
    10281  */
    10282  if(!canMakeOtherLost || canCreateNewBlock)
    10283  {
    10284  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10285  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10287 
    10288  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10289  {
    10290  // Use only last block.
    10291  if(!m_Blocks.empty())
    10292  {
    10293  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10294  VMA_ASSERT(pCurrBlock);
    10295  VkResult res = AllocateFromBlock(
    10296  pCurrBlock,
    10297  hCurrentPool,
    10298  currentFrameIndex,
    10299  size,
    10300  alignment,
    10301  allocFlagsCopy,
    10302  createInfo.pUserData,
    10303  suballocType,
    10304  strategy,
    10305  pAllocation);
    10306  if(res == VK_SUCCESS)
    10307  {
    10308  VMA_DEBUG_LOG("  Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10309  return VK_SUCCESS;
    10310  }
    10311  }
    10312  }
    10313  else
    10314  {
    // NOTE(review): the if-condition preceding this brace (listing line 10315,
    // presumably testing for the best-fit strategy) is missing from this listing.
    10316  {
    10317  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10318  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10319  {
    10320  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10321  VMA_ASSERT(pCurrBlock);
    10322  VkResult res = AllocateFromBlock(
    10323  pCurrBlock,
    10324  hCurrentPool,
    10325  currentFrameIndex,
    10326  size,
    10327  alignment,
    10328  allocFlagsCopy,
    10329  createInfo.pUserData,
    10330  suballocType,
    10331  strategy,
    10332  pAllocation);
    10333  if(res == VK_SUCCESS)
    10334  {
    10335  VMA_DEBUG_LOG("  Returned from existing block #%u", (uint32_t)blockIndex);
    10336  return VK_SUCCESS;
    10337  }
    10338  }
    10339  }
    10340  else // WORST_FIT, FIRST_FIT
    10341  {
    10342  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10343  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10344  {
    10345  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10346  VMA_ASSERT(pCurrBlock);
    10347  VkResult res = AllocateFromBlock(
    10348  pCurrBlock,
    10349  hCurrentPool,
    10350  currentFrameIndex,
    10351  size,
    10352  alignment,
    10353  allocFlagsCopy,
    10354  createInfo.pUserData,
    10355  suballocType,
    10356  strategy,
    10357  pAllocation);
    10358  if(res == VK_SUCCESS)
    10359  {
    10360  VMA_DEBUG_LOG("  Returned from existing block #%u", (uint32_t)blockIndex);
    10361  return VK_SUCCESS;
    10362  }
    10363  }
    10364  }
    10365  }
    10366 
    10367  // 2. Try to create new block.
    10368  if(canCreateNewBlock)
    10369  {
    10370  // Calculate optimal size for new block.
    10371  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10372  uint32_t newBlockSizeShift = 0;
    10373  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10374 
    10375  if(!m_ExplicitBlockSize)
    10376  {
    10377  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10378  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10379  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10380  {
    10381  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10382  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10383  {
    10384  newBlockSize = smallerNewBlockSize;
    10385  ++newBlockSizeShift;
    10386  }
    10387  else
    10388  {
    10389  break;
    10390  }
    10391  }
    10392  }
    10393 
    10394  size_t newBlockIndex = 0;
    10395  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10396  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10397  if(!m_ExplicitBlockSize)
    10398  {
    10399  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10400  {
    10401  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10402  if(smallerNewBlockSize >= size)
    10403  {
    10404  newBlockSize = smallerNewBlockSize;
    10405  ++newBlockSizeShift;
    10406  res = CreateBlock(newBlockSize, &newBlockIndex);
    10407  }
    10408  else
    10409  {
    10410  break;
    10411  }
    10412  }
    10413  }
    10414 
    10415  if(res == VK_SUCCESS)
    10416  {
    10417  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10418  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10419 
    10420  res = AllocateFromBlock(
    10421  pBlock,
    10422  hCurrentPool,
    10423  currentFrameIndex,
    10424  size,
    10425  alignment,
    10426  allocFlagsCopy,
    10427  createInfo.pUserData,
    10428  suballocType,
    10429  strategy,
    10430  pAllocation);
    10431  if(res == VK_SUCCESS)
    10432  {
    10433  VMA_DEBUG_LOG("  Created new block Size=%llu", newBlockSize);
    10434  return VK_SUCCESS;
    10435  }
    10436  else
    10437  {
    10438  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10439  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10440  }
    10441  }
    10442  }
    10443  }
    10444 
    10445  // 3. Try to allocate from existing blocks with making other allocations lost.
    10446  if(canMakeOtherLost)
    10447  {
    10448  uint32_t tryIndex = 0;
    10449  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10450  {
    10451  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10452  VmaAllocationRequest bestRequest = {};
    10453  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10454 
    10455  // 1. Search existing allocations.
    // NOTE(review): the if-condition preceding this brace (listing line 10456,
    // presumably the best-fit strategy test) is missing from this listing.
    10457  {
    10458  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10459  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10460  {
    10461  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10462  VMA_ASSERT(pCurrBlock);
    10463  VmaAllocationRequest currRequest = {};
    10464  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10465  currentFrameIndex,
    10466  m_FrameInUseCount,
    10467  m_BufferImageGranularity,
    10468  size,
    10469  alignment,
    10470  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10471  suballocType,
    10472  canMakeOtherLost,
    10473  strategy,
    10474  &currRequest))
    10475  {
    // Cost == number of bytes of other allocations that would be made lost.
    10476  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10477  if(pBestRequestBlock == VMA_NULL ||
    10478  currRequestCost < bestRequestCost)
    10479  {
    10480  pBestRequestBlock = pCurrBlock;
    10481  bestRequest = currRequest;
    10482  bestRequestCost = currRequestCost;
    10483 
    10484  if(bestRequestCost == 0)
    10485  {
    10486  break;
    10487  }
    10488  }
    10489  }
    10490  }
    10491  }
    10492  else // WORST_FIT, FIRST_FIT
    10493  {
    10494  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10495  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10496  {
    10497  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10498  VMA_ASSERT(pCurrBlock);
    10499  VmaAllocationRequest currRequest = {};
    10500  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10501  currentFrameIndex,
    10502  m_FrameInUseCount,
    10503  m_BufferImageGranularity,
    10504  size,
    10505  alignment,
    10506  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10507  suballocType,
    10508  canMakeOtherLost,
    10509  strategy,
    10510  &currRequest))
    10511  {
    10512  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10513  if(pBestRequestBlock == VMA_NULL ||
    // NOTE(review): the remainder of this condition (listing lines 10514-10515,
    // likely a first-fit strategy test) is missing from this listing.
    10514  currRequestCost < bestRequestCost ||
    10516  {
    10517  pBestRequestBlock = pCurrBlock;
    10518  bestRequest = currRequest;
    10519  bestRequestCost = currRequestCost;
    10520 
    10521  if(bestRequestCost == 0 ||
    // NOTE(review): the remainder of this condition (listing line 10522)
    // is missing from this listing.
    10523  {
    10524  break;
    10525  }
    10526  }
    10527  }
    10528  }
    10529  }
    10530 
    10531  if(pBestRequestBlock != VMA_NULL)
    10532  {
    10533  if(mapped)
    10534  {
    10535  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10536  if(res != VK_SUCCESS)
    10537  {
    10538  return res;
    10539  }
    10540  }
    10541 
    10542  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10543  currentFrameIndex,
    10544  m_FrameInUseCount,
    10545  &bestRequest))
    10546  {
    10547  // We no longer have an empty Allocation.
    10548  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10549  {
    10550  m_HasEmptyBlock = false;
    10551  }
    10552  // Allocate from this pBlock.
    10553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10554  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10555  (*pAllocation)->InitBlockAllocation(
    10556  hCurrentPool,
    10557  pBestRequestBlock,
    10558  bestRequest.offset,
    10559  alignment,
    10560  size,
    10561  suballocType,
    10562  mapped,
    10563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10564  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    // NOTE(review): 'blockIndex' is not in scope here (it is local to the loops
    // above); this only compiles when VMA_DEBUG_LOG discards its arguments.
    10565  VMA_DEBUG_LOG("  Returned from existing allocation #%u", (uint32_t)blockIndex);
    10566  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10567  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10568  {
    10569  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10570  }
    10571  if(IsCorruptionDetectionEnabled())
    10572  {
    10573  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10574  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10575  }
    10576  return VK_SUCCESS;
    10577  }
    10578  // else: Some allocations must have been touched while we are here. Next try.
    10579  }
    10580  else
    10581  {
    10582  // Could not find place in any of the blocks - break outer loop.
    10583  break;
    10584  }
    10585  }
    10586  /* Maximum number of tries exceeded - a very unlike event when many other
    10587  threads are simultaneously touching allocations making it impossible to make
    10588  lost at the same time as we try to allocate. */
    10589  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10590  {
    10591  return VK_ERROR_TOO_MANY_OBJECTS;
    10592  }
    10593  }
    10594 
    10595  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10596 }
    10597 
    10598 void VmaBlockVector::Free(
    10599  VmaAllocation hAllocation)
    10600 {
    10601  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10602 
    10603  // Scope for lock.
    10604  {
    10605  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10606 
    10607  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10608 
    10609  if(IsCorruptionDetectionEnabled())
    10610  {
    10611  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10612  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10613  }
    10614 
    10615  if(hAllocation->IsPersistentMap())
    10616  {
    10617  pBlock->Unmap(m_hAllocator, 1);
    10618  }
    10619 
    10620  pBlock->m_pMetadata->Free(hAllocation);
    10621  VMA_HEAVY_ASSERT(pBlock->Validate());
    10622 
    10623  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10624 
    10625  // pBlock became empty after this deallocation.
    10626  if(pBlock->m_pMetadata->IsEmpty())
    10627  {
    10628  // Already has empty Allocation. We don't want to have two, so delete this one.
    10629  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10630  {
    10631  pBlockToDelete = pBlock;
    10632  Remove(pBlock);
    10633  }
    10634  // We now have first empty block.
    10635  else
    10636  {
    10637  m_HasEmptyBlock = true;
    10638  }
    10639  }
    10640  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10641  // (This is optional, heuristics.)
    10642  else if(m_HasEmptyBlock)
    10643  {
    10644  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10645  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10646  {
    10647  pBlockToDelete = pLastBlock;
    10648  m_Blocks.pop_back();
    10649  m_HasEmptyBlock = false;
    10650  }
    10651  }
    10652 
    10653  IncrementallySortBlocks();
    10654  }
    10655 
    10656  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10657  // lock, for performance reason.
    10658  if(pBlockToDelete != VMA_NULL)
    10659  {
    10660  VMA_DEBUG_LOG(" Deleted empty allocation");
    10661  pBlockToDelete->Destroy(m_hAllocator);
    10662  vma_delete(m_hAllocator, pBlockToDelete);
    10663  }
    10664 }
    10665 
    10666 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10667 {
    10668  VkDeviceSize result = 0;
    10669  for(size_t i = m_Blocks.size(); i--; )
    10670  {
    10671  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10672  if(result >= m_PreferredBlockSize)
    10673  {
    10674  break;
    10675  }
    10676  }
    10677  return result;
    10678 }
    10679 
    10680 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10681 {
    10682  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10683  {
    10684  if(m_Blocks[blockIndex] == pBlock)
    10685  {
    10686  VmaVectorRemove(m_Blocks, blockIndex);
    10687  return;
    10688  }
    10689  }
    10690  VMA_ASSERT(0);
    10691 }
    10692 
    10693 void VmaBlockVector::IncrementallySortBlocks()
    10694 {
    10695  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10696  {
    10697  // Bubble sort only until first swap.
    10698  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10699  {
    10700  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10701  {
    10702  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10703  return;
    10704  }
    10705  }
    10706  }
    10707 }
    10708 
    10709 VkResult VmaBlockVector::AllocateFromBlock(
    10710  VmaDeviceMemoryBlock* pBlock,
    10711  VmaPool hCurrentPool,
    10712  uint32_t currentFrameIndex,
    10713  VkDeviceSize size,
    10714  VkDeviceSize alignment,
    10715  VmaAllocationCreateFlags allocFlags,
    10716  void* pUserData,
    10717  VmaSuballocationType suballocType,
    10718  uint32_t strategy,
    10719  VmaAllocation* pAllocation)
    10720 {
    10721  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10722  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10723  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10724  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10725 
    10726  VmaAllocationRequest currRequest = {};
    10727  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10728  currentFrameIndex,
    10729  m_FrameInUseCount,
    10730  m_BufferImageGranularity,
    10731  size,
    10732  alignment,
    10733  isUpperAddress,
    10734  suballocType,
    10735  false, // canMakeOtherLost
    10736  strategy,
    10737  &currRequest))
    10738  {
    10739  // Allocate from pCurrBlock.
    10740  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10741 
    10742  if(mapped)
    10743  {
    10744  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10745  if(res != VK_SUCCESS)
    10746  {
    10747  return res;
    10748  }
    10749  }
    10750 
    10751  // We no longer have an empty Allocation.
    10752  if(pBlock->m_pMetadata->IsEmpty())
    10753  {
    10754  m_HasEmptyBlock = false;
    10755  }
    10756 
    10757  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10758  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10759  (*pAllocation)->InitBlockAllocation(
    10760  hCurrentPool,
    10761  pBlock,
    10762  currRequest.offset,
    10763  alignment,
    10764  size,
    10765  suballocType,
    10766  mapped,
    10767  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10768  VMA_HEAVY_ASSERT(pBlock->Validate());
    10769  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10770  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10771  {
    10772  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10773  }
    10774  if(IsCorruptionDetectionEnabled())
    10775  {
    10776  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10777  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10778  }
    10779  return VK_SUCCESS;
    10780  }
    10781  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10782 }
    10783 
    10784 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10785 {
    10786  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10787  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10788  allocInfo.allocationSize = blockSize;
    10789  VkDeviceMemory mem = VK_NULL_HANDLE;
    10790  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10791  if(res < 0)
    10792  {
    10793  return res;
    10794  }
    10795 
    10796  // New VkDeviceMemory successfully created.
    10797 
    10798  // Create new Allocation for it.
    10799  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10800  pBlock->Init(
    10801  m_hAllocator,
    10802  m_MemoryTypeIndex,
    10803  mem,
    10804  allocInfo.allocationSize,
    10805  m_NextBlockId++,
    10806  m_Algorithm);
    10807 
    10808  m_Blocks.push_back(pBlock);
    10809  if(pNewBlockIndex != VMA_NULL)
    10810  {
    10811  *pNewBlockIndex = m_Blocks.size() - 1;
    10812  }
    10813 
    10814  return VK_SUCCESS;
    10815 }
    10816 
    10817 #if VMA_STATS_STRING_ENABLED
    10818 
    10819 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    10820 {
    10821  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10822 
    10823  json.BeginObject();
    10824 
    10825  if(m_IsCustomPool)
    10826  {
    10827  json.WriteString("MemoryTypeIndex");
    10828  json.WriteNumber(m_MemoryTypeIndex);
    10829 
    10830  json.WriteString("BlockSize");
    10831  json.WriteNumber(m_PreferredBlockSize);
    10832 
    10833  json.WriteString("BlockCount");
    10834  json.BeginObject(true);
    10835  if(m_MinBlockCount > 0)
    10836  {
    10837  json.WriteString("Min");
    10838  json.WriteNumber((uint64_t)m_MinBlockCount);
    10839  }
    10840  if(m_MaxBlockCount < SIZE_MAX)
    10841  {
    10842  json.WriteString("Max");
    10843  json.WriteNumber((uint64_t)m_MaxBlockCount);
    10844  }
    10845  json.WriteString("Cur");
    10846  json.WriteNumber((uint64_t)m_Blocks.size());
    10847  json.EndObject();
    10848 
    10849  if(m_FrameInUseCount > 0)
    10850  {
    10851  json.WriteString("FrameInUseCount");
    10852  json.WriteNumber(m_FrameInUseCount);
    10853  }
    10854 
    10855  if(m_Algorithm != 0)
    10856  {
    10857  json.WriteString("Algorithm");
    10858  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    10859  }
    10860  }
    10861  else
    10862  {
    10863  json.WriteString("PreferredBlockSize");
    10864  json.WriteNumber(m_PreferredBlockSize);
    10865  }
    10866 
    10867  json.WriteString("Blocks");
    10868  json.BeginObject();
    10869  for(size_t i = 0; i < m_Blocks.size(); ++i)
    10870  {
    10871  json.BeginString();
    10872  json.ContinueString(m_Blocks[i]->GetId());
    10873  json.EndString();
    10874 
    10875  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    10876  }
    10877  json.EndObject();
    10878 
    10879  json.EndObject();
    10880 }
    10881 
    10882 #endif // #if VMA_STATS_STRING_ENABLED
    10883 
    10884 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10885  VmaAllocator hAllocator,
    10886  uint32_t currentFrameIndex)
    10887 {
    10888  if(m_pDefragmentator == VMA_NULL)
    10889  {
    10890  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10891  hAllocator,
    10892  this,
    10893  currentFrameIndex);
    10894  }
    10895 
    10896  return m_pDefragmentator;
    10897 }
    10898 
    10899 VkResult VmaBlockVector::Defragment(
    10900  VmaDefragmentationStats* pDefragmentationStats,
    10901  VkDeviceSize& maxBytesToMove,
    10902  uint32_t& maxAllocationsToMove)
    10903 {
    10904  if(m_pDefragmentator == VMA_NULL)
    10905  {
    10906  return VK_SUCCESS;
    10907  }
    10908 
    10909  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10910 
    10911  // Defragment.
    10912  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    10913 
    10914  // Accumulate statistics.
    10915  if(pDefragmentationStats != VMA_NULL)
    10916  {
    10917  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    10918  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    10919  pDefragmentationStats->bytesMoved += bytesMoved;
    10920  pDefragmentationStats->allocationsMoved += allocationsMoved;
    10921  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    10922  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    10923  maxBytesToMove -= bytesMoved;
    10924  maxAllocationsToMove -= allocationsMoved;
    10925  }
    10926 
    10927  // Free empty blocks.
    10928  m_HasEmptyBlock = false;
    10929  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10930  {
    10931  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    10932  if(pBlock->m_pMetadata->IsEmpty())
    10933  {
    10934  if(m_Blocks.size() > m_MinBlockCount)
    10935  {
    10936  if(pDefragmentationStats != VMA_NULL)
    10937  {
    10938  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    10939  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    10940  }
    10941 
    10942  VmaVectorRemove(m_Blocks, blockIndex);
    10943  pBlock->Destroy(m_hAllocator);
    10944  vma_delete(m_hAllocator, pBlock);
    10945  }
    10946  else
    10947  {
    10948  m_HasEmptyBlock = true;
    10949  }
    10950  }
    10951  }
    10952 
    10953  return result;
    10954 }
    10955 
    10956 void VmaBlockVector::DestroyDefragmentator()
    10957 {
    10958  if(m_pDefragmentator != VMA_NULL)
    10959  {
    10960  vma_delete(m_hAllocator, m_pDefragmentator);
    10961  m_pDefragmentator = VMA_NULL;
    10962  }
    10963 }
    10964 
    10965 void VmaBlockVector::MakePoolAllocationsLost(
    10966  uint32_t currentFrameIndex,
    10967  size_t* pLostAllocationCount)
    10968 {
    10969  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10970  size_t lostAllocationCount = 0;
    10971  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10972  {
    10973  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10974  VMA_ASSERT(pBlock);
    10975  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10976  }
    10977  if(pLostAllocationCount != VMA_NULL)
    10978  {
    10979  *pLostAllocationCount = lostAllocationCount;
    10980  }
    10981 }
    10982 
    10983 VkResult VmaBlockVector::CheckCorruption()
    10984 {
    10985  if(!IsCorruptionDetectionEnabled())
    10986  {
    10987  return VK_ERROR_FEATURE_NOT_PRESENT;
    10988  }
    10989 
    10990  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10991  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10992  {
    10993  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10994  VMA_ASSERT(pBlock);
    10995  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10996  if(res != VK_SUCCESS)
    10997  {
    10998  return res;
    10999  }
    11000  }
    11001  return VK_SUCCESS;
    11002 }
    11003 
    11004 void VmaBlockVector::AddStats(VmaStats* pStats)
    11005 {
    11006  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11007  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11008 
    11009  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11010 
    11011  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11012  {
    11013  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11014  VMA_ASSERT(pBlock);
    11015  VMA_HEAVY_ASSERT(pBlock->Validate());
    11016  VmaStatInfo allocationStatInfo;
    11017  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11018  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11019  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11020  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11021  }
    11022 }
    11023 
    11025 // VmaDefragmentator members definition
    11026 
    11027 VmaDefragmentator::VmaDefragmentator(
    11028  VmaAllocator hAllocator,
    11029  VmaBlockVector* pBlockVector,
    11030  uint32_t currentFrameIndex) :
    11031  m_hAllocator(hAllocator),
    11032  m_pBlockVector(pBlockVector),
    11033  m_CurrentFrameIndex(currentFrameIndex),
    11034  m_BytesMoved(0),
    11035  m_AllocationsMoved(0),
    11036  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    11037  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    11038 {
    11039  VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
    11040 }
    11041 
    11042 VmaDefragmentator::~VmaDefragmentator()
    11043 {
    11044  for(size_t i = m_Blocks.size(); i--; )
    11045  {
    11046  vma_delete(m_hAllocator, m_Blocks[i]);
    11047  }
    11048 }
    11049 
    11050 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11051 {
    11052  AllocationInfo allocInfo;
    11053  allocInfo.m_hAllocation = hAlloc;
    11054  allocInfo.m_pChanged = pChanged;
    11055  m_Allocations.push_back(allocInfo);
    11056 }
    11057 
    11058 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11059 {
    11060  // It has already been mapped for defragmentation.
    11061  if(m_pMappedDataForDefragmentation)
    11062  {
    11063  *ppMappedData = m_pMappedDataForDefragmentation;
    11064  return VK_SUCCESS;
    11065  }
    11066 
    11067  // It is originally mapped.
    11068  if(m_pBlock->GetMappedData())
    11069  {
    11070  *ppMappedData = m_pBlock->GetMappedData();
    11071  return VK_SUCCESS;
    11072  }
    11073 
    11074  // Map on first usage.
    11075  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11076  *ppMappedData = m_pMappedDataForDefragmentation;
    11077  return res;
    11078 }
    11079 
    11080 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11081 {
    11082  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11083  {
    11084  m_pBlock->Unmap(hAllocator, 1);
    11085  }
    11086 }
    11087 
// Performs one round of defragmentation.
//
// Walks candidate allocations from the most "source" block (end of m_Blocks)
// to the most "destination" block (front), and within each block from largest
// allocation to smallest (m_Allocations is sorted descending by size). For
// each allocation it searches preceding blocks (and earlier offsets in the
// same block) for a free spot, memcpy's the data through host mappings, and
// rebinds the allocation to its new place.
//
// Returns VK_SUCCESS when there is nothing (more) to move, VK_INCOMPLETE when
// the maxBytesToMove / maxAllocationsToMove budget is exhausted, or an error
// from mapping memory.
//
// NOTE(review): this listing appears to be missing one argument line in the
// CreateAllocationRequest call (between canMakeOtherLost and &dstAllocRequest)
// — likely the allocation strategy parameter; verify against the original
// vk_mem_alloc.h.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Cursor over (block, allocation). srcAllocIndex == SIZE_MAX means
    // "not yet positioned in this block"; the while loop below clamps it
    // to the last allocation of the current block.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so nothing may need to be lost.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks so the data can be copied on the CPU.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-stamp the debug margins around the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register at destination, release at source, rebind handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11231 
// Top-level defragmentation entry point for this block vector.
//
// Phases:
//   1. Build a BlockInfo for every block and sort by raw pointer so
//      allocations can be binary-searched into their owning block.
//   2. Distribute the registered allocations (skipping lost ones) into their
//      blocks; sort allocations per block by size descending and sort blocks
//      from most "destination" to most "source".
//   3. Run up to two DefragmentRound passes within the given byte/count
//      budget.
//   4. Unmap any blocks that were mapped only for defragmentation.
//
// Caller is expected to hold VmaBlockVector::m_Mutex (see comment at the
// lost-allocation check below).
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing registered via AddAllocation() — trivially done.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this
                // vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11299 
    11300 bool VmaDefragmentator::MoveMakesSense(
    11301  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11302  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11303 {
    11304  if(dstBlockIndex < srcBlockIndex)
    11305  {
    11306  return true;
    11307  }
    11308  if(dstBlockIndex > srcBlockIndex)
    11309  {
    11310  return false;
    11311  }
    11312  if(dstOffset < srcOffset)
    11313  {
    11314  return true;
    11315  }
    11316  return false;
    11317 }
    11318 
    11320 // VmaRecorder
    11321 
    11322 #if VMA_RECORDING_ENABLED
    11323 
    11324 VmaRecorder::VmaRecorder() :
    11325  m_UseMutex(true),
    11326  m_Flags(0),
    11327  m_File(VMA_NULL),
    11328  m_Freq(INT64_MAX),
    11329  m_StartCounter(INT64_MAX)
    11330 {
    11331 }
    11332 
    11333 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11334 {
    11335  m_UseMutex = useMutex;
    11336  m_Flags = settings.flags;
    11337 
    11338  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11339  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11340 
    11341  // Open file for writing.
    11342  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11343  if(err != 0)
    11344  {
    11345  return VK_ERROR_INITIALIZATION_FAILED;
    11346  }
    11347 
    11348  // Write header.
    11349  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11350  fprintf(m_File, "%s\n", "1,3");
    11351 
    11352  return VK_SUCCESS;
    11353 }
    11354 
    11355 VmaRecorder::~VmaRecorder()
    11356 {
    11357  if(m_File != VMA_NULL)
    11358  {
    11359  fclose(m_File);
    11360  }
    11361 }
    11362 
    11363 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11364 {
    11365  CallParams callParams;
    11366  GetBasicParams(callParams);
    11367 
    11368  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11369  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11370  Flush();
    11371 }
    11372 
    11373 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11374 {
    11375  CallParams callParams;
    11376  GetBasicParams(callParams);
    11377 
    11378  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11379  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11380  Flush();
    11381 }
    11382 
    11383 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11384 {
    11385  CallParams callParams;
    11386  GetBasicParams(callParams);
    11387 
    11388  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11389  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11390  createInfo.memoryTypeIndex,
    11391  createInfo.flags,
    11392  createInfo.blockSize,
    11393  (uint64_t)createInfo.minBlockCount,
    11394  (uint64_t)createInfo.maxBlockCount,
    11395  createInfo.frameInUseCount,
    11396  pool);
    11397  Flush();
    11398 }
    11399 
    11400 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11401 {
    11402  CallParams callParams;
    11403  GetBasicParams(callParams);
    11404 
    11405  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11406  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11407  pool);
    11408  Flush();
    11409 }
    11410 
    11411 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11412  const VkMemoryRequirements& vkMemReq,
    11413  const VmaAllocationCreateInfo& createInfo,
    11414  VmaAllocation allocation)
    11415 {
    11416  CallParams callParams;
    11417  GetBasicParams(callParams);
    11418 
    11419  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11420  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11421  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11422  vkMemReq.size,
    11423  vkMemReq.alignment,
    11424  vkMemReq.memoryTypeBits,
    11425  createInfo.flags,
    11426  createInfo.usage,
    11427  createInfo.requiredFlags,
    11428  createInfo.preferredFlags,
    11429  createInfo.memoryTypeBits,
    11430  createInfo.pool,
    11431  allocation,
    11432  userDataStr.GetString());
    11433  Flush();
    11434 }
    11435 
    11436 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11437  const VkMemoryRequirements& vkMemReq,
    11438  bool requiresDedicatedAllocation,
    11439  bool prefersDedicatedAllocation,
    11440  const VmaAllocationCreateInfo& createInfo,
    11441  VmaAllocation allocation)
    11442 {
    11443  CallParams callParams;
    11444  GetBasicParams(callParams);
    11445 
    11446  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11447  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11448  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11449  vkMemReq.size,
    11450  vkMemReq.alignment,
    11451  vkMemReq.memoryTypeBits,
    11452  requiresDedicatedAllocation ? 1 : 0,
    11453  prefersDedicatedAllocation ? 1 : 0,
    11454  createInfo.flags,
    11455  createInfo.usage,
    11456  createInfo.requiredFlags,
    11457  createInfo.preferredFlags,
    11458  createInfo.memoryTypeBits,
    11459  createInfo.pool,
    11460  allocation,
    11461  userDataStr.GetString());
    11462  Flush();
    11463 }
    11464 
    11465 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11466  const VkMemoryRequirements& vkMemReq,
    11467  bool requiresDedicatedAllocation,
    11468  bool prefersDedicatedAllocation,
    11469  const VmaAllocationCreateInfo& createInfo,
    11470  VmaAllocation allocation)
    11471 {
    11472  CallParams callParams;
    11473  GetBasicParams(callParams);
    11474 
    11475  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11476  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11477  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11478  vkMemReq.size,
    11479  vkMemReq.alignment,
    11480  vkMemReq.memoryTypeBits,
    11481  requiresDedicatedAllocation ? 1 : 0,
    11482  prefersDedicatedAllocation ? 1 : 0,
    11483  createInfo.flags,
    11484  createInfo.usage,
    11485  createInfo.requiredFlags,
    11486  createInfo.preferredFlags,
    11487  createInfo.memoryTypeBits,
    11488  createInfo.pool,
    11489  allocation,
    11490  userDataStr.GetString());
    11491  Flush();
    11492 }
    11493 
    11494 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11495  VmaAllocation allocation)
    11496 {
    11497  CallParams callParams;
    11498  GetBasicParams(callParams);
    11499 
    11500  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11501  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11502  allocation);
    11503  Flush();
    11504 }
    11505 
    11506 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11507  VmaAllocation allocation,
    11508  const void* pUserData)
    11509 {
    11510  CallParams callParams;
    11511  GetBasicParams(callParams);
    11512 
    11513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11514  UserDataString userDataStr(
    11515  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11516  pUserData);
    11517  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11518  allocation,
    11519  userDataStr.GetString());
    11520  Flush();
    11521 }
    11522 
    11523 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11524  VmaAllocation allocation)
    11525 {
    11526  CallParams callParams;
    11527  GetBasicParams(callParams);
    11528 
    11529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11530  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11531  allocation);
    11532  Flush();
    11533 }
    11534 
    11535 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11536  VmaAllocation allocation)
    11537 {
    11538  CallParams callParams;
    11539  GetBasicParams(callParams);
    11540 
    11541  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11542  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11543  allocation);
    11544  Flush();
    11545 }
    11546 
    11547 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11548  VmaAllocation allocation)
    11549 {
    11550  CallParams callParams;
    11551  GetBasicParams(callParams);
    11552 
    11553  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11554  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11555  allocation);
    11556  Flush();
    11557 }
    11558 
    11559 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11560  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11561 {
    11562  CallParams callParams;
    11563  GetBasicParams(callParams);
    11564 
    11565  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11566  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11567  allocation,
    11568  offset,
    11569  size);
    11570  Flush();
    11571 }
    11572 
    11573 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11575 {
    11576  CallParams callParams;
    11577  GetBasicParams(callParams);
    11578 
    11579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11580  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11581  allocation,
    11582  offset,
    11583  size);
    11584  Flush();
    11585 }
    11586 
    11587 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11588  const VkBufferCreateInfo& bufCreateInfo,
    11589  const VmaAllocationCreateInfo& allocCreateInfo,
    11590  VmaAllocation allocation)
    11591 {
    11592  CallParams callParams;
    11593  GetBasicParams(callParams);
    11594 
    11595  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11596  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11597  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11598  bufCreateInfo.flags,
    11599  bufCreateInfo.size,
    11600  bufCreateInfo.usage,
    11601  bufCreateInfo.sharingMode,
    11602  allocCreateInfo.flags,
    11603  allocCreateInfo.usage,
    11604  allocCreateInfo.requiredFlags,
    11605  allocCreateInfo.preferredFlags,
    11606  allocCreateInfo.memoryTypeBits,
    11607  allocCreateInfo.pool,
    11608  allocation,
    11609  userDataStr.GetString());
    11610  Flush();
    11611 }
    11612 
    11613 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11614  const VkImageCreateInfo& imageCreateInfo,
    11615  const VmaAllocationCreateInfo& allocCreateInfo,
    11616  VmaAllocation allocation)
    11617 {
    11618  CallParams callParams;
    11619  GetBasicParams(callParams);
    11620 
    11621  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11622  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11623  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11624  imageCreateInfo.flags,
    11625  imageCreateInfo.imageType,
    11626  imageCreateInfo.format,
    11627  imageCreateInfo.extent.width,
    11628  imageCreateInfo.extent.height,
    11629  imageCreateInfo.extent.depth,
    11630  imageCreateInfo.mipLevels,
    11631  imageCreateInfo.arrayLayers,
    11632  imageCreateInfo.samples,
    11633  imageCreateInfo.tiling,
    11634  imageCreateInfo.usage,
    11635  imageCreateInfo.sharingMode,
    11636  imageCreateInfo.initialLayout,
    11637  allocCreateInfo.flags,
    11638  allocCreateInfo.usage,
    11639  allocCreateInfo.requiredFlags,
    11640  allocCreateInfo.preferredFlags,
    11641  allocCreateInfo.memoryTypeBits,
    11642  allocCreateInfo.pool,
    11643  allocation,
    11644  userDataStr.GetString());
    11645  Flush();
    11646 }
    11647 
    11648 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11649  VmaAllocation allocation)
    11650 {
    11651  CallParams callParams;
    11652  GetBasicParams(callParams);
    11653 
    11654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11655  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11656  allocation);
    11657  Flush();
    11658 }
    11659 
    11660 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11661  VmaAllocation allocation)
    11662 {
    11663  CallParams callParams;
    11664  GetBasicParams(callParams);
    11665 
    11666  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11667  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11668  allocation);
    11669  Flush();
    11670 }
    11671 
    11672 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11673  VmaAllocation allocation)
    11674 {
    11675  CallParams callParams;
    11676  GetBasicParams(callParams);
    11677 
    11678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11679  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11680  allocation);
    11681  Flush();
    11682 }
    11683 
    11684 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11685  VmaAllocation allocation)
    11686 {
    11687  CallParams callParams;
    11688  GetBasicParams(callParams);
    11689 
    11690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11691  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11692  allocation);
    11693  Flush();
    11694 }
    11695 
    11696 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11697  VmaPool pool)
    11698 {
    11699  CallParams callParams;
    11700  GetBasicParams(callParams);
    11701 
    11702  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11703  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11704  pool);
    11705  Flush();
    11706 }
    11707 
    11708 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11709 {
    11710  if(pUserData != VMA_NULL)
    11711  {
    11712  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11713  {
    11714  m_Str = (const char*)pUserData;
    11715  }
    11716  else
    11717  {
    11718  sprintf_s(m_PtrStr, "%p", pUserData);
    11719  m_Str = m_PtrStr;
    11720  }
    11721  }
    11722  else
    11723  {
    11724  m_Str = "";
    11725  }
    11726 }
    11727 
    11728 void VmaRecorder::WriteConfiguration(
    11729  const VkPhysicalDeviceProperties& devProps,
    11730  const VkPhysicalDeviceMemoryProperties& memProps,
    11731  bool dedicatedAllocationExtensionEnabled)
    11732 {
    11733  fprintf(m_File, "Config,Begin\n");
    11734 
    11735  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    11736  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    11737  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    11738  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    11739  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    11740  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    11741 
    11742  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    11743  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    11744  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    11745 
    11746  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    11747  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    11748  {
    11749  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    11750  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    11751  }
    11752  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    11753  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    11754  {
    11755  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    11756  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    11757  }
    11758 
    11759  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    11760 
    11761  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    11762  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    11763  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    11764  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    11765  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    11766  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    11767  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    11768  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    11769  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11770 
    11771  fprintf(m_File, "Config,End\n");
    11772 }
    11773 
    11774 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11775 {
    11776  outParams.threadId = GetCurrentThreadId();
    11777 
    11778  LARGE_INTEGER counter;
    11779  QueryPerformanceCounter(&counter);
    11780  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11781 }
    11782 
    11783 void VmaRecorder::Flush()
    11784 {
    11785  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11786  {
    11787  fflush(m_File);
    11788  }
    11789 }
    11790 
    11791 #endif // #if VMA_RECORDING_ENABLED
    11792 
    11794 // VmaAllocator_T
    11795 
    11796 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11797  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11798  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11799  m_hDevice(pCreateInfo->device),
    11800  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11801  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11802  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11803  m_PreferredLargeHeapBlockSize(0),
    11804  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11805  m_CurrentFrameIndex(0),
    11806  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11807  m_NextPoolId(0)
    11809  ,m_pRecorder(VMA_NULL)
    11810 #endif
    11811 {
    11812  if(VMA_DEBUG_DETECT_CORRUPTION)
    11813  {
    11814  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11815  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11816  }
    11817 
    11818  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11819 
    11820 #if !(VMA_DEDICATED_ALLOCATION)
    11822  {
    11823  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11824  }
    11825 #endif
    11826 
    11827  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11828  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11829  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11830 
    11831  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11832  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11833 
    11834  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11835  {
    11836  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11837  }
    11838 
    11839  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11840  {
    11841  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11842  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11843  }
    11844 
    11845  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11846 
    11847  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11848  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11849 
    11850  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11851  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11852  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11853  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11854 
    11855  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11856  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11857 
    11858  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11859  {
    11860  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11861  {
    11862  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11863  if(limit != VK_WHOLE_SIZE)
    11864  {
    11865  m_HeapSizeLimit[heapIndex] = limit;
    11866  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11867  {
    11868  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11869  }
    11870  }
    11871  }
    11872  }
    11873 
    11874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11875  {
    11876  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11877 
    11878  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11879  this,
    11880  memTypeIndex,
    11881  preferredBlockSize,
    11882  0,
    11883  SIZE_MAX,
    11884  GetBufferImageGranularity(),
    11885  pCreateInfo->frameInUseCount,
    11886  false, // isCustomPool
    11887  false, // explicitBlockSize
    11888  false); // linearAlgorithm
    11889  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11890  // becase minBlockCount is 0.
    11891  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11892 
    11893  }
    11894 }
    11895 
    11896 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11897 {
    11898  VkResult res = VK_SUCCESS;
    11899 
    11900  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11901  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11902  {
    11903 #if VMA_RECORDING_ENABLED
    11904  m_pRecorder = vma_new(this, VmaRecorder)();
    11905  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11906  if(res != VK_SUCCESS)
    11907  {
    11908  return res;
    11909  }
    11910  m_pRecorder->WriteConfiguration(
    11911  m_PhysicalDeviceProperties,
    11912  m_MemProps,
    11913  m_UseKhrDedicatedAllocation);
    11914  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11915 #else
    11916  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11917  return VK_ERROR_FEATURE_NOT_PRESENT;
    11918 #endif
    11919  }
    11920 
    11921  return res;
    11922 }
    11923 
    11924 VmaAllocator_T::~VmaAllocator_T()
    11925 {
    11926 #if VMA_RECORDING_ENABLED
    11927  if(m_pRecorder != VMA_NULL)
    11928  {
    11929  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11930  vma_delete(this, m_pRecorder);
    11931  }
    11932 #endif
    11933 
    11934  VMA_ASSERT(m_Pools.empty());
    11935 
    11936  for(size_t i = GetMemoryTypeCount(); i--; )
    11937  {
    11938  vma_delete(this, m_pDedicatedAllocations[i]);
    11939  vma_delete(this, m_pBlockVectors[i]);
    11940  }
    11941 }
    11942 
    11943 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    11944 {
    11945 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    11946  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    11947  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    11948  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    11949  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    11950  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    11951  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    11952  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    11953  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    11954  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    11955  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    11956  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    11957  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    11958  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    11959  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    11960  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    11961  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    11962 #if VMA_DEDICATED_ALLOCATION
    11963  if(m_UseKhrDedicatedAllocation)
    11964  {
    11965  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    11966  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    11967  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    11968  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    11969  }
    11970 #endif // #if VMA_DEDICATED_ALLOCATION
    11971 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    11972 
    11973 #define VMA_COPY_IF_NOT_NULL(funcName) \
    11974  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    11975 
    11976  if(pVulkanFunctions != VMA_NULL)
    11977  {
    11978  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    11979  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    11980  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    11981  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    11982  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    11983  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    11984  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    11985  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    11986  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    11987  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    11988  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    11989  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    11990  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    11991  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    11992  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    11993  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    11994 #if VMA_DEDICATED_ALLOCATION
    11995  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    11996  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    11997 #endif
    11998  }
    11999 
    12000 #undef VMA_COPY_IF_NOT_NULL
    12001 
    12002  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    12003  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    12004  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    12005  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    12006  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    12007  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    12008  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    12009  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    12010  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    12011  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    12012  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    12013  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    12014  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    12015  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    12016  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    12017  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    12018  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    12019  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    12020 #if VMA_DEDICATED_ALLOCATION
    12021  if(m_UseKhrDedicatedAllocation)
    12022  {
    12023  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    12024  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    12025  }
    12026 #endif
    12027 }
    12028 
    12029 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12030 {
    12031  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12032  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12033  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12034  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12035 }
    12036 
    12037 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12038  VkDeviceSize size,
    12039  VkDeviceSize alignment,
    12040  bool dedicatedAllocation,
    12041  VkBuffer dedicatedBuffer,
    12042  VkImage dedicatedImage,
    12043  const VmaAllocationCreateInfo& createInfo,
    12044  uint32_t memTypeIndex,
    12045  VmaSuballocationType suballocType,
    12046  VmaAllocation* pAllocation)
    12047 {
    12048  VMA_ASSERT(pAllocation != VMA_NULL);
    12049  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12050 
    12051  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12052 
    12053  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12054  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12055  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12056  {
    12057  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12058  }
    12059 
    12060  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12061  VMA_ASSERT(blockVector);
    12062 
    12063  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12064  bool preferDedicatedMemory =
    12065  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12066  dedicatedAllocation ||
    12067  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12068  size > preferredBlockSize / 2;
    12069 
    12070  if(preferDedicatedMemory &&
    12071  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12072  finalCreateInfo.pool == VK_NULL_HANDLE)
    12073  {
    12075  }
    12076 
    12077  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12078  {
    12079  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12080  {
    12081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12082  }
    12083  else
    12084  {
    12085  return AllocateDedicatedMemory(
    12086  size,
    12087  suballocType,
    12088  memTypeIndex,
    12089  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12090  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12091  finalCreateInfo.pUserData,
    12092  dedicatedBuffer,
    12093  dedicatedImage,
    12094  pAllocation);
    12095  }
    12096  }
    12097  else
    12098  {
    12099  VkResult res = blockVector->Allocate(
    12100  VK_NULL_HANDLE, // hCurrentPool
    12101  m_CurrentFrameIndex.load(),
    12102  size,
    12103  alignment,
    12104  finalCreateInfo,
    12105  suballocType,
    12106  pAllocation);
    12107  if(res == VK_SUCCESS)
    12108  {
    12109  return res;
    12110  }
    12111 
    12112  // 5. Try dedicated memory.
    12113  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12114  {
    12115  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12116  }
    12117  else
    12118  {
    12119  res = AllocateDedicatedMemory(
    12120  size,
    12121  suballocType,
    12122  memTypeIndex,
    12123  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12124  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12125  finalCreateInfo.pUserData,
    12126  dedicatedBuffer,
    12127  dedicatedImage,
    12128  pAllocation);
    12129  if(res == VK_SUCCESS)
    12130  {
    12131  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12132  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12133  return VK_SUCCESS;
    12134  }
    12135  else
    12136  {
    12137  // Everything failed: Return error code.
    12138  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12139  return res;
    12140  }
    12141  }
    12142  }
    12143 }
    12144 
    12145 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    12146  VkDeviceSize size,
    12147  VmaSuballocationType suballocType,
    12148  uint32_t memTypeIndex,
    12149  bool map,
    12150  bool isUserDataString,
    12151  void* pUserData,
    12152  VkBuffer dedicatedBuffer,
    12153  VkImage dedicatedImage,
    12154  VmaAllocation* pAllocation)
    12155 {
    12156  VMA_ASSERT(pAllocation);
    12157 
    12158  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12159  allocInfo.memoryTypeIndex = memTypeIndex;
    12160  allocInfo.allocationSize = size;
    12161 
    12162 #if VMA_DEDICATED_ALLOCATION
    12163  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    12164  if(m_UseKhrDedicatedAllocation)
    12165  {
    12166  if(dedicatedBuffer != VK_NULL_HANDLE)
    12167  {
    12168  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    12169  dedicatedAllocInfo.buffer = dedicatedBuffer;
    12170  allocInfo.pNext = &dedicatedAllocInfo;
    12171  }
    12172  else if(dedicatedImage != VK_NULL_HANDLE)
    12173  {
    12174  dedicatedAllocInfo.image = dedicatedImage;
    12175  allocInfo.pNext = &dedicatedAllocInfo;
    12176  }
    12177  }
    12178 #endif // #if VMA_DEDICATED_ALLOCATION
    12179 
    12180  // Allocate VkDeviceMemory.
    12181  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    12182  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    12183  if(res < 0)
    12184  {
    12185  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12186  return res;
    12187  }
    12188 
    12189  void* pMappedData = VMA_NULL;
    12190  if(map)
    12191  {
    12192  res = (*m_VulkanFunctions.vkMapMemory)(
    12193  m_hDevice,
    12194  hMemory,
    12195  0,
    12196  VK_WHOLE_SIZE,
    12197  0,
    12198  &pMappedData);
    12199  if(res < 0)
    12200  {
    12201  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    12202  FreeVulkanMemory(memTypeIndex, size, hMemory);
    12203  return res;
    12204  }
    12205  }
    12206 
    12207  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    12208  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    12209  (*pAllocation)->SetUserData(this, pUserData);
    12210  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12211  {
    12212  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12213  }
    12214 
    12215  // Register it in m_pDedicatedAllocations.
    12216  {
    12217  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12218  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    12219  VMA_ASSERT(pDedicatedAllocations);
    12220  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    12221  }
    12222 
    12223  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    12224 
    12225  return VK_SUCCESS;
    12226 }
    12227 
    12228 void VmaAllocator_T::GetBufferMemoryRequirements(
    12229  VkBuffer hBuffer,
    12230  VkMemoryRequirements& memReq,
    12231  bool& requiresDedicatedAllocation,
    12232  bool& prefersDedicatedAllocation) const
    12233 {
    12234 #if VMA_DEDICATED_ALLOCATION
    12235  if(m_UseKhrDedicatedAllocation)
    12236  {
    12237  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12238  memReqInfo.buffer = hBuffer;
    12239 
    12240  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12241 
    12242  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12243  memReq2.pNext = &memDedicatedReq;
    12244 
    12245  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12246 
    12247  memReq = memReq2.memoryRequirements;
    12248  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12249  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12250  }
    12251  else
    12252 #endif // #if VMA_DEDICATED_ALLOCATION
    12253  {
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12255  requiresDedicatedAllocation = false;
    12256  prefersDedicatedAllocation = false;
    12257  }
    12258 }
    12259 
    12260 void VmaAllocator_T::GetImageMemoryRequirements(
    12261  VkImage hImage,
    12262  VkMemoryRequirements& memReq,
    12263  bool& requiresDedicatedAllocation,
    12264  bool& prefersDedicatedAllocation) const
    12265 {
    12266 #if VMA_DEDICATED_ALLOCATION
    12267  if(m_UseKhrDedicatedAllocation)
    12268  {
    12269  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12270  memReqInfo.image = hImage;
    12271 
    12272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12273 
    12274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12275  memReq2.pNext = &memDedicatedReq;
    12276 
    12277  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12278 
    12279  memReq = memReq2.memoryRequirements;
    12280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12282  }
    12283  else
    12284 #endif // #if VMA_DEDICATED_ALLOCATION
    12285  {
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12287  requiresDedicatedAllocation = false;
    12288  prefersDedicatedAllocation = false;
    12289  }
    12290 }
    12291 
    12292 VkResult VmaAllocator_T::AllocateMemory(
    12293  const VkMemoryRequirements& vkMemReq,
    12294  bool requiresDedicatedAllocation,
    12295  bool prefersDedicatedAllocation,
    12296  VkBuffer dedicatedBuffer,
    12297  VkImage dedicatedImage,
    12298  const VmaAllocationCreateInfo& createInfo,
    12299  VmaSuballocationType suballocType,
    12300  VmaAllocation* pAllocation)
    12301 {
    12302  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12303 
    12304  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12305  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12306  {
    12307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12309  }
    12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12312  {
    12313  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12314  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12315  }
    12316  if(requiresDedicatedAllocation)
    12317  {
    12318  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if(createInfo.pool != VK_NULL_HANDLE)
    12324  {
    12325  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12326  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12327  }
    12328  }
    12329  if((createInfo.pool != VK_NULL_HANDLE) &&
    12330  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12331  {
    12332  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12333  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12334  }
    12335 
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  const VkDeviceSize alignmentForPool = VMA_MAX(
    12339  vkMemReq.alignment,
    12340  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12341  return createInfo.pool->m_BlockVector.Allocate(
    12342  createInfo.pool,
    12343  m_CurrentFrameIndex.load(),
    12344  vkMemReq.size,
    12345  alignmentForPool,
    12346  createInfo,
    12347  suballocType,
    12348  pAllocation);
    12349  }
    12350  else
    12351  {
    12352  // Bit mask of memory Vulkan types acceptable for this allocation.
    12353  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12354  uint32_t memTypeIndex = UINT32_MAX;
    12355  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12356  if(res == VK_SUCCESS)
    12357  {
    12358  VkDeviceSize alignmentForMemType = VMA_MAX(
    12359  vkMemReq.alignment,
    12360  GetMemoryTypeMinAlignment(memTypeIndex));
    12361 
    12362  res = AllocateMemoryOfType(
    12363  vkMemReq.size,
    12364  alignmentForMemType,
    12365  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12366  dedicatedBuffer,
    12367  dedicatedImage,
    12368  createInfo,
    12369  memTypeIndex,
    12370  suballocType,
    12371  pAllocation);
    12372  // Succeeded on first try.
    12373  if(res == VK_SUCCESS)
    12374  {
    12375  return res;
    12376  }
    12377  // Allocation from this memory type failed. Try other compatible memory types.
    12378  else
    12379  {
    12380  for(;;)
    12381  {
    12382  // Remove old memTypeIndex from list of possibilities.
    12383  memoryTypeBits &= ~(1u << memTypeIndex);
    12384  // Find alternative memTypeIndex.
    12385  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  alignmentForMemType = VMA_MAX(
    12389  vkMemReq.alignment,
    12390  GetMemoryTypeMinAlignment(memTypeIndex));
    12391 
    12392  res = AllocateMemoryOfType(
    12393  vkMemReq.size,
    12394  alignmentForMemType,
    12395  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12396  dedicatedBuffer,
    12397  dedicatedImage,
    12398  createInfo,
    12399  memTypeIndex,
    12400  suballocType,
    12401  pAllocation);
    12402  // Allocation from this alternative memory type succeeded.
    12403  if(res == VK_SUCCESS)
    12404  {
    12405  return res;
    12406  }
    12407  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12408  }
    12409  // No other matching memory type index could be found.
    12410  else
    12411  {
    12412  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12414  }
    12415  }
    12416  }
    12417  }
    12418  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12419  else
    12420  return res;
    12421  }
    12422 }
    12423 
    12424 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12425 {
    12426  VMA_ASSERT(allocation);
    12427 
    12428  if(TouchAllocation(allocation))
    12429  {
    12430  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12431  {
    12432  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12433  }
    12434 
    12435  switch(allocation->GetType())
    12436  {
    12437  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12438  {
    12439  VmaBlockVector* pBlockVector = VMA_NULL;
    12440  VmaPool hPool = allocation->GetPool();
    12441  if(hPool != VK_NULL_HANDLE)
    12442  {
    12443  pBlockVector = &hPool->m_BlockVector;
    12444  }
    12445  else
    12446  {
    12447  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12448  pBlockVector = m_pBlockVectors[memTypeIndex];
    12449  }
    12450  pBlockVector->Free(allocation);
    12451  }
    12452  break;
    12453  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12454  FreeDedicatedMemory(allocation);
    12455  break;
    12456  default:
    12457  VMA_ASSERT(0);
    12458  }
    12459  }
    12460 
    12461  allocation->SetUserData(this, VMA_NULL);
    12462  vma_delete(this, allocation);
    12463 }
    12464 
    12465 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12466 {
    12467  // Initialize.
    12468  InitStatInfo(pStats->total);
    12469  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12470  InitStatInfo(pStats->memoryType[i]);
    12471  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12472  InitStatInfo(pStats->memoryHeap[i]);
    12473 
    12474  // Process default pools.
    12475  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12476  {
    12477  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12478  VMA_ASSERT(pBlockVector);
    12479  pBlockVector->AddStats(pStats);
    12480  }
    12481 
    12482  // Process custom pools.
    12483  {
    12484  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12485  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12486  {
    12487  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12488  }
    12489  }
    12490 
    12491  // Process dedicated allocations.
    12492  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12493  {
    12494  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12495  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12496  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12497  VMA_ASSERT(pDedicatedAllocVector);
    12498  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12499  {
    12500  VmaStatInfo allocationStatInfo;
    12501  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12502  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12503  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12504  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12505  }
    12506  }
    12507 
    12508  // Postprocess.
    12509  VmaPostprocessCalcStatInfo(pStats->total);
    12510  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12511  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12512  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12513  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12514 }
    12515 
    12516 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12517 
    12518 VkResult VmaAllocator_T::Defragment(
    12519  VmaAllocation* pAllocations,
    12520  size_t allocationCount,
    12521  VkBool32* pAllocationsChanged,
    12522  const VmaDefragmentationInfo* pDefragmentationInfo,
    12523  VmaDefragmentationStats* pDefragmentationStats)
    12524 {
    12525  if(pAllocationsChanged != VMA_NULL)
    12526  {
    12527  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12528  }
    12529  if(pDefragmentationStats != VMA_NULL)
    12530  {
    12531  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12532  }
    12533 
    12534  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12535 
    12536  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12537 
    12538  const size_t poolCount = m_Pools.size();
    12539 
    12540  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12541  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12542  {
    12543  VmaAllocation hAlloc = pAllocations[allocIndex];
    12544  VMA_ASSERT(hAlloc);
    12545  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12546  // DedicatedAlloc cannot be defragmented.
    12547  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12548  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12549  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12550  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12551  // Lost allocation cannot be defragmented.
    12552  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12553  {
    12554  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12555 
    12556  const VmaPool hAllocPool = hAlloc->GetPool();
    12557  // This allocation belongs to custom pool.
    12558  if(hAllocPool != VK_NULL_HANDLE)
    12559  {
    12560  // Pools with linear or buddy algorithm are not defragmented.
    12561  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12562  {
    12563  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12564  }
    12565  }
    12566  // This allocation belongs to general pool.
    12567  else
    12568  {
    12569  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12570  }
    12571 
    12572  if(pAllocBlockVector != VMA_NULL)
    12573  {
    12574  VmaDefragmentator* const pDefragmentator =
    12575  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12576  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12577  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12578  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12579  }
    12580  }
    12581  }
    12582 
    12583  VkResult result = VK_SUCCESS;
    12584 
    12585  // ======== Main processing.
    12586 
    12587  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12588  uint32_t maxAllocationsToMove = UINT32_MAX;
    12589  if(pDefragmentationInfo != VMA_NULL)
    12590  {
    12591  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12592  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12593  }
    12594 
    12595  // Process standard memory.
    12596  for(uint32_t memTypeIndex = 0;
    12597  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12598  ++memTypeIndex)
    12599  {
    12600  // Only HOST_VISIBLE memory types can be defragmented.
    12601  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12602  {
    12603  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12604  pDefragmentationStats,
    12605  maxBytesToMove,
    12606  maxAllocationsToMove);
    12607  }
    12608  }
    12609 
    12610  // Process custom pools.
    12611  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12612  {
    12613  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12614  pDefragmentationStats,
    12615  maxBytesToMove,
    12616  maxAllocationsToMove);
    12617  }
    12618 
    12619  // ======== Destroy defragmentators.
    12620 
    12621  // Process custom pools.
    12622  for(size_t poolIndex = poolCount; poolIndex--; )
    12623  {
    12624  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12625  }
    12626 
    12627  // Process standard memory.
    12628  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12629  {
    12630  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12631  {
    12632  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12633  }
    12634  }
    12635 
    12636  return result;
    12637 }
    12638 
    12639 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    12640 {
    12641  if(hAllocation->CanBecomeLost())
    12642  {
    12643  /*
    12644  Warning: This is a carefully designed algorithm.
    12645  Do not modify unless you really know what you're doing :)
    12646  */
    12647  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12648  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12649  for(;;)
    12650  {
    12651  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12652  {
    12653  pAllocationInfo->memoryType = UINT32_MAX;
    12654  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    12655  pAllocationInfo->offset = 0;
    12656  pAllocationInfo->size = hAllocation->GetSize();
    12657  pAllocationInfo->pMappedData = VMA_NULL;
    12658  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12659  return;
    12660  }
    12661  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12662  {
    12663  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12664  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12665  pAllocationInfo->offset = hAllocation->GetOffset();
    12666  pAllocationInfo->size = hAllocation->GetSize();
    12667  pAllocationInfo->pMappedData = VMA_NULL;
    12668  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12669  return;
    12670  }
    12671  else // Last use time earlier than current time.
    12672  {
    12673  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12674  {
    12675  localLastUseFrameIndex = localCurrFrameIndex;
    12676  }
    12677  }
    12678  }
    12679  }
    12680  else
    12681  {
    12682 #if VMA_STATS_STRING_ENABLED
    12683  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12684  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12685  for(;;)
    12686  {
    12687  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    12688  if(localLastUseFrameIndex == localCurrFrameIndex)
    12689  {
    12690  break;
    12691  }
    12692  else // Last use time earlier than current time.
    12693  {
    12694  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12695  {
    12696  localLastUseFrameIndex = localCurrFrameIndex;
    12697  }
    12698  }
    12699  }
    12700 #endif
    12701 
    12702  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    12703  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    12704  pAllocationInfo->offset = hAllocation->GetOffset();
    12705  pAllocationInfo->size = hAllocation->GetSize();
    12706  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    12707  pAllocationInfo->pUserData = hAllocation->GetUserData();
    12708  }
    12709 }
    12710 
    12711 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    12712 {
    12713  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    12714  if(hAllocation->CanBecomeLost())
    12715  {
    12716  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12717  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12718  for(;;)
    12719  {
    12720  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    12721  {
    12722  return false;
    12723  }
    12724  else if(localLastUseFrameIndex == localCurrFrameIndex)
    12725  {
    12726  return true;
    12727  }
    12728  else // Last use time earlier than current time.
    12729  {
    12730  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12731  {
    12732  localLastUseFrameIndex = localCurrFrameIndex;
    12733  }
    12734  }
    12735  }
    12736  }
    12737  else
    12738  {
    12739 #if VMA_STATS_STRING_ENABLED
    12740  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    12741  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    12742  for(;;)
    12743  {
    12744  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    12745  if(localLastUseFrameIndex == localCurrFrameIndex)
    12746  {
    12747  break;
    12748  }
    12749  else // Last use time earlier than current time.
    12750  {
    12751  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    12752  {
    12753  localLastUseFrameIndex = localCurrFrameIndex;
    12754  }
    12755  }
    12756  }
    12757 #endif
    12758 
    12759  return true;
    12760  }
    12761 }
    12762 
    12763 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12764 {
    12765  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12766 
    12767  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12768 
    12769  if(newCreateInfo.maxBlockCount == 0)
    12770  {
    12771  newCreateInfo.maxBlockCount = SIZE_MAX;
    12772  }
    12773  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12774  {
    12775  return VK_ERROR_INITIALIZATION_FAILED;
    12776  }
    12777 
    12778  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12779 
    12780  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12781 
    12782  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12783  if(res != VK_SUCCESS)
    12784  {
    12785  vma_delete(this, *pPool);
    12786  *pPool = VMA_NULL;
    12787  return res;
    12788  }
    12789 
    12790  // Add to m_Pools.
    12791  {
    12792  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12793  (*pPool)->SetId(m_NextPoolId++);
    12794  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12795  }
    12796 
    12797  return VK_SUCCESS;
    12798 }
    12799 
    12800 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12801 {
    12802  // Remove from m_Pools.
    12803  {
    12804  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12805  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12806  VMA_ASSERT(success && "Pool not found in Allocator.");
    12807  }
    12808 
    12809  vma_delete(this, pool);
    12810 }
    12811 
    12812 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    12813 {
    12814  pool->m_BlockVector.GetPoolStats(pPoolStats);
    12815 }
    12816 
    12817 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    12818 {
    12819  m_CurrentFrameIndex.store(frameIndex);
    12820 }
    12821 
    12822 void VmaAllocator_T::MakePoolAllocationsLost(
    12823  VmaPool hPool,
    12824  size_t* pLostAllocationCount)
    12825 {
    12826  hPool->m_BlockVector.MakePoolAllocationsLost(
    12827  m_CurrentFrameIndex.load(),
    12828  pLostAllocationCount);
    12829 }
    12830 
    12831 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    12832 {
    12833  return hPool->m_BlockVector.CheckCorruption();
    12834 }
    12835 
    12836 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12837 {
    12838  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12839 
    12840  // Process default pools.
    12841  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12842  {
    12843  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12844  {
    12845  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12846  VMA_ASSERT(pBlockVector);
    12847  VkResult localRes = pBlockVector->CheckCorruption();
    12848  switch(localRes)
    12849  {
    12850  case VK_ERROR_FEATURE_NOT_PRESENT:
    12851  break;
    12852  case VK_SUCCESS:
    12853  finalRes = VK_SUCCESS;
    12854  break;
    12855  default:
    12856  return localRes;
    12857  }
    12858  }
    12859  }
    12860 
    12861  // Process custom pools.
    12862  {
    12863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12864  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12865  {
    12866  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12867  {
    12868  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12869  switch(localRes)
    12870  {
    12871  case VK_ERROR_FEATURE_NOT_PRESENT:
    12872  break;
    12873  case VK_SUCCESS:
    12874  finalRes = VK_SUCCESS;
    12875  break;
    12876  default:
    12877  return localRes;
    12878  }
    12879  }
    12880  }
    12881  }
    12882 
    12883  return finalRes;
    12884 }
    12885 
    12886 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    12887 {
    12888  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    12889  (*pAllocation)->InitLost();
    12890 }
    12891 
    12892 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12893 {
    12894  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12895 
    12896  VkResult res;
    12897  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12898  {
    12899  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12900  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12901  {
    12902  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12903  if(res == VK_SUCCESS)
    12904  {
    12905  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12906  }
    12907  }
    12908  else
    12909  {
    12910  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12911  }
    12912  }
    12913  else
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  }
    12917 
    12918  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12919  {
    12920  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12921  }
    12922 
    12923  return res;
    12924 }
    12925 
    12926 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    12927 {
    12928  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    12929  {
    12930  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    12931  }
    12932 
    12933  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    12934 
    12935  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    12936  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12937  {
    12938  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12939  m_HeapSizeLimit[heapIndex] += size;
    12940  }
    12941 }
    12942 
    12943 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12944 {
    12945  if(hAllocation->CanBecomeLost())
    12946  {
    12947  return VK_ERROR_MEMORY_MAP_FAILED;
    12948  }
    12949 
    12950  switch(hAllocation->GetType())
    12951  {
    12952  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12953  {
    12954  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12955  char *pBytes = VMA_NULL;
    12956  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12957  if(res == VK_SUCCESS)
    12958  {
    12959  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12960  hAllocation->BlockAllocMap();
    12961  }
    12962  return res;
    12963  }
    12964  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12965  return hAllocation->DedicatedAllocMap(this, ppData);
    12966  default:
    12967  VMA_ASSERT(0);
    12968  return VK_ERROR_MEMORY_MAP_FAILED;
    12969  }
    12970 }
    12971 
    12972 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12973 {
    12974  switch(hAllocation->GetType())
    12975  {
    12976  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12977  {
    12978  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12979  hAllocation->BlockAllocUnmap();
    12980  pBlock->Unmap(this, 1);
    12981  }
    12982  break;
    12983  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12984  hAllocation->DedicatedAllocUnmap(this);
    12985  break;
    12986  default:
    12987  VMA_ASSERT(0);
    12988  }
    12989 }
    12990 
    12991 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12992 {
    12993  VkResult res = VK_SUCCESS;
    12994  switch(hAllocation->GetType())
    12995  {
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  res = GetVulkanFunctions().vkBindBufferMemory(
    12998  m_hDevice,
    12999  hBuffer,
    13000  hAllocation->GetMemory(),
    13001  0); //memoryOffset
    13002  break;
    13003  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13004  {
    13005  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13006  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13007  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13008  break;
    13009  }
    13010  default:
    13011  VMA_ASSERT(0);
    13012  }
    13013  return res;
    13014 }
    13015 
    13016 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13017 {
    13018  VkResult res = VK_SUCCESS;
    13019  switch(hAllocation->GetType())
    13020  {
    13021  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13022  res = GetVulkanFunctions().vkBindImageMemory(
    13023  m_hDevice,
    13024  hImage,
    13025  hAllocation->GetMemory(),
    13026  0); //memoryOffset
    13027  break;
    13028  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13029  {
    13030  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13031  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13032  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13033  break;
    13034  }
    13035  default:
    13036  VMA_ASSERT(0);
    13037  }
    13038  return res;
    13039 }
    13040 
    13041 void VmaAllocator_T::FlushOrInvalidateAllocation(
    13042  VmaAllocation hAllocation,
    13043  VkDeviceSize offset, VkDeviceSize size,
    13044  VMA_CACHE_OPERATION op)
    13045 {
    13046  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    13047  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    13048  {
    13049  const VkDeviceSize allocationSize = hAllocation->GetSize();
    13050  VMA_ASSERT(offset <= allocationSize);
    13051 
    13052  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    13053 
    13054  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    13055  memRange.memory = hAllocation->GetMemory();
    13056 
    13057  switch(hAllocation->GetType())
    13058  {
    13059  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13060  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13061  if(size == VK_WHOLE_SIZE)
    13062  {
    13063  memRange.size = allocationSize - memRange.offset;
    13064  }
    13065  else
    13066  {
    13067  VMA_ASSERT(offset + size <= allocationSize);
    13068  memRange.size = VMA_MIN(
    13069  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    13070  allocationSize - memRange.offset);
    13071  }
    13072  break;
    13073 
    13074  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13075  {
    13076  // 1. Still within this allocation.
    13077  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    13078  if(size == VK_WHOLE_SIZE)
    13079  {
    13080  size = allocationSize - offset;
    13081  }
    13082  else
    13083  {
    13084  VMA_ASSERT(offset + size <= allocationSize);
    13085  }
    13086  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    13087 
    13088  // 2. Adjust to whole block.
    13089  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    13090  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    13091  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    13092  memRange.offset += allocationOffset;
    13093  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    13094 
    13095  break;
    13096  }
    13097 
    13098  default:
    13099  VMA_ASSERT(0);
    13100  }
    13101 
    13102  switch(op)
    13103  {
    13104  case VMA_CACHE_FLUSH:
    13105  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13106  break;
    13107  case VMA_CACHE_INVALIDATE:
    13108  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    13109  break;
    13110  default:
    13111  VMA_ASSERT(0);
    13112  }
    13113  }
    13114  // else: Just ignore this call.
    13115 }
    13116 
    13117 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13118 {
    13119  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13120 
    13121  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13122  {
    13123  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13124  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13125  VMA_ASSERT(pDedicatedAllocations);
    13126  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13127  VMA_ASSERT(success);
    13128  }
    13129 
    13130  VkDeviceMemory hMemory = allocation->GetMemory();
    13131 
    13132  /*
    13133  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13134  before vkFreeMemory.
    13135 
    13136  if(allocation->GetMappedData() != VMA_NULL)
    13137  {
    13138  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13139  }
    13140  */
    13141 
    13142  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13143 
    13144  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13145 }
    13146 
    13147 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13148 {
    13149  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13150  !hAllocation->CanBecomeLost() &&
    13151  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13152  {
    13153  void* pData = VMA_NULL;
    13154  VkResult res = Map(hAllocation, &pData);
    13155  if(res == VK_SUCCESS)
    13156  {
    13157  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13158  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13159  Unmap(hAllocation);
    13160  }
    13161  else
    13162  {
    13163  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13164  }
    13165  }
    13166 }
    13167 
    13168 #if VMA_STATS_STRING_ENABLED
    13169 
    13170 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    13171 {
    13172  bool dedicatedAllocationsStarted = false;
    13173  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13174  {
    13175  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13176  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    13177  VMA_ASSERT(pDedicatedAllocVector);
    13178  if(pDedicatedAllocVector->empty() == false)
    13179  {
    13180  if(dedicatedAllocationsStarted == false)
    13181  {
    13182  dedicatedAllocationsStarted = true;
    13183  json.WriteString("DedicatedAllocations");
    13184  json.BeginObject();
    13185  }
    13186 
    13187  json.BeginString("Type ");
    13188  json.ContinueString(memTypeIndex);
    13189  json.EndString();
    13190 
    13191  json.BeginArray();
    13192 
    13193  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    13194  {
    13195  json.BeginObject(true);
    13196  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    13197  hAlloc->PrintParameters(json);
    13198  json.EndObject();
    13199  }
    13200 
    13201  json.EndArray();
    13202  }
    13203  }
    13204  if(dedicatedAllocationsStarted)
    13205  {
    13206  json.EndObject();
    13207  }
    13208 
    13209  {
    13210  bool allocationsStarted = false;
    13211  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13212  {
    13213  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    13214  {
    13215  if(allocationsStarted == false)
    13216  {
    13217  allocationsStarted = true;
    13218  json.WriteString("DefaultPools");
    13219  json.BeginObject();
    13220  }
    13221 
    13222  json.BeginString("Type ");
    13223  json.ContinueString(memTypeIndex);
    13224  json.EndString();
    13225 
    13226  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    13227  }
    13228  }
    13229  if(allocationsStarted)
    13230  {
    13231  json.EndObject();
    13232  }
    13233  }
    13234 
    13235  // Custom pools
    13236  {
    13237  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13238  const size_t poolCount = m_Pools.size();
    13239  if(poolCount > 0)
    13240  {
    13241  json.WriteString("Pools");
    13242  json.BeginObject();
    13243  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13244  {
    13245  json.BeginString();
    13246  json.ContinueString(m_Pools[poolIndex]->GetId());
    13247  json.EndString();
    13248 
    13249  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    13250  }
    13251  json.EndObject();
    13252  }
    13253  }
    13254 }
    13255 
    13256 #endif // #if VMA_STATS_STRING_ENABLED
    13257 
    13259 // Public interface
    13260 
    13261 VkResult vmaCreateAllocator(
    13262  const VmaAllocatorCreateInfo* pCreateInfo,
    13263  VmaAllocator* pAllocator)
    13264 {
    13265  VMA_ASSERT(pCreateInfo && pAllocator);
    13266  VMA_DEBUG_LOG("vmaCreateAllocator");
    13267  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13268  return (*pAllocator)->Init(pCreateInfo);
    13269 }
    13270 
    13271 void vmaDestroyAllocator(
    13272  VmaAllocator allocator)
    13273 {
    13274  if(allocator != VK_NULL_HANDLE)
    13275  {
    13276  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13277  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13278  vma_delete(&allocationCallbacks, allocator);
    13279  }
    13280 }
    13281 
    13283  VmaAllocator allocator,
    13284  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13285 {
    13286  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13287  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13288 }
    13289 
    13291  VmaAllocator allocator,
    13292  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13293 {
    13294  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13295  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13296 }
    13297 
    13299  VmaAllocator allocator,
    13300  uint32_t memoryTypeIndex,
    13301  VkMemoryPropertyFlags* pFlags)
    13302 {
    13303  VMA_ASSERT(allocator && pFlags);
    13304  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13305  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13306 }
    13307 
    13309  VmaAllocator allocator,
    13310  uint32_t frameIndex)
    13311 {
    13312  VMA_ASSERT(allocator);
    13313  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13314 
    13315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13316 
    13317  allocator->SetCurrentFrameIndex(frameIndex);
    13318 }
    13319 
    13320 void vmaCalculateStats(
    13321  VmaAllocator allocator,
    13322  VmaStats* pStats)
    13323 {
    13324  VMA_ASSERT(allocator && pStats);
    13325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13326  allocator->CalculateStats(pStats);
    13327 }
    13328 
    13329 #if VMA_STATS_STRING_ENABLED
    13330 
    13331 void vmaBuildStatsString(
    13332  VmaAllocator allocator,
    13333  char** ppStatsString,
    13334  VkBool32 detailedMap)
    13335 {
    13336  VMA_ASSERT(allocator && ppStatsString);
    13337  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13338 
    13339  VmaStringBuilder sb(allocator);
    13340  {
    13341  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    13342  json.BeginObject();
    13343 
    13344  VmaStats stats;
    13345  allocator->CalculateStats(&stats);
    13346 
    13347  json.WriteString("Total");
    13348  VmaPrintStatInfo(json, stats.total);
    13349 
    13350  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    13351  {
    13352  json.BeginString("Heap ");
    13353  json.ContinueString(heapIndex);
    13354  json.EndString();
    13355  json.BeginObject();
    13356 
    13357  json.WriteString("Size");
    13358  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    13359 
    13360  json.WriteString("Flags");
    13361  json.BeginArray(true);
    13362  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    13363  {
    13364  json.WriteString("DEVICE_LOCAL");
    13365  }
    13366  json.EndArray();
    13367 
    13368  if(stats.memoryHeap[heapIndex].blockCount > 0)
    13369  {
    13370  json.WriteString("Stats");
    13371  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    13372  }
    13373 
    13374  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    13375  {
    13376  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    13377  {
    13378  json.BeginString("Type ");
    13379  json.ContinueString(typeIndex);
    13380  json.EndString();
    13381 
    13382  json.BeginObject();
    13383 
    13384  json.WriteString("Flags");
    13385  json.BeginArray(true);
    13386  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    13387  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    13388  {
    13389  json.WriteString("DEVICE_LOCAL");
    13390  }
    13391  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13392  {
    13393  json.WriteString("HOST_VISIBLE");
    13394  }
    13395  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    13396  {
    13397  json.WriteString("HOST_COHERENT");
    13398  }
    13399  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    13400  {
    13401  json.WriteString("HOST_CACHED");
    13402  }
    13403  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    13404  {
    13405  json.WriteString("LAZILY_ALLOCATED");
    13406  }
    13407  json.EndArray();
    13408 
    13409  if(stats.memoryType[typeIndex].blockCount > 0)
    13410  {
    13411  json.WriteString("Stats");
    13412  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    13413  }
    13414 
    13415  json.EndObject();
    13416  }
    13417  }
    13418 
    13419  json.EndObject();
    13420  }
    13421  if(detailedMap == VK_TRUE)
    13422  {
    13423  allocator->PrintDetailedMap(json);
    13424  }
    13425 
    13426  json.EndObject();
    13427  }
    13428 
    13429  const size_t len = sb.GetLength();
    13430  char* const pChars = vma_new_array(allocator, char, len + 1);
    13431  if(len > 0)
    13432  {
    13433  memcpy(pChars, sb.GetData(), len);
    13434  }
    13435  pChars[len] = '\0';
    13436  *ppStatsString = pChars;
    13437 }
    13438 
    13439 void vmaFreeStatsString(
    13440  VmaAllocator allocator,
    13441  char* pStatsString)
    13442 {
    13443  if(pStatsString != VMA_NULL)
    13444  {
    13445  VMA_ASSERT(allocator);
    13446  size_t len = strlen(pStatsString);
    13447  vma_delete_array(allocator, pStatsString, len + 1);
    13448  }
    13449 }
    13450 
    13451 #endif // #if VMA_STATS_STRING_ENABLED
    13452 
    13453 /*
    13454 This function is not protected by any mutex because it just reads immutable data.
    13455 */
    13456 VkResult vmaFindMemoryTypeIndex(
    13457  VmaAllocator allocator,
    13458  uint32_t memoryTypeBits,
    13459  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13460  uint32_t* pMemoryTypeIndex)
    13461 {
    13462  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13463  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13464  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13465 
    13466  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13467  {
    13468  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13469  }
    13470 
    13471  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13472  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13473 
    13474  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13475  if(mapped)
    13476  {
    13477  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13478  }
    13479 
    13480  // Convert usage to requiredFlags and preferredFlags.
    13481  switch(pAllocationCreateInfo->usage)
    13482  {
    13484  break;
    13486  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13487  {
    13488  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13489  }
    13490  break;
    13492  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13493  break;
    13495  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13496  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13497  {
    13498  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13499  }
    13500  break;
    13502  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13503  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13504  break;
    13505  default:
    13506  break;
    13507  }
    13508 
    13509  *pMemoryTypeIndex = UINT32_MAX;
    13510  uint32_t minCost = UINT32_MAX;
    13511  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13512  memTypeIndex < allocator->GetMemoryTypeCount();
    13513  ++memTypeIndex, memTypeBit <<= 1)
    13514  {
    13515  // This memory type is acceptable according to memoryTypeBits bitmask.
    13516  if((memTypeBit & memoryTypeBits) != 0)
    13517  {
    13518  const VkMemoryPropertyFlags currFlags =
    13519  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13520  // This memory type contains requiredFlags.
    13521  if((requiredFlags & ~currFlags) == 0)
    13522  {
    13523  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13524  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13525  // Remember memory type with lowest cost.
    13526  if(currCost < minCost)
    13527  {
    13528  *pMemoryTypeIndex = memTypeIndex;
    13529  if(currCost == 0)
    13530  {
    13531  return VK_SUCCESS;
    13532  }
    13533  minCost = currCost;
    13534  }
    13535  }
    13536  }
    13537  }
    13538  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13539 }
    13540 
    13542  VmaAllocator allocator,
    13543  const VkBufferCreateInfo* pBufferCreateInfo,
    13544  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13545  uint32_t* pMemoryTypeIndex)
    13546 {
    13547  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13548  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13549  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13550  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13551 
    13552  const VkDevice hDev = allocator->m_hDevice;
    13553  VkBuffer hBuffer = VK_NULL_HANDLE;
    13554  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13555  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13556  if(res == VK_SUCCESS)
    13557  {
    13558  VkMemoryRequirements memReq = {};
    13559  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13560  hDev, hBuffer, &memReq);
    13561 
    13562  res = vmaFindMemoryTypeIndex(
    13563  allocator,
    13564  memReq.memoryTypeBits,
    13565  pAllocationCreateInfo,
    13566  pMemoryTypeIndex);
    13567 
    13568  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13569  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13570  }
    13571  return res;
    13572 }
    13573 
    13575  VmaAllocator allocator,
    13576  const VkImageCreateInfo* pImageCreateInfo,
    13577  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13578  uint32_t* pMemoryTypeIndex)
    13579 {
    13580  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13581  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13582  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13583  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13584 
    13585  const VkDevice hDev = allocator->m_hDevice;
    13586  VkImage hImage = VK_NULL_HANDLE;
    13587  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13588  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13589  if(res == VK_SUCCESS)
    13590  {
    13591  VkMemoryRequirements memReq = {};
    13592  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13593  hDev, hImage, &memReq);
    13594 
    13595  res = vmaFindMemoryTypeIndex(
    13596  allocator,
    13597  memReq.memoryTypeBits,
    13598  pAllocationCreateInfo,
    13599  pMemoryTypeIndex);
    13600 
    13601  allocator->GetVulkanFunctions().vkDestroyImage(
    13602  hDev, hImage, allocator->GetAllocationCallbacks());
    13603  }
    13604  return res;
    13605 }
    13606 
    13607 VkResult vmaCreatePool(
    13608  VmaAllocator allocator,
    13609  const VmaPoolCreateInfo* pCreateInfo,
    13610  VmaPool* pPool)
    13611 {
    13612  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13613 
    13614  VMA_DEBUG_LOG("vmaCreatePool");
    13615 
    13616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13617 
    13618  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13619 
    13620 #if VMA_RECORDING_ENABLED
    13621  if(allocator->GetRecorder() != VMA_NULL)
    13622  {
    13623  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13624  }
    13625 #endif
    13626 
    13627  return res;
    13628 }
    13629 
    13630 void vmaDestroyPool(
    13631  VmaAllocator allocator,
    13632  VmaPool pool)
    13633 {
    13634  VMA_ASSERT(allocator);
    13635 
    13636  if(pool == VK_NULL_HANDLE)
    13637  {
    13638  return;
    13639  }
    13640 
    13641  VMA_DEBUG_LOG("vmaDestroyPool");
    13642 
    13643  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13644 
    13645 #if VMA_RECORDING_ENABLED
    13646  if(allocator->GetRecorder() != VMA_NULL)
    13647  {
    13648  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13649  }
    13650 #endif
    13651 
    13652  allocator->DestroyPool(pool);
    13653 }
    13654 
    13655 void vmaGetPoolStats(
    13656  VmaAllocator allocator,
    13657  VmaPool pool,
    13658  VmaPoolStats* pPoolStats)
    13659 {
    13660  VMA_ASSERT(allocator && pool && pPoolStats);
    13661 
    13662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13663 
    13664  allocator->GetPoolStats(pool, pPoolStats);
    13665 }
    13666 
    13668  VmaAllocator allocator,
    13669  VmaPool pool,
    13670  size_t* pLostAllocationCount)
    13671 {
    13672  VMA_ASSERT(allocator && pool);
    13673 
    13674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13675 
    13676 #if VMA_RECORDING_ENABLED
    13677  if(allocator->GetRecorder() != VMA_NULL)
    13678  {
    13679  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13680  }
    13681 #endif
    13682 
    13683  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13684 }
    13685 
    13686 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13687 {
    13688  VMA_ASSERT(allocator && pool);
    13689 
    13690  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13691 
    13692  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13693 
    13694  return allocator->CheckPoolCorruption(pool);
    13695 }
    13696 
    13697 VkResult vmaAllocateMemory(
    13698  VmaAllocator allocator,
    13699  const VkMemoryRequirements* pVkMemoryRequirements,
    13700  const VmaAllocationCreateInfo* pCreateInfo,
    13701  VmaAllocation* pAllocation,
    13702  VmaAllocationInfo* pAllocationInfo)
    13703 {
    13704  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13705 
    13706  VMA_DEBUG_LOG("vmaAllocateMemory");
    13707 
    13708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13709 
    13710  VkResult result = allocator->AllocateMemory(
    13711  *pVkMemoryRequirements,
    13712  false, // requiresDedicatedAllocation
    13713  false, // prefersDedicatedAllocation
    13714  VK_NULL_HANDLE, // dedicatedBuffer
    13715  VK_NULL_HANDLE, // dedicatedImage
    13716  *pCreateInfo,
    13717  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13718  pAllocation);
    13719 
    13720 #if VMA_RECORDING_ENABLED
    13721  if(allocator->GetRecorder() != VMA_NULL)
    13722  {
    13723  allocator->GetRecorder()->RecordAllocateMemory(
    13724  allocator->GetCurrentFrameIndex(),
    13725  *pVkMemoryRequirements,
    13726  *pCreateInfo,
    13727  *pAllocation);
    13728  }
    13729 #endif
    13730 
    13731  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13732  {
    13733  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13734  }
    13735 
    13736  return result;
    13737 }
    13738 
    13740  VmaAllocator allocator,
    13741  VkBuffer buffer,
    13742  const VmaAllocationCreateInfo* pCreateInfo,
    13743  VmaAllocation* pAllocation,
    13744  VmaAllocationInfo* pAllocationInfo)
    13745 {
    13746  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13747 
    13748  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13749 
    13750  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13751 
    13752  VkMemoryRequirements vkMemReq = {};
    13753  bool requiresDedicatedAllocation = false;
    13754  bool prefersDedicatedAllocation = false;
    13755  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13756  requiresDedicatedAllocation,
    13757  prefersDedicatedAllocation);
    13758 
    13759  VkResult result = allocator->AllocateMemory(
    13760  vkMemReq,
    13761  requiresDedicatedAllocation,
    13762  prefersDedicatedAllocation,
    13763  buffer, // dedicatedBuffer
    13764  VK_NULL_HANDLE, // dedicatedImage
    13765  *pCreateInfo,
    13766  VMA_SUBALLOCATION_TYPE_BUFFER,
    13767  pAllocation);
    13768 
    13769 #if VMA_RECORDING_ENABLED
    13770  if(allocator->GetRecorder() != VMA_NULL)
    13771  {
    13772  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13773  allocator->GetCurrentFrameIndex(),
    13774  vkMemReq,
    13775  requiresDedicatedAllocation,
    13776  prefersDedicatedAllocation,
    13777  *pCreateInfo,
    13778  *pAllocation);
    13779  }
    13780 #endif
    13781 
    13782  if(pAllocationInfo && result == VK_SUCCESS)
    13783  {
    13784  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13785  }
    13786 
    13787  return result;
    13788 }
    13789 
    13790 VkResult vmaAllocateMemoryForImage(
    13791  VmaAllocator allocator,
    13792  VkImage image,
    13793  const VmaAllocationCreateInfo* pCreateInfo,
    13794  VmaAllocation* pAllocation,
    13795  VmaAllocationInfo* pAllocationInfo)
    13796 {
    13797  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13798 
    13799  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13800 
    13801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13802 
    13803  VkMemoryRequirements vkMemReq = {};
    13804  bool requiresDedicatedAllocation = false;
    13805  bool prefersDedicatedAllocation = false;
    13806  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13807  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13808 
    13809  VkResult result = allocator->AllocateMemory(
    13810  vkMemReq,
    13811  requiresDedicatedAllocation,
    13812  prefersDedicatedAllocation,
    13813  VK_NULL_HANDLE, // dedicatedBuffer
    13814  image, // dedicatedImage
    13815  *pCreateInfo,
    13816  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13817  pAllocation);
    13818 
    13819 #if VMA_RECORDING_ENABLED
    13820  if(allocator->GetRecorder() != VMA_NULL)
    13821  {
    13822  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13823  allocator->GetCurrentFrameIndex(),
    13824  vkMemReq,
    13825  requiresDedicatedAllocation,
    13826  prefersDedicatedAllocation,
    13827  *pCreateInfo,
    13828  *pAllocation);
    13829  }
    13830 #endif
    13831 
    13832  if(pAllocationInfo && result == VK_SUCCESS)
    13833  {
    13834  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13835  }
    13836 
    13837  return result;
    13838 }
    13839 
    13840 void vmaFreeMemory(
    13841  VmaAllocator allocator,
    13842  VmaAllocation allocation)
    13843 {
    13844  VMA_ASSERT(allocator);
    13845 
    13846  if(allocation == VK_NULL_HANDLE)
    13847  {
    13848  return;
    13849  }
    13850 
    13851  VMA_DEBUG_LOG("vmaFreeMemory");
    13852 
    13853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13854 
    13855 #if VMA_RECORDING_ENABLED
    13856  if(allocator->GetRecorder() != VMA_NULL)
    13857  {
    13858  allocator->GetRecorder()->RecordFreeMemory(
    13859  allocator->GetCurrentFrameIndex(),
    13860  allocation);
    13861  }
    13862 #endif
    13863 
    13864  allocator->FreeMemory(allocation);
    13865 }
    13866 
    13868  VmaAllocator allocator,
    13869  VmaAllocation allocation,
    13870  VmaAllocationInfo* pAllocationInfo)
    13871 {
    13872  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13873 
    13874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13875 
    13876 #if VMA_RECORDING_ENABLED
    13877  if(allocator->GetRecorder() != VMA_NULL)
    13878  {
    13879  allocator->GetRecorder()->RecordGetAllocationInfo(
    13880  allocator->GetCurrentFrameIndex(),
    13881  allocation);
    13882  }
    13883 #endif
    13884 
    13885  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13886 }
    13887 
    13888 VkBool32 vmaTouchAllocation(
    13889  VmaAllocator allocator,
    13890  VmaAllocation allocation)
    13891 {
    13892  VMA_ASSERT(allocator && allocation);
    13893 
    13894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13895 
    13896 #if VMA_RECORDING_ENABLED
    13897  if(allocator->GetRecorder() != VMA_NULL)
    13898  {
    13899  allocator->GetRecorder()->RecordTouchAllocation(
    13900  allocator->GetCurrentFrameIndex(),
    13901  allocation);
    13902  }
    13903 #endif
    13904 
    13905  return allocator->TouchAllocation(allocation);
    13906 }
    13907 
    13909  VmaAllocator allocator,
    13910  VmaAllocation allocation,
    13911  void* pUserData)
    13912 {
    13913  VMA_ASSERT(allocator && allocation);
    13914 
    13915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13916 
    13917  allocation->SetUserData(allocator, pUserData);
    13918 
    13919 #if VMA_RECORDING_ENABLED
    13920  if(allocator->GetRecorder() != VMA_NULL)
    13921  {
    13922  allocator->GetRecorder()->RecordSetAllocationUserData(
    13923  allocator->GetCurrentFrameIndex(),
    13924  allocation,
    13925  pUserData);
    13926  }
    13927 #endif
    13928 }
    13929 
    13931  VmaAllocator allocator,
    13932  VmaAllocation* pAllocation)
    13933 {
    13934  VMA_ASSERT(allocator && pAllocation);
    13935 
    13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13937 
    13938  allocator->CreateLostAllocation(pAllocation);
    13939 
    13940 #if VMA_RECORDING_ENABLED
    13941  if(allocator->GetRecorder() != VMA_NULL)
    13942  {
    13943  allocator->GetRecorder()->RecordCreateLostAllocation(
    13944  allocator->GetCurrentFrameIndex(),
    13945  *pAllocation);
    13946  }
    13947 #endif
    13948 }
    13949 
    13950 VkResult vmaMapMemory(
    13951  VmaAllocator allocator,
    13952  VmaAllocation allocation,
    13953  void** ppData)
    13954 {
    13955  VMA_ASSERT(allocator && allocation && ppData);
    13956 
    13957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13958 
    13959  VkResult res = allocator->Map(allocation, ppData);
    13960 
    13961 #if VMA_RECORDING_ENABLED
    13962  if(allocator->GetRecorder() != VMA_NULL)
    13963  {
    13964  allocator->GetRecorder()->RecordMapMemory(
    13965  allocator->GetCurrentFrameIndex(),
    13966  allocation);
    13967  }
    13968 #endif
    13969 
    13970  return res;
    13971 }
    13972 
    13973 void vmaUnmapMemory(
    13974  VmaAllocator allocator,
    13975  VmaAllocation allocation)
    13976 {
    13977  VMA_ASSERT(allocator && allocation);
    13978 
    13979  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13980 
    13981 #if VMA_RECORDING_ENABLED
    13982  if(allocator->GetRecorder() != VMA_NULL)
    13983  {
    13984  allocator->GetRecorder()->RecordUnmapMemory(
    13985  allocator->GetCurrentFrameIndex(),
    13986  allocation);
    13987  }
    13988 #endif
    13989 
    13990  allocator->Unmap(allocation);
    13991 }
    13992 
    13993 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13994 {
    13995  VMA_ASSERT(allocator && allocation);
    13996 
    13997  VMA_DEBUG_LOG("vmaFlushAllocation");
    13998 
    13999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14000 
    14001  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14002 
    14003 #if VMA_RECORDING_ENABLED
    14004  if(allocator->GetRecorder() != VMA_NULL)
    14005  {
    14006  allocator->GetRecorder()->RecordFlushAllocation(
    14007  allocator->GetCurrentFrameIndex(),
    14008  allocation, offset, size);
    14009  }
    14010 #endif
    14011 }
    14012 
    14013 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14014 {
    14015  VMA_ASSERT(allocator && allocation);
    14016 
    14017  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14018 
    14019  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14020 
    14021  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14022 
    14023 #if VMA_RECORDING_ENABLED
    14024  if(allocator->GetRecorder() != VMA_NULL)
    14025  {
    14026  allocator->GetRecorder()->RecordInvalidateAllocation(
    14027  allocator->GetCurrentFrameIndex(),
    14028  allocation, offset, size);
    14029  }
    14030 #endif
    14031 }
    14032 
    14033 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14034 {
    14035  VMA_ASSERT(allocator);
    14036 
    14037  VMA_DEBUG_LOG("vmaCheckCorruption");
    14038 
    14039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14040 
    14041  return allocator->CheckCorruption(memoryTypeBits);
    14042 }
    14043 
    14044 VkResult vmaDefragment(
    14045  VmaAllocator allocator,
    14046  VmaAllocation* pAllocations,
    14047  size_t allocationCount,
    14048  VkBool32* pAllocationsChanged,
    14049  const VmaDefragmentationInfo *pDefragmentationInfo,
    14050  VmaDefragmentationStats* pDefragmentationStats)
    14051 {
    14052  VMA_ASSERT(allocator && pAllocations);
    14053 
    14054  VMA_DEBUG_LOG("vmaDefragment");
    14055 
    14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14057 
    14058  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14059 }
    14060 
    14061 VkResult vmaBindBufferMemory(
    14062  VmaAllocator allocator,
    14063  VmaAllocation allocation,
    14064  VkBuffer buffer)
    14065 {
    14066  VMA_ASSERT(allocator && allocation && buffer);
    14067 
    14068  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14069 
    14070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14071 
    14072  return allocator->BindBufferMemory(allocation, buffer);
    14073 }
    14074 
    14075 VkResult vmaBindImageMemory(
    14076  VmaAllocator allocator,
    14077  VmaAllocation allocation,
    14078  VkImage image)
    14079 {
    14080  VMA_ASSERT(allocator && allocation && image);
    14081 
    14082  VMA_DEBUG_LOG("vmaBindImageMemory");
    14083 
    14084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14085 
    14086  return allocator->BindImageMemory(allocation, image);
    14087 }
    14088 
    14089 VkResult vmaCreateBuffer(
    14090  VmaAllocator allocator,
    14091  const VkBufferCreateInfo* pBufferCreateInfo,
    14092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14093  VkBuffer* pBuffer,
    14094  VmaAllocation* pAllocation,
    14095  VmaAllocationInfo* pAllocationInfo)
    14096 {
    14097  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14098 
    14099  VMA_DEBUG_LOG("vmaCreateBuffer");
    14100 
    14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14102 
    14103  *pBuffer = VK_NULL_HANDLE;
    14104  *pAllocation = VK_NULL_HANDLE;
    14105 
    14106  // 1. Create VkBuffer.
    14107  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14108  allocator->m_hDevice,
    14109  pBufferCreateInfo,
    14110  allocator->GetAllocationCallbacks(),
    14111  pBuffer);
    14112  if(res >= 0)
    14113  {
    14114  // 2. vkGetBufferMemoryRequirements.
    14115  VkMemoryRequirements vkMemReq = {};
    14116  bool requiresDedicatedAllocation = false;
    14117  bool prefersDedicatedAllocation = false;
    14118  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14119  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14120 
    14121  // Make sure alignment requirements for specific buffer usages reported
    14122  // in Physical Device Properties are included in alignment reported by memory requirements.
    14123  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14124  {
    14125  VMA_ASSERT(vkMemReq.alignment %
    14126  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14127  }
    14128  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14129  {
    14130  VMA_ASSERT(vkMemReq.alignment %
    14131  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14132  }
    14133  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14134  {
    14135  VMA_ASSERT(vkMemReq.alignment %
    14136  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14137  }
    14138 
    14139  // 3. Allocate memory using allocator.
    14140  res = allocator->AllocateMemory(
    14141  vkMemReq,
    14142  requiresDedicatedAllocation,
    14143  prefersDedicatedAllocation,
    14144  *pBuffer, // dedicatedBuffer
    14145  VK_NULL_HANDLE, // dedicatedImage
    14146  *pAllocationCreateInfo,
    14147  VMA_SUBALLOCATION_TYPE_BUFFER,
    14148  pAllocation);
    14149 
    14150 #if VMA_RECORDING_ENABLED
    14151  if(allocator->GetRecorder() != VMA_NULL)
    14152  {
    14153  allocator->GetRecorder()->RecordCreateBuffer(
    14154  allocator->GetCurrentFrameIndex(),
    14155  *pBufferCreateInfo,
    14156  *pAllocationCreateInfo,
    14157  *pAllocation);
    14158  }
    14159 #endif
    14160 
    14161  if(res >= 0)
    14162  {
    14163  // 3. Bind buffer with memory.
    14164  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14165  if(res >= 0)
    14166  {
    14167  // All steps succeeded.
    14168  #if VMA_STATS_STRING_ENABLED
    14169  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14170  #endif
    14171  if(pAllocationInfo != VMA_NULL)
    14172  {
    14173  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14174  }
    14175 
    14176  return VK_SUCCESS;
    14177  }
    14178  allocator->FreeMemory(*pAllocation);
    14179  *pAllocation = VK_NULL_HANDLE;
    14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14181  *pBuffer = VK_NULL_HANDLE;
    14182  return res;
    14183  }
    14184  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14185  *pBuffer = VK_NULL_HANDLE;
    14186  return res;
    14187  }
    14188  return res;
    14189 }
    14190 
    14191 void vmaDestroyBuffer(
    14192  VmaAllocator allocator,
    14193  VkBuffer buffer,
    14194  VmaAllocation allocation)
    14195 {
    14196  VMA_ASSERT(allocator);
    14197 
    14198  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14199  {
    14200  return;
    14201  }
    14202 
    14203  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14204 
    14205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14206 
    14207 #if VMA_RECORDING_ENABLED
    14208  if(allocator->GetRecorder() != VMA_NULL)
    14209  {
    14210  allocator->GetRecorder()->RecordDestroyBuffer(
    14211  allocator->GetCurrentFrameIndex(),
    14212  allocation);
    14213  }
    14214 #endif
    14215 
    14216  if(buffer != VK_NULL_HANDLE)
    14217  {
    14218  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14219  }
    14220 
    14221  if(allocation != VK_NULL_HANDLE)
    14222  {
    14223  allocator->FreeMemory(allocation);
    14224  }
    14225 }
    14226 
    14227 VkResult vmaCreateImage(
    14228  VmaAllocator allocator,
    14229  const VkImageCreateInfo* pImageCreateInfo,
    14230  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14231  VkImage* pImage,
    14232  VmaAllocation* pAllocation,
    14233  VmaAllocationInfo* pAllocationInfo)
    14234 {
    14235  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14236 
    14237  VMA_DEBUG_LOG("vmaCreateImage");
    14238 
    14239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14240 
    14241  *pImage = VK_NULL_HANDLE;
    14242  *pAllocation = VK_NULL_HANDLE;
    14243 
    14244  // 1. Create VkImage.
    14245  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14246  allocator->m_hDevice,
    14247  pImageCreateInfo,
    14248  allocator->GetAllocationCallbacks(),
    14249  pImage);
    14250  if(res >= 0)
    14251  {
    14252  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14253  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14254  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14255 
    14256  // 2. Allocate memory using allocator.
    14257  VkMemoryRequirements vkMemReq = {};
    14258  bool requiresDedicatedAllocation = false;
    14259  bool prefersDedicatedAllocation = false;
    14260  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14261  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14262 
    14263  res = allocator->AllocateMemory(
    14264  vkMemReq,
    14265  requiresDedicatedAllocation,
    14266  prefersDedicatedAllocation,
    14267  VK_NULL_HANDLE, // dedicatedBuffer
    14268  *pImage, // dedicatedImage
    14269  *pAllocationCreateInfo,
    14270  suballocType,
    14271  pAllocation);
    14272 
    14273 #if VMA_RECORDING_ENABLED
    14274  if(allocator->GetRecorder() != VMA_NULL)
    14275  {
    14276  allocator->GetRecorder()->RecordCreateImage(
    14277  allocator->GetCurrentFrameIndex(),
    14278  *pImageCreateInfo,
    14279  *pAllocationCreateInfo,
    14280  *pAllocation);
    14281  }
    14282 #endif
    14283 
    14284  if(res >= 0)
    14285  {
    14286  // 3. Bind image with memory.
    14287  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14288  if(res >= 0)
    14289  {
    14290  // All steps succeeded.
    14291  #if VMA_STATS_STRING_ENABLED
    14292  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14293  #endif
    14294  if(pAllocationInfo != VMA_NULL)
    14295  {
    14296  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14297  }
    14298 
    14299  return VK_SUCCESS;
    14300  }
    14301  allocator->FreeMemory(*pAllocation);
    14302  *pAllocation = VK_NULL_HANDLE;
    14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14304  *pImage = VK_NULL_HANDLE;
    14305  return res;
    14306  }
    14307  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14308  *pImage = VK_NULL_HANDLE;
    14309  return res;
    14310  }
    14311  return res;
    14312 }
    14313 
    14314 void vmaDestroyImage(
    14315  VmaAllocator allocator,
    14316  VkImage image,
    14317  VmaAllocation allocation)
    14318 {
    14319  VMA_ASSERT(allocator);
    14320 
    14321  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14322  {
    14323  return;
    14324  }
    14325 
    14326  VMA_DEBUG_LOG("vmaDestroyImage");
    14327 
    14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14329 
    14330 #if VMA_RECORDING_ENABLED
    14331  if(allocator->GetRecorder() != VMA_NULL)
    14332  {
    14333  allocator->GetRecorder()->RecordDestroyImage(
    14334  allocator->GetCurrentFrameIndex(),
    14335  allocation);
    14336  }
    14337 #endif
    14338 
    14339  if(image != VK_NULL_HANDLE)
    14340  {
    14341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14342  }
    14343  if(allocation != VK_NULL_HANDLE)
    14344  {
    14345  allocator->FreeMemory(allocation);
    14346  }
    14347 }
    14348 
    14349 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1575
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1876
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1628
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1632
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1602
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2194
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1583
    +
    Definition: vk_mem_alloc.h:1606
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2198
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1587
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1829
    -
    Definition: vk_mem_alloc.h:1932
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1575
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2294
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2539
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2083
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1472
    +
    Definition: vk_mem_alloc.h:1833
    +
    Definition: vk_mem_alloc.h:1936
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1579
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2298
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1629
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2543
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2087
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1476
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2175
    -
    Definition: vk_mem_alloc.h:1909
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1564
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1982
    -
    Definition: vk_mem_alloc.h:1856
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1637
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2111
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2179
    +
    Definition: vk_mem_alloc.h:1913
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1568
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1986
    +
    Definition: vk_mem_alloc.h:1860
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1641
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2115
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1690
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1622
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1694
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1626
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1860
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1864
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1762
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1580
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1761
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2543
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1766
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1584
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1765
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2547
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1654
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1771
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2551
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1966
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2534
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1581
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1506
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1658
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1775
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2555
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1970
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2538
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1585
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1510
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1631
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1635
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2125
    -
    Definition: vk_mem_alloc.h:2119
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1697
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2304
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2129
    +
    Definition: vk_mem_alloc.h:2123
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1701
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2308
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1576
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1600
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2003
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2145
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2181
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1580
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1604
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2007
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2149
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2185
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1562
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2128
    +
    Definition: vk_mem_alloc.h:1566
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2132
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1807
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1811
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2529
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2533
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2547
    -
    Definition: vk_mem_alloc.h:1846
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1990
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1579
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2551
    +
    Definition: vk_mem_alloc.h:1850
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1994
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1583
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1767
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1512
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1771
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1516
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1533
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1537
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1604
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1538
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2549
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1608
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1542
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2553
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1977
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2191
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1981
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2195
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1572
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1750
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2140
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1525
    -
    Definition: vk_mem_alloc.h:2115
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1576
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1754
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2144
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1529
    +
    Definition: vk_mem_alloc.h:2119
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1916
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1763
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1529
    -
    Definition: vk_mem_alloc.h:1940
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2131
    -
    Definition: vk_mem_alloc.h:1855
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1578
    +
    Definition: vk_mem_alloc.h:1920
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1767
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1533
    +
    Definition: vk_mem_alloc.h:1944
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2135
    +
    Definition: vk_mem_alloc.h:1859
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1582
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1972
    -
    Definition: vk_mem_alloc.h:1963
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1976
    +
    Definition: vk_mem_alloc.h:1967
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1753
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1574
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2153
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1640
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2184
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1961
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1996
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1757
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1578
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2157
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1644
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2188
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1965
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2000
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1678
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1769
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1896
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1762
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1682
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1773
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1900
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1766
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1585
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1610
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1527
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1584
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1589
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1614
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1531
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1588
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2167
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1577
    -
    Definition: vk_mem_alloc.h:1927
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2171
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1581
    +
    Definition: vk_mem_alloc.h:1931
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1618
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2318
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1634
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1762
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1759
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1622
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2322
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1638
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1766
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1763
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2172
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2176
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1936
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2299
    -
    Definition: vk_mem_alloc.h:1947
    -
    Definition: vk_mem_alloc.h:1959
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2545
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1570
    +
    Definition: vk_mem_alloc.h:1940
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2303
    +
    Definition: vk_mem_alloc.h:1951
    +
    Definition: vk_mem_alloc.h:1963
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2549
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1574
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1757
    -
    Definition: vk_mem_alloc.h:1812
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2121
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1761
    +
    Definition: vk_mem_alloc.h:1816
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2125
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1607
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1755
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1582
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1586
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1883
    -
    Definition: vk_mem_alloc.h:1954
    -
    Definition: vk_mem_alloc.h:1839
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2313
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1611
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1759
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1586
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1590
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1887
    +
    Definition: vk_mem_alloc.h:1958
    +
    Definition: vk_mem_alloc.h:1843
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2317
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1560
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1564
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1573
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2100
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2280
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1577
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2104
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2284
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1944
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2065
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1763
    +
    Definition: vk_mem_alloc.h:1948
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2069
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1767
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1594
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1770
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1598
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1774
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2178
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1763
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2182
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1767
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2285
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2289