//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    // Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    // Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

// Flags for created VmaAllocator. Use VmaAllocatorCreateFlagBits enum.
typedef VkFlags VmaAllocatorCreateFlags;

// Pointers to some Vulkan functions - a subset used by the library.
// Used in VmaAllocatorCreateInfo::pVulkanFunctions.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    // Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    // Vulkan physical device. It must be valid throughout the whole lifetime of created allocator.
    VkPhysicalDevice physicalDevice;
    // Vulkan device. It must be valid throughout the whole lifetime of created allocator.
    VkDevice device;
    // Preferred size of a single VkDeviceMemory block to be allocated from large heaps, in bytes. Optional - leave 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;
    // Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    // Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    // Maximum number of additional frames that are in use at the same time as current frame. Optional.
    uint32_t frameInUseCount;
    // Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    // Pointers to Vulkan functions. Can be null if VMA_STATIC_VULKAN_FUNCTIONS is left defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

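/*
Example - a minimal sketch, assuming `physicalDevice` and `device` are a valid
VkPhysicalDevice and VkDevice owned by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);

When no longer needed, destroy it with vmaDestroyAllocator(allocator).
*/
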
// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

// PhysicalDeviceProperties are fetched from physicalDevice by the allocator. You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

// Given a memory type index, returns its VkMemoryPropertyFlags. This is just a convenience function; the same information can be obtained from vmaGetMemoryProperties().
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

// Sets index of the current frame. Required for allocations that can become lost.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    // Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    // Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    // Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    // Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    // Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

// Retrieves statistics from current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

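/*
Example - a sketch of querying overall statistics, assuming `allocator` is a
valid VmaAllocator:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    VkDeviceSize totalUsed = stats.total.usedBytes;
*/
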
#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

// Builds and returns statistics as string in JSON format.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

// Intended usage of memory.
typedef enum VmaMemoryUsage
{
    // No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    // Memory will be used on device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    // Memory will be mappable on host.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    // Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    // Memory mappable on host and cached, preferred for data written by GPU and read back on CPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

// Flags to be passed as VmaAllocationCreateInfo::flags. Use VmaAllocationCreateFlagBits enum.
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    // Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    // Intended usage of memory. Can be left VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way.
    VmaMemoryUsage usage;
    // Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    // Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    // Bitmask containing one bit set for every acceptable memory type index. 0 is equivalent to UINT32_MAX - all types acceptable.
    uint32_t memoryTypeBits;
    // Pool that this allocation should be created in. Optional - leave null to allocate from default pools.
    VmaPool pool;
    // Custom general-purpose pointer that will be stored in VmaAllocation and can be read back as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

/*
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
Returns VK_ERROR_FEATURE_NOT_PRESENT if no suitable memory type was found.
*/
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

// Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

// Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

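/*
Example - a sketch of choosing a memory type for CPU-visible staging memory;
UINT32_MAX for memoryTypeBits means any memory type is acceptable:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/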

// Flags to be passed as VmaPoolCreateInfo::flags. Use VmaPoolCreateFlagBits enum.
typedef VkFlags VmaPoolCreateFlags;

// Describes parameter of created VmaPool.
typedef struct VmaPoolCreateInfo {
    // Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    // Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    // Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional - leave 0 to use default.
    VkDeviceSize blockSize;
    // Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    // Maximum number of blocks that can be allocated in this pool. Optional - 0 means no limit.
    size_t maxBlockCount;
    // Maximum number of additional frames that are in use at the same time as current frame. Optional.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

// Describes parameter of existing VmaPool.
typedef struct VmaPoolStats {
    // Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    // Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    // Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    // Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    // Size of the largest continuous free memory region. It might be possible to allocate a new buffer of this size from this pool.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

// Allocates Vulkan device memory and creates VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

// Retrieves statistics of existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

// Marks all allocations in given pool as lost if they are not used in current frame or within VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

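/*
Example - a sketch of creating a custom pool, assuming `memTypeIndex` was
obtained earlier, e.g. from vmaFindMemoryTypeIndex():

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/
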
VK_DEFINE_HANDLE(VmaAllocation)

// Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    // Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    // Handle to Vulkan memory object. The same memory object can be shared by multiple allocations.
    VkDeviceMemory deviceMemory;
    // Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    // Size of this allocation, in bytes. It never changes.
    VkDeviceSize size;
    // Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    // Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

// General purpose memory allocation. You should free the memory using vmaFreeMemory().
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

// Returns current information about specified allocation and atomically marks it as used in current frame.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

// Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

// Sets pUserData in given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

// Creates a new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/*
Maps memory represented by given allocation and returns pointer to it.
The allocation must come from a memory type that is HOST_VISIBLE. Each call
to vmaMapMemory() must be matched by a call to vmaUnmapMemory().
*/
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

// Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

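/*
Example - a sketch of a map/write/unmap cycle, assuming `allocation` lives in a
HOST_VISIBLE memory type and `srcData`/`dataSize` come from the application:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
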
// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    // Maximum total number of bytes that can be copied while moving allocations to different places. Default VK_WHOLE_SIZE means no limit.
    VkDeviceSize maxBytesToMove;
    // Maximum number of allocations that can be moved to different places. Default UINT32_MAX means no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    // Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    // Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    // Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    // Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

// Compacts memory by moving allocations. Only allocations passed in pAllocations array can be moved.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

// Creates a buffer, allocates memory for it, and binds them together.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

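/*
Example - a sketch of creating a GPU-only vertex buffer together with its
memory, assuming `myBufferSize` comes from the application:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = myBufferSize;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);

Destroy both at once with vmaDestroyBuffer(allocator, buffer, allocation).
*/
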
// Destroys Vulkan buffer and frees allocated memory. It is safe to pass null as buffer and/or allocation.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

// Destroys Vulkan image and frees allocated memory. It is safe to pass null as image and/or allocation.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif
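
/*
Example - a sketch of supplying your own pointers instead (assuming the
application fetched them, e.g. with vkGetDeviceProcAddr):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory =
        (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // Fill the remaining members of VmaVulkanFunctions the same way.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/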

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    // Set to 1 to use best-fit strategy when searching for a free suballocation
    // (the smallest suitable free range is chosen). Set to 0 to use worst-fit
    // (the largest one is chosen).
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Set to 1 to make every allocation use its own, dedicated VkDeviceMemory block. For debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes. Set to a power of two greater than 1 for debugging purposes only.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set to 1 to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to a larger power of two for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small": 1 GiB.
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap: 256 MiB.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
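
// Example (illustrative): VmaCountBitsSet(0x2C) == 3, since 0x2C == 0b101100.
// The function sums adjacent 1-, 2-, 4-, 8- and 16-bit partial counts in
// parallel instead of looping over all 32 bits.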

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
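
// Example (illustrative): with pageSize = 4096, a resource of size 4000 at
// offset 0 ends on page 0, and a resource starting at offset 4000 starts on
// that same page, so VmaBlocksOnSamePage(0, 4000, 4000, 4096) == true.
// If the second resource instead starts at offset 4096, it begins on the next
// page and the function returns false.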

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
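
// Example (illustrative): for a sorted array {1, 3, 3, 7} with cmp = operator<,
// key 3 yields an iterator to the first 3 (index 1), and key 4 yields an
// iterator to 7 (index 3) - the position where 4 would be inserted.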

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
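
// Example (illustrative): vma_new expands to placement-new on memory obtained
// through the callbacks, keeping allocation and construction separate:
//     MyStruct* obj = vma_new(pAllocationCallbacks, MyStruct)();
//     vma_delete(pAllocationCallbacks, obj);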

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
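
// Example (illustrative): a pool of 128 items per block; Alloc() pops the head
// of a block's free list in O(1), Free() pushes the item back:
//     VmaPoolAllocator<int> pool(pAllocationCallbacks, 128);
//     int* p = pool.Alloc();
//     *p = 42;
//     pool.Free(p);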

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0
3467 
3469 
3470 class VmaDeviceMemoryBlock;
3471 
3472 struct VmaAllocation_T
3473 {
3474 private:
3475  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3476 
3477  enum FLAGS
3478  {
3479  FLAG_USER_DATA_STRING = 0x01,
3480  };
3481 
3482 public:
3483  enum ALLOCATION_TYPE
3484  {
3485  ALLOCATION_TYPE_NONE,
3486  ALLOCATION_TYPE_BLOCK,
3487  ALLOCATION_TYPE_DEDICATED,
3488  };
3489 
3490  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3491  m_Alignment(1),
3492  m_Size(0),
3493  m_pUserData(VMA_NULL),
3494  m_LastUseFrameIndex(currentFrameIndex),
3495  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3496  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3497  m_MapCount(0),
3498  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3499  {
3500  }
3501 
3502  ~VmaAllocation_T()
3503  {
3504  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3505 
3506  // Check if owned string was freed.
3507  VMA_ASSERT(m_pUserData == VMA_NULL);
3508  }
3509 
3510  void InitBlockAllocation(
3511  VmaPool hPool,
3512  VmaDeviceMemoryBlock* block,
3513  VkDeviceSize offset,
3514  VkDeviceSize alignment,
3515  VkDeviceSize size,
3516  VmaSuballocationType suballocationType,
3517  bool mapped,
3518  bool canBecomeLost)
3519  {
3520  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3521  VMA_ASSERT(block != VMA_NULL);
3522  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3523  m_Alignment = alignment;
3524  m_Size = size;
3525  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3526  m_SuballocationType = (uint8_t)suballocationType;
3527  m_BlockAllocation.m_hPool = hPool;
3528  m_BlockAllocation.m_Block = block;
3529  m_BlockAllocation.m_Offset = offset;
3530  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3531  }
3532 
3533  void InitLost()
3534  {
3535  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3536  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3537  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3538  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3539  m_BlockAllocation.m_Block = VMA_NULL;
3540  m_BlockAllocation.m_Offset = 0;
3541  m_BlockAllocation.m_CanBecomeLost = true;
3542  }
3543 
3544  void ChangeBlockAllocation(
3545  VmaAllocator hAllocator,
3546  VmaDeviceMemoryBlock* block,
3547  VkDeviceSize offset);
3548 
3549  // pMappedData not null means allocation is created with MAPPED flag.
3550  void InitDedicatedAllocation(
3551  uint32_t memoryTypeIndex,
3552  VkDeviceMemory hMemory,
3553  VmaSuballocationType suballocationType,
3554  void* pMappedData,
3555  VkDeviceSize size)
3556  {
3557  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3558  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3559  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3560  m_Alignment = 0;
3561  m_Size = size;
3562  m_SuballocationType = (uint8_t)suballocationType;
3563  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3564  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3565  m_DedicatedAllocation.m_hMemory = hMemory;
3566  m_DedicatedAllocation.m_pMappedData = pMappedData;
3567  }
3568 
3569  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3570  VkDeviceSize GetAlignment() const { return m_Alignment; }
3571  VkDeviceSize GetSize() const { return m_Size; }
3572  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3573  void* GetUserData() const { return m_pUserData; }
3574  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3575  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3576 
3577  VmaDeviceMemoryBlock* GetBlock() const
3578  {
3579  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3580  return m_BlockAllocation.m_Block;
3581  }
3582  VkDeviceSize GetOffset() const;
3583  VkDeviceMemory GetMemory() const;
3584  uint32_t GetMemoryTypeIndex() const;
3585  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3586  void* GetMappedData() const;
3587  bool CanBecomeLost() const;
3588  VmaPool GetPool() const;
3589 
3590  uint32_t GetLastUseFrameIndex() const
3591  {
3592  return m_LastUseFrameIndex.load();
3593  }
3594  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3595  {
3596  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3597  }
3598  /*
3599  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3600  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3601  - Else, returns false.
3602 
3603  If hAllocation is already lost, assert - you should not call it then.
3604  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3605  */
3606  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3607 
3608  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3609  {
3610  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3611  outInfo.blockCount = 1;
3612  outInfo.allocationCount = 1;
3613  outInfo.unusedRangeCount = 0;
3614  outInfo.usedBytes = m_Size;
3615  outInfo.unusedBytes = 0;
3616  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3617  outInfo.unusedRangeSizeMin = UINT64_MAX;
3618  outInfo.unusedRangeSizeMax = 0;
3619  }
3620 
3621  void BlockAllocMap();
3622  void BlockAllocUnmap();
3623  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3624  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3625 
3626 private:
3627  VkDeviceSize m_Alignment;
3628  VkDeviceSize m_Size;
3629  void* m_pUserData;
3630  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3631  uint8_t m_Type; // ALLOCATION_TYPE
3632  uint8_t m_SuballocationType; // VmaSuballocationType
3633  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3634  // Bits with mask 0x7F are a reference counter for vmaMapMemory()/vmaUnmapMemory() (see the sketch after this class).
3635  uint8_t m_MapCount;
3636  uint8_t m_Flags; // enum FLAGS
3637 
3638  // Allocation out of VmaDeviceMemoryBlock.
3639  struct BlockAllocation
3640  {
3641  VmaPool m_hPool; // Null if belongs to general memory.
3642  VmaDeviceMemoryBlock* m_Block;
3643  VkDeviceSize m_Offset;
3644  bool m_CanBecomeLost;
3645  };
3646 
3647  // Allocation for an object that has its own private VkDeviceMemory.
3648  struct DedicatedAllocation
3649  {
3650  uint32_t m_MemoryTypeIndex;
3651  VkDeviceMemory m_hMemory;
3652  void* m_pMappedData; // Not null means memory is mapped.
3653  };
3654 
3655  union
3656  {
3657  // Allocation out of VmaDeviceMemoryBlock.
3658  BlockAllocation m_BlockAllocation;
3659  // Allocation for an object that has its own private VkDeviceMemory.
3660  DedicatedAllocation m_DedicatedAllocation;
3661  };
3662 
3663  void FreeUserDataString(VmaAllocator hAllocator);
3664 };
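// Editorial sketch (not part of the library) of how the m_MapCount byte above
// decomposes, assuming MAP_COUNT_FLAG_PERSISTENT_MAP == 0x80:
//
//   const uint8_t mapCount = 0x82;                  // example value
//   const bool persistent = (mapCount & 0x80) != 0; // created with VMA_ALLOCATION_CREATE_MAPPED_BIT
//   const uint8_t mapRefCount = mapCount & 0x7F;    // vmaMapMemory()/vmaUnmapMemory() nesting, here 2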
3665 
3666 /*
3667 Represents a region of a VmaDeviceMemoryBlock that is either assigned to a
3668 VmaAllocation (used) or free.
3669 */
3670 struct VmaSuballocation
3671 {
3672  VkDeviceSize offset;
3673  VkDeviceSize size;
3674  VmaAllocation hAllocation;
3675  VmaSuballocationType type;
3676 };
3677 
3678 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3679 
3680 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3681 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3682 
3683 /*
3684 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3685 
3686 If canMakeOtherLost was false:
3687 - item points to a FREE suballocation.
3688 - itemsToMakeLostCount is 0.
3689 
3690 If canMakeOtherLost was true:
3691 - item points to first of sequence of suballocations, which are either FREE,
3692  or point to VmaAllocations that can become lost.
3693 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3694  the requested allocation to succeed.
3695 */
3696 struct VmaAllocationRequest
3697 {
3698  VkDeviceSize offset;
3699  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3700  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3701  VmaSuballocationList::iterator item;
3702  size_t itemsToMakeLostCount;
3703 
3704  VkDeviceSize CalcCost() const
3705  {
3706  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3707  }
3708 };
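// Editorial example (values illustrative) of how CalcCost() ranks candidates:
//
//   VmaAllocationRequest req = {};
//   req.sumItemSize = 262144;      // bytes of allocations that would become lost
//   req.itemsToMakeLostCount = 2;  // number of allocations to make lost
//   const VkDeviceSize cost = req.CalcCost(); // 262144 + 2 * 1048576 = 2359296
//
// A candidate that fits into plain free space (sumItemSize == 0,
// itemsToMakeLostCount == 0) therefore always compares as cheaper.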
3709 
3710 /*
3711 Data structure used for bookkeeping of allocations and unused ranges of memory
3712 in a single VkDeviceMemory block.
3713 */
3714 class VmaBlockMetadata
3715 {
3716 public:
3717  VmaBlockMetadata(VmaAllocator hAllocator);
3718  ~VmaBlockMetadata();
3719  void Init(VkDeviceSize size);
3720 
3721  // Validates all data structures inside this object. If not valid, returns false.
3722  bool Validate() const;
3723  VkDeviceSize GetSize() const { return m_Size; }
3724  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3725  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3726  VkDeviceSize GetUnusedRangeSizeMax() const;
3727  // Returns true if this block is empty - contains only a single free suballocation.
3728  bool IsEmpty() const;
3729 
3730  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3731  void AddPoolStats(VmaPoolStats& inoutStats) const;
3732 
3733 #if VMA_STATS_STRING_ENABLED
3734  void PrintDetailedMap(class VmaJsonWriter& json) const;
3735 #endif
3736 
3737  // Creates a trivial request for the case when the block is empty.
3738  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3739 
3740  // Tries to find a place for suballocation with given parameters inside this block.
3741  // If succeeded, fills pAllocationRequest and returns true.
3742  // If failed, returns false.
3743  bool CreateAllocationRequest(
3744  uint32_t currentFrameIndex,
3745  uint32_t frameInUseCount,
3746  VkDeviceSize bufferImageGranularity,
3747  VkDeviceSize allocSize,
3748  VkDeviceSize allocAlignment,
3749  VmaSuballocationType allocType,
3750  bool canMakeOtherLost,
3751  VmaAllocationRequest* pAllocationRequest);
3752 
3753  bool MakeRequestedAllocationsLost(
3754  uint32_t currentFrameIndex,
3755  uint32_t frameInUseCount,
3756  VmaAllocationRequest* pAllocationRequest);
3757 
3758  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3759 
3760  // Makes actual allocation based on request. Request must already be checked and valid.
3761  void Alloc(
3762  const VmaAllocationRequest& request,
3763  VmaSuballocationType type,
3764  VkDeviceSize allocSize,
3765  VmaAllocation hAllocation);
3766 
3767  // Frees suballocation assigned to given memory region.
3768  void Free(const VmaAllocation allocation);
3769  void FreeAtOffset(VkDeviceSize offset);
3770 
3771 private:
3772  VkDeviceSize m_Size;
3773  uint32_t m_FreeCount;
3774  VkDeviceSize m_SumFreeSize;
3775  VmaSuballocationList m_Suballocations;
3776  // Suballocations that are free and have size greater than a certain threshold.
3777  // Sorted by size, ascending.
3778  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3779 
3780  bool ValidateFreeSuballocationList() const;
3781 
3782  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3783  // If yes, fills pOffset and returns true. If no, returns false.
3784  bool CheckAllocation(
3785  uint32_t currentFrameIndex,
3786  uint32_t frameInUseCount,
3787  VkDeviceSize bufferImageGranularity,
3788  VkDeviceSize allocSize,
3789  VkDeviceSize allocAlignment,
3790  VmaSuballocationType allocType,
3791  VmaSuballocationList::const_iterator suballocItem,
3792  bool canMakeOtherLost,
3793  VkDeviceSize* pOffset,
3794  size_t* itemsToMakeLostCount,
3795  VkDeviceSize* pSumFreeSize,
3796  VkDeviceSize* pSumItemSize) const;
3797  // Given a free suballocation, merges it with the following one, which must also be free.
3798  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3799  // Releases the given suballocation, making it free.
3800  // Merges it with adjacent free suballocations if applicable.
3801  // Returns an iterator to the new free suballocation at this place.
3802  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3803  // Given a free suballocation, inserts it into the sorted list
3804  // m_FreeSuballocationsBySize if it is suitable (large enough to register).
3805  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3806  // Given a free suballocation, removes it from the sorted list
3807  // m_FreeSuballocationsBySize if it was registered there.
3808  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3809 };
3810 
3811 // Helper class that represents mapped memory. Synchronized internally.
3812 class VmaDeviceMemoryMapping
3813 {
3814 public:
3815  VmaDeviceMemoryMapping();
3816  ~VmaDeviceMemoryMapping();
3817 
3818  void* GetMappedData() const { return m_pMappedData; }
3819 
3820  // ppData can be null.
3821  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3822  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3823 
3824 private:
3825  VMA_MUTEX m_Mutex;
3826  uint32_t m_MapCount;
3827  void* m_pMappedData;
3828 };
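// Editorial note, under assumed semantics (the implementation appears later
// in this file): the first Map() call invokes vkMapMemory and caches the
// pointer; subsequent calls only increment m_MapCount and return the cached
// m_pMappedData; Unmap() decrements the counter and invokes vkUnmapMemory
// once it reaches zero. All of this happens under m_Mutex.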
3829 
3830 /*
3831 Represents a single block of device memory (`VkDeviceMemory`) with all the
3832 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3833 
3834 Thread-safety: This class must be externally synchronized.
3835 */
3836 class VmaDeviceMemoryBlock
3837 {
3838 public:
3839  uint32_t m_MemoryTypeIndex;
3840  VkDeviceMemory m_hMemory;
3841  VmaDeviceMemoryMapping m_Mapping;
3842  VmaBlockMetadata m_Metadata;
3843 
3844  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3845 
3846  ~VmaDeviceMemoryBlock()
3847  {
3848  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3849  }
3850 
3851  // Always call after construction.
3852  void Init(
3853  uint32_t newMemoryTypeIndex,
3854  VkDeviceMemory newMemory,
3855  VkDeviceSize newSize);
3856  // Always call before destruction.
3857  void Destroy(VmaAllocator allocator);
3858 
3859  // Validates all data structures inside this object. If not valid, returns false.
3860  bool Validate() const;
3861 
3862  // ppData can be null.
3863  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3864  void Unmap(VmaAllocator hAllocator, uint32_t count);
3865 };
3866 
3867 struct VmaPointerLess
3868 {
3869  bool operator()(const void* lhs, const void* rhs) const
3870  {
3871  return lhs < rhs;
3872  }
3873 };
3874 
3875 class VmaDefragmentator;
3876 
3877 /*
3878 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3879 Vulkan memory type.
3880 
3881 Synchronized internally with a mutex.
3882 */
3883 struct VmaBlockVector
3884 {
3885  VmaBlockVector(
3886  VmaAllocator hAllocator,
3887  uint32_t memoryTypeIndex,
3888  VkDeviceSize preferredBlockSize,
3889  size_t minBlockCount,
3890  size_t maxBlockCount,
3891  VkDeviceSize bufferImageGranularity,
3892  uint32_t frameInUseCount,
3893  bool isCustomPool);
3894  ~VmaBlockVector();
3895 
3896  VkResult CreateMinBlocks();
3897 
3898  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3899  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3900  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3901  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3902 
3903  void GetPoolStats(VmaPoolStats* pStats);
3904 
3905  bool IsEmpty() const { return m_Blocks.empty(); }
3906 
3907  VkResult Allocate(
3908  VmaPool hCurrentPool,
3909  uint32_t currentFrameIndex,
3910  const VkMemoryRequirements& vkMemReq,
3911  const VmaAllocationCreateInfo& createInfo,
3912  VmaSuballocationType suballocType,
3913  VmaAllocation* pAllocation);
3914 
3915  void Free(
3916  VmaAllocation hAllocation);
3917 
3918  // Adds statistics of this BlockVector to pStats.
3919  void AddStats(VmaStats* pStats);
3920 
3921 #if VMA_STATS_STRING_ENABLED
3922  void PrintDetailedMap(class VmaJsonWriter& json);
3923 #endif
3924 
3925  void MakePoolAllocationsLost(
3926  uint32_t currentFrameIndex,
3927  size_t* pLostAllocationCount);
3928 
3929  VmaDefragmentator* EnsureDefragmentator(
3930  VmaAllocator hAllocator,
3931  uint32_t currentFrameIndex);
3932 
3933  VkResult Defragment(
3934  VmaDefragmentationStats* pDefragmentationStats,
3935  VkDeviceSize& maxBytesToMove,
3936  uint32_t& maxAllocationsToMove);
3937 
3938  void DestroyDefragmentator();
3939 
3940 private:
3941  friend class VmaDefragmentator;
3942 
3943  const VmaAllocator m_hAllocator;
3944  const uint32_t m_MemoryTypeIndex;
3945  const VkDeviceSize m_PreferredBlockSize;
3946  const size_t m_MinBlockCount;
3947  const size_t m_MaxBlockCount;
3948  const VkDeviceSize m_BufferImageGranularity;
3949  const uint32_t m_FrameInUseCount;
3950  const bool m_IsCustomPool;
3951  VMA_MUTEX m_Mutex;
3952  // Incrementally sorted by sumFreeSize, ascending.
3953  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3954  /* There can be at most one block that is completely empty - a
3955  hysteresis to avoid the pessimistic case of alternating creation and destruction
3956  of a VkDeviceMemory. */
3957  bool m_HasEmptyBlock;
3958  VmaDefragmentator* m_pDefragmentator;
3959 
3960  size_t CalcMaxBlockSize() const;
3961 
3962  // Finds and removes given block from vector.
3963  void Remove(VmaDeviceMemoryBlock* pBlock);
3964 
3965  // Performs a single step of sorting m_Blocks. They may not be fully sorted
3966  // after this call.
3967  void IncrementallySortBlocks();
3968 
3969  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3970 };
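// Editorial sketch (an assumption, not the library's code, relying on the
// VMA_SWAP helper defined earlier in this file) of what a single incremental
// sorting step could look like: one adjacent-pair bubble pass that moves at
// most one block toward its position by ascending sumFreeSize.
#if 0 // Illustrative only.
static void SketchIncrementallySortBlocks(
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> >& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_Metadata.GetSumFreeSize() > blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VMA_SWAP(blocks[i - 1], blocks[i]);
            return; // Single step only; full order emerges over repeated calls.
        }
    }
}
#endif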
3971 
3972 struct VmaPool_T
3973 {
3974 public:
3975  VmaBlockVector m_BlockVector;
3976 
3977  // Takes ownership.
3978  VmaPool_T(
3979  VmaAllocator hAllocator,
3980  const VmaPoolCreateInfo& createInfo);
3981  ~VmaPool_T();
3982 
3983  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3984 
3985 #if VMA_STATS_STRING_ENABLED
3986  //void PrintDetailedMap(class VmaStringBuilder& sb);
3987 #endif
3988 };
3989 
3990 class VmaDefragmentator
3991 {
3992  const VmaAllocator m_hAllocator;
3993  VmaBlockVector* const m_pBlockVector;
3994  uint32_t m_CurrentFrameIndex;
3995  VkDeviceSize m_BytesMoved;
3996  uint32_t m_AllocationsMoved;
3997 
3998  struct AllocationInfo
3999  {
4000  VmaAllocation m_hAllocation;
4001  VkBool32* m_pChanged;
4002 
4003  AllocationInfo() :
4004  m_hAllocation(VK_NULL_HANDLE),
4005  m_pChanged(VMA_NULL)
4006  {
4007  }
4008  };
4009 
4010  struct AllocationInfoSizeGreater
4011  {
4012  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4013  {
4014  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4015  }
4016  };
4017 
4018  // Used between AddAllocation and Defragment.
4019  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4020 
4021  struct BlockInfo
4022  {
4023  VmaDeviceMemoryBlock* m_pBlock;
4024  bool m_HasNonMovableAllocations;
4025  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4026 
4027  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4028  m_pBlock(VMA_NULL),
4029  m_HasNonMovableAllocations(true),
4030  m_Allocations(pAllocationCallbacks),
4031  m_pMappedDataForDefragmentation(VMA_NULL)
4032  {
4033  }
4034 
4035  void CalcHasNonMovableAllocations()
4036  {
4037  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4038  const size_t defragmentAllocCount = m_Allocations.size();
4039  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4040  }
4041 
4042  void SortAllocationsBySizeDescecnding()
4043  {
4044  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4045  }
4046 
4047  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4048  void Unmap(VmaAllocator hAllocator);
4049 
4050  private:
4051  // Not null if mapped for defragmentation only, not originally mapped.
4052  void* m_pMappedDataForDefragmentation;
4053  };
4054 
4055  struct BlockPointerLess
4056  {
4057  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4058  {
4059  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4060  }
4061  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4062  {
4063  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4064  }
4065  };
4066 
4067  // 1. Blocks with some non-movable allocations go first.
4068  // 2. Blocks with smaller sumFreeSize go first.
4069  struct BlockInfoCompareMoveDestination
4070  {
4071  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4072  {
4073  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4074  {
4075  return true;
4076  }
4077  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4078  {
4079  return false;
4080  }
4081  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4082  {
4083  return true;
4084  }
4085  return false;
4086  }
4087  };
4088 
4089  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4090  BlockInfoVector m_Blocks;
4091 
4092  VkResult DefragmentRound(
4093  VkDeviceSize maxBytesToMove,
4094  uint32_t maxAllocationsToMove);
4095 
4096  static bool MoveMakesSense(
4097  size_t dstBlockIndex, VkDeviceSize dstOffset,
4098  size_t srcBlockIndex, VkDeviceSize srcOffset);
4099 
4100 public:
4101  VmaDefragmentator(
4102  VmaAllocator hAllocator,
4103  VmaBlockVector* pBlockVector,
4104  uint32_t currentFrameIndex);
4105 
4106  ~VmaDefragmentator();
4107 
4108  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4109  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4110 
4111  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4112 
4113  VkResult Defragment(
4114  VkDeviceSize maxBytesToMove,
4115  uint32_t maxAllocationsToMove);
4116 };
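// Editorial sketch of the intended call sequence (hypothetical driver code):
//
//   VmaDefragmentator* const pDefrag =
//       blockVector.EnsureDefragmentator(hAllocator, currentFrameIndex);
//   VkBool32 changed = VK_FALSE;
//   pDefrag->AddAllocation(hAlloc, &changed);     // register a movable candidate
//   VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
//   VkDeviceSize bytesMoved = pDefrag->GetBytesMoved();
//   blockVector.DestroyDefragmentator();          // when done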
4117 
4118 // Main allocator object.
4119 struct VmaAllocator_T
4120 {
4121  bool m_UseMutex;
4122  bool m_UseKhrDedicatedAllocation;
4123  VkDevice m_hDevice;
4124  bool m_AllocationCallbacksSpecified;
4125  VkAllocationCallbacks m_AllocationCallbacks;
4126  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4127 
4128  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4129  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4130  VMA_MUTEX m_HeapSizeLimitMutex;
4131 
4132  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4133  VkPhysicalDeviceMemoryProperties m_MemProps;
4134 
4135  // Default pools.
4136  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4137 
4138  // Each vector is sorted by memory (handle value).
4139  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4140  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4141  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4142 
4143  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4144  ~VmaAllocator_T();
4145 
4146  const VkAllocationCallbacks* GetAllocationCallbacks() const
4147  {
4148  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4149  }
4150  const VmaVulkanFunctions& GetVulkanFunctions() const
4151  {
4152  return m_VulkanFunctions;
4153  }
4154 
4155  VkDeviceSize GetBufferImageGranularity() const
4156  {
4157  return VMA_MAX(
4158  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4159  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4160  }
4161 
4162  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4163  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4164 
4165  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4166  {
4167  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4168  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4169  }
4170 
4171  void GetBufferMemoryRequirements(
4172  VkBuffer hBuffer,
4173  VkMemoryRequirements& memReq,
4174  bool& requiresDedicatedAllocation,
4175  bool& prefersDedicatedAllocation) const;
4176  void GetImageMemoryRequirements(
4177  VkImage hImage,
4178  VkMemoryRequirements& memReq,
4179  bool& requiresDedicatedAllocation,
4180  bool& prefersDedicatedAllocation) const;
4181 
4182  // Main allocation function.
4183  VkResult AllocateMemory(
4184  const VkMemoryRequirements& vkMemReq,
4185  bool requiresDedicatedAllocation,
4186  bool prefersDedicatedAllocation,
4187  VkBuffer dedicatedBuffer,
4188  VkImage dedicatedImage,
4189  const VmaAllocationCreateInfo& createInfo,
4190  VmaSuballocationType suballocType,
4191  VmaAllocation* pAllocation);
4192 
4193  // Main deallocation function.
4194  void FreeMemory(const VmaAllocation allocation);
4195 
4196  void CalculateStats(VmaStats* pStats);
4197 
4198 #if VMA_STATS_STRING_ENABLED
4199  void PrintDetailedMap(class VmaJsonWriter& json);
4200 #endif
4201 
4202  VkResult Defragment(
4203  VmaAllocation* pAllocations,
4204  size_t allocationCount,
4205  VkBool32* pAllocationsChanged,
4206  const VmaDefragmentationInfo* pDefragmentationInfo,
4207  VmaDefragmentationStats* pDefragmentationStats);
4208 
4209  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4210  bool TouchAllocation(VmaAllocation hAllocation);
4211 
4212  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4213  void DestroyPool(VmaPool pool);
4214  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4215 
4216  void SetCurrentFrameIndex(uint32_t frameIndex);
4217 
4218  void MakePoolAllocationsLost(
4219  VmaPool hPool,
4220  size_t* pLostAllocationCount);
4221 
4222  void CreateLostAllocation(VmaAllocation* pAllocation);
4223 
4224  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4225  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4226 
4227  VkResult Map(VmaAllocation hAllocation, void** ppData);
4228  void Unmap(VmaAllocation hAllocation);
4229 
4230 private:
4231  VkDeviceSize m_PreferredLargeHeapBlockSize;
4232 
4233  VkPhysicalDevice m_PhysicalDevice;
4234  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4235 
4236  VMA_MUTEX m_PoolsMutex;
4237  // Protected by m_PoolsMutex. Sorted by pointer value.
4238  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4239 
4240  VmaVulkanFunctions m_VulkanFunctions;
4241 
4242  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4243 
4244  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4245 
4246  VkResult AllocateMemoryOfType(
4247  const VkMemoryRequirements& vkMemReq,
4248  bool dedicatedAllocation,
4249  VkBuffer dedicatedBuffer,
4250  VkImage dedicatedImage,
4251  const VmaAllocationCreateInfo& createInfo,
4252  uint32_t memTypeIndex,
4253  VmaSuballocationType suballocType,
4254  VmaAllocation* pAllocation);
4255 
4256  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4257  VkResult AllocateDedicatedMemory(
4258  VkDeviceSize size,
4259  VmaSuballocationType suballocType,
4260  uint32_t memTypeIndex,
4261  bool map,
4262  bool isUserDataString,
4263  void* pUserData,
4264  VkBuffer dedicatedBuffer,
4265  VkImage dedicatedImage,
4266  VmaAllocation* pAllocation);
4267 
4268  // Frees the given allocation's dedicated VkDeviceMemory and unregisters it from m_pDedicatedAllocations.
4269  void FreeDedicatedMemory(VmaAllocation allocation);
4270 };
4271 
4273 // Memory allocation #2 after VmaAllocator_T definition
4274 
4275 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4276 {
4277  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4278 }
4279 
4280 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4281 {
4282  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4283 }
4284 
4285 template<typename T>
4286 static T* VmaAllocate(VmaAllocator hAllocator)
4287 {
4288  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4289 }
4290 
4291 template<typename T>
4292 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4293 {
4294  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4295 }
4296 
4297 template<typename T>
4298 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4299 {
4300  if(ptr != VMA_NULL)
4301  {
4302  ptr->~T();
4303  VmaFree(hAllocator, ptr);
4304  }
4305 }
4306 
4307 template<typename T>
4308 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4309 {
4310  if(ptr != VMA_NULL)
4311  {
4312  for(size_t i = count; i--; )
4313  ptr[i].~T();
4314  VmaFree(hAllocator, ptr);
4315  }
4316 }
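// Editorial note: VmaAllocate/VmaAllocateArray above only obtain raw, suitably
// aligned storage without constructing objects. Construction is expected to go
// through placement new (cf. the vma_new_array usage in
// VmaAllocation_T::SetUserData below), while vma_delete/vma_delete_array run
// the destructors before returning the storage through VmaFree.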
4317 
4319 // VmaStringBuilder
4320 
4321 #if VMA_STATS_STRING_ENABLED
4322 
4323 class VmaStringBuilder
4324 {
4325 public:
4326  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4327  size_t GetLength() const { return m_Data.size(); }
4328  const char* GetData() const { return m_Data.data(); }
4329 
4330  void Add(char ch) { m_Data.push_back(ch); }
4331  void Add(const char* pStr);
4332  void AddNewLine() { Add('\n'); }
4333  void AddNumber(uint32_t num);
4334  void AddNumber(uint64_t num);
4335  void AddPointer(const void* ptr);
4336 
4337 private:
4338  VmaVector< char, VmaStlAllocator<char> > m_Data;
4339 };
4340 
4341 void VmaStringBuilder::Add(const char* pStr)
4342 {
4343  const size_t strLen = strlen(pStr);
4344  if(strLen > 0)
4345  {
4346  const size_t oldCount = m_Data.size();
4347  m_Data.resize(oldCount + strLen);
4348  memcpy(m_Data.data() + oldCount, pStr, strLen);
4349  }
4350 }
4351 
4352 void VmaStringBuilder::AddNumber(uint32_t num)
4353 {
4354  char buf[11];
4355  VmaUint32ToStr(buf, sizeof(buf), num);
4356  Add(buf);
4357 }
4358 
4359 void VmaStringBuilder::AddNumber(uint64_t num)
4360 {
4361  char buf[21];
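  // 21 = up to 20 decimal digits of UINT64_MAX plus the terminating '\0'.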
4362  VmaUint64ToStr(buf, sizeof(buf), num);
4363  Add(buf);
4364 }
4365 
4366 void VmaStringBuilder::AddPointer(const void* ptr)
4367 {
4368  char buf[21];
4369  VmaPtrToStr(buf, sizeof(buf), ptr);
4370  Add(buf);
4371 }
4372 
4373 #endif // #if VMA_STATS_STRING_ENABLED
4374 
4376 // VmaJsonWriter
4377 
4378 #if VMA_STATS_STRING_ENABLED
4379 
4380 class VmaJsonWriter
4381 {
4382 public:
4383  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4384  ~VmaJsonWriter();
4385 
4386  void BeginObject(bool singleLine = false);
4387  void EndObject();
4388 
4389  void BeginArray(bool singleLine = false);
4390  void EndArray();
4391 
4392  void WriteString(const char* pStr);
4393  void BeginString(const char* pStr = VMA_NULL);
4394  void ContinueString(const char* pStr);
4395  void ContinueString(uint32_t n);
4396  void ContinueString(uint64_t n);
4397  void ContinueString_Pointer(const void* ptr);
4398  void EndString(const char* pStr = VMA_NULL);
4399 
4400  void WriteNumber(uint32_t n);
4401  void WriteNumber(uint64_t n);
4402  void WriteBool(bool b);
4403  void WriteNull();
4404 
4405 private:
4406  static const char* const INDENT;
4407 
4408  enum COLLECTION_TYPE
4409  {
4410  COLLECTION_TYPE_OBJECT,
4411  COLLECTION_TYPE_ARRAY,
4412  };
4413  struct StackItem
4414  {
4415  COLLECTION_TYPE type;
4416  uint32_t valueCount;
4417  bool singleLineMode;
4418  };
4419 
4420  VmaStringBuilder& m_SB;
4421  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4422  bool m_InsideString;
4423 
4424  void BeginValue(bool isString);
4425  void WriteIndent(bool oneLess = false);
4426 };
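// Editorial sketch of intended usage (hypothetical values): inside an object,
// keys and values alternate across calls, which is what the valueCount % 2
// checks in BeginValue() enforce.
//
//   VmaStringBuilder sb(hAllocator);
//   {
//       VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("TotalBytes");        // key - must be a string
//       json.WriteNumber((uint64_t)1048576);   // value
//       json.EndObject();                      // stack must be empty at destruction
//   }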
4427 
4428 const char* const VmaJsonWriter::INDENT = " ";
4429 
4430 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4431  m_SB(sb),
4432  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4433  m_InsideString(false)
4434 {
4435 }
4436 
4437 VmaJsonWriter::~VmaJsonWriter()
4438 {
4439  VMA_ASSERT(!m_InsideString);
4440  VMA_ASSERT(m_Stack.empty());
4441 }
4442 
4443 void VmaJsonWriter::BeginObject(bool singleLine)
4444 {
4445  VMA_ASSERT(!m_InsideString);
4446 
4447  BeginValue(false);
4448  m_SB.Add('{');
4449 
4450  StackItem item;
4451  item.type = COLLECTION_TYPE_OBJECT;
4452  item.valueCount = 0;
4453  item.singleLineMode = singleLine;
4454  m_Stack.push_back(item);
4455 }
4456 
4457 void VmaJsonWriter::EndObject()
4458 {
4459  VMA_ASSERT(!m_InsideString);
4460 
4461  WriteIndent(true);
4462  m_SB.Add('}');
4463 
4464  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4465  m_Stack.pop_back();
4466 }
4467 
4468 void VmaJsonWriter::BeginArray(bool singleLine)
4469 {
4470  VMA_ASSERT(!m_InsideString);
4471 
4472  BeginValue(false);
4473  m_SB.Add('[');
4474 
4475  StackItem item;
4476  item.type = COLLECTION_TYPE_ARRAY;
4477  item.valueCount = 0;
4478  item.singleLineMode = singleLine;
4479  m_Stack.push_back(item);
4480 }
4481 
4482 void VmaJsonWriter::EndArray()
4483 {
4484  VMA_ASSERT(!m_InsideString);
4485 
4486  WriteIndent(true);
4487  m_SB.Add(']');
4488 
4489  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4490  m_Stack.pop_back();
4491 }
4492 
4493 void VmaJsonWriter::WriteString(const char* pStr)
4494 {
4495  BeginString(pStr);
4496  EndString();
4497 }
4498 
4499 void VmaJsonWriter::BeginString(const char* pStr)
4500 {
4501  VMA_ASSERT(!m_InsideString);
4502 
4503  BeginValue(true);
4504  m_SB.Add('"');
4505  m_InsideString = true;
4506  if(pStr != VMA_NULL && pStr[0] != '\0')
4507  {
4508  ContinueString(pStr);
4509  }
4510 }
4511 
4512 void VmaJsonWriter::ContinueString(const char* pStr)
4513 {
4514  VMA_ASSERT(m_InsideString);
4515 
4516  const size_t strLen = strlen(pStr);
4517  for(size_t i = 0; i < strLen; ++i)
4518  {
4519  char ch = pStr[i];
4520  if(ch == '\\')
4521  {
4522  m_SB.Add("\\\\");
4523  }
4524  else if(ch == '"')
4525  {
4526  m_SB.Add("\\\"");
4527  }
4528  else if(ch >= 32)
4529  {
4530  m_SB.Add(ch);
4531  }
4532  else switch(ch)
4533  {
4534  case '\b':
4535  m_SB.Add("\\b");
4536  break;
4537  case '\f':
4538  m_SB.Add("\\f");
4539  break;
4540  case '\n':
4541  m_SB.Add("\\n");
4542  break;
4543  case '\r':
4544  m_SB.Add("\\r");
4545  break;
4546  case '\t':
4547  m_SB.Add("\\t");
4548  break;
4549  default:
4550  VMA_ASSERT(0 && "Character not currently supported.");
4551  break;
4552  }
4553  }
4554 }
4555 
4556 void VmaJsonWriter::ContinueString(uint32_t n)
4557 {
4558  VMA_ASSERT(m_InsideString);
4559  m_SB.AddNumber(n);
4560 }
4561 
4562 void VmaJsonWriter::ContinueString(uint64_t n)
4563 {
4564  VMA_ASSERT(m_InsideString);
4565  m_SB.AddNumber(n);
4566 }
4567 
4568 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4569 {
4570  VMA_ASSERT(m_InsideString);
4571  m_SB.AddPointer(ptr);
4572 }
4573 
4574 void VmaJsonWriter::EndString(const char* pStr)
4575 {
4576  VMA_ASSERT(m_InsideString);
4577  if(pStr != VMA_NULL && pStr[0] != '\0')
4578  {
4579  ContinueString(pStr);
4580  }
4581  m_SB.Add('"');
4582  m_InsideString = false;
4583 }
4584 
4585 void VmaJsonWriter::WriteNumber(uint32_t n)
4586 {
4587  VMA_ASSERT(!m_InsideString);
4588  BeginValue(false);
4589  m_SB.AddNumber(n);
4590 }
4591 
4592 void VmaJsonWriter::WriteNumber(uint64_t n)
4593 {
4594  VMA_ASSERT(!m_InsideString);
4595  BeginValue(false);
4596  m_SB.AddNumber(n);
4597 }
4598 
4599 void VmaJsonWriter::WriteBool(bool b)
4600 {
4601  VMA_ASSERT(!m_InsideString);
4602  BeginValue(false);
4603  m_SB.Add(b ? "true" : "false");
4604 }
4605 
4606 void VmaJsonWriter::WriteNull()
4607 {
4608  VMA_ASSERT(!m_InsideString);
4609  BeginValue(false);
4610  m_SB.Add("null");
4611 }
4612 
4613 void VmaJsonWriter::BeginValue(bool isString)
4614 {
4615  if(!m_Stack.empty())
4616  {
4617  StackItem& currItem = m_Stack.back();
4618  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4619  currItem.valueCount % 2 == 0)
4620  {
4621  VMA_ASSERT(isString);
4622  }
4623 
4624  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4625  currItem.valueCount % 2 != 0)
4626  {
4627  m_SB.Add(": ");
4628  }
4629  else if(currItem.valueCount > 0)
4630  {
4631  m_SB.Add(", ");
4632  WriteIndent();
4633  }
4634  else
4635  {
4636  WriteIndent();
4637  }
4638  ++currItem.valueCount;
4639  }
4640 }
4641 
4642 void VmaJsonWriter::WriteIndent(bool oneLess)
4643 {
4644  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4645  {
4646  m_SB.AddNewLine();
4647 
4648  size_t count = m_Stack.size();
4649  if(count > 0 && oneLess)
4650  {
4651  --count;
4652  }
4653  for(size_t i = 0; i < count; ++i)
4654  {
4655  m_SB.Add(INDENT);
4656  }
4657  }
4658 }
4659 
4660 #endif // #if VMA_STATS_STRING_ENABLED
4661 
4663 
4664 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4665 {
4666  if(IsUserDataString())
4667  {
4668  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4669 
4670  FreeUserDataString(hAllocator);
4671 
4672  if(pUserData != VMA_NULL)
4673  {
4674  const char* const newStrSrc = (char*)pUserData;
4675  const size_t newStrLen = strlen(newStrSrc);
4676  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4677  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4678  m_pUserData = newStrDst;
4679  }
4680  }
4681  else
4682  {
4683  m_pUserData = pUserData;
4684  }
4685 }
4686 
4687 void VmaAllocation_T::ChangeBlockAllocation(
4688  VmaAllocator hAllocator,
4689  VmaDeviceMemoryBlock* block,
4690  VkDeviceSize offset)
4691 {
4692  VMA_ASSERT(block != VMA_NULL);
4693  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4694 
4695  // Move mapping reference counter from old block to new block.
4696  if(block != m_BlockAllocation.m_Block)
4697  {
4698  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4699  if(IsPersistentMap())
4700  ++mapRefCount;
4701  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4702  block->Map(hAllocator, mapRefCount, VMA_NULL);
4703  }
4704 
4705  m_BlockAllocation.m_Block = block;
4706  m_BlockAllocation.m_Offset = offset;
4707 }
4708 
4709 VkDeviceSize VmaAllocation_T::GetOffset() const
4710 {
4711  switch(m_Type)
4712  {
4713  case ALLOCATION_TYPE_BLOCK:
4714  return m_BlockAllocation.m_Offset;
4715  case ALLOCATION_TYPE_DEDICATED:
4716  return 0;
4717  default:
4718  VMA_ASSERT(0);
4719  return 0;
4720  }
4721 }
4722 
4723 VkDeviceMemory VmaAllocation_T::GetMemory() const
4724 {
4725  switch(m_Type)
4726  {
4727  case ALLOCATION_TYPE_BLOCK:
4728  return m_BlockAllocation.m_Block->m_hMemory;
4729  case ALLOCATION_TYPE_DEDICATED:
4730  return m_DedicatedAllocation.m_hMemory;
4731  default:
4732  VMA_ASSERT(0);
4733  return VK_NULL_HANDLE;
4734  }
4735 }
4736 
4737 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4738 {
4739  switch(m_Type)
4740  {
4741  case ALLOCATION_TYPE_BLOCK:
4742  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4743  case ALLOCATION_TYPE_DEDICATED:
4744  return m_DedicatedAllocation.m_MemoryTypeIndex;
4745  default:
4746  VMA_ASSERT(0);
4747  return UINT32_MAX;
4748  }
4749 }
4750 
4751 void* VmaAllocation_T::GetMappedData() const
4752 {
4753  switch(m_Type)
4754  {
4755  case ALLOCATION_TYPE_BLOCK:
4756  if(m_MapCount != 0)
4757  {
4758  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4759  VMA_ASSERT(pBlockData != VMA_NULL);
4760  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4761  }
4762  else
4763  {
4764  return VMA_NULL;
4765  }
4766  break;
4767  case ALLOCATION_TYPE_DEDICATED:
4768  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4769  return m_DedicatedAllocation.m_pMappedData;
4770  default:
4771  VMA_ASSERT(0);
4772  return VMA_NULL;
4773  }
4774 }
4775 
4776 bool VmaAllocation_T::CanBecomeLost() const
4777 {
4778  switch(m_Type)
4779  {
4780  case ALLOCATION_TYPE_BLOCK:
4781  return m_BlockAllocation.m_CanBecomeLost;
4782  case ALLOCATION_TYPE_DEDICATED:
4783  return false;
4784  default:
4785  VMA_ASSERT(0);
4786  return false;
4787  }
4788 }
4789 
4790 VmaPool VmaAllocation_T::GetPool() const
4791 {
4792  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4793  return m_BlockAllocation.m_hPool;
4794 }
4795 
4796 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4797 {
4798  VMA_ASSERT(CanBecomeLost());
4799 
4800  /*
4801  Warning: This is a carefully designed algorithm.
4802  Do not modify unless you really know what you're doing :)
4803  */
4804  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4805  for(;;)
4806  {
4807  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4808  {
4809  VMA_ASSERT(0);
4810  return false;
4811  }
4812  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4813  {
4814  return false;
4815  }
4816  else // Last use time earlier than current time.
4817  {
4818  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4819  {
4820  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4821  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4822  return true;
4823  }
4824  }
4825  }
4826 }
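// Editorial note on the loop above: compare_exchange_weak can fail either
// because another thread updated m_LastUseFrameIndex concurrently or
// spuriously; on failure it reloads localLastUseFrameIndex with the current
// value, so the decision is always re-evaluated against fresh data and the
// allocation is never marked lost based on a stale frame index.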
4827 
4828 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4829 {
4830  VMA_ASSERT(IsUserDataString());
4831  if(m_pUserData != VMA_NULL)
4832  {
4833  char* const oldStr = (char*)m_pUserData;
4834  const size_t oldStrLen = strlen(oldStr);
4835  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4836  m_pUserData = VMA_NULL;
4837  }
4838 }
4839 
4840 void VmaAllocation_T::BlockAllocMap()
4841 {
4842  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4843 
4844  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4845  {
4846  ++m_MapCount;
4847  }
4848  else
4849  {
4850  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4851  }
4852 }
4853 
4854 void VmaAllocation_T::BlockAllocUnmap()
4855 {
4856  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4857 
4858  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4859  {
4860  --m_MapCount;
4861  }
4862  else
4863  {
4864  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4865  }
4866 }
4867 
4868 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4869 {
4870  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4871 
4872  if(m_MapCount != 0)
4873  {
4874  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4875  {
4876  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4877  *ppData = m_DedicatedAllocation.m_pMappedData;
4878  ++m_MapCount;
4879  return VK_SUCCESS;
4880  }
4881  else
4882  {
4883  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4884  return VK_ERROR_MEMORY_MAP_FAILED;
4885  }
4886  }
4887  else
4888  {
4889  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4890  hAllocator->m_hDevice,
4891  m_DedicatedAllocation.m_hMemory,
4892  0, // offset
4893  VK_WHOLE_SIZE,
4894  0, // flags
4895  ppData);
4896  if(result == VK_SUCCESS)
4897  {
4898  m_DedicatedAllocation.m_pMappedData = *ppData;
4899  m_MapCount = 1;
4900  }
4901  return result;
4902  }
4903 }
4904 
4905 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4906 {
4907  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4908 
4909  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4910  {
4911  --m_MapCount;
4912  if(m_MapCount == 0)
4913  {
4914  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4915  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4916  hAllocator->m_hDevice,
4917  m_DedicatedAllocation.m_hMemory);
4918  }
4919  }
4920  else
4921  {
4922  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4923  }
4924 }
4925 
4926 #if VMA_STATS_STRING_ENABLED
4927 
4928 // Entries correspond to values of enum VmaSuballocationType.
4929 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4930  "FREE",
4931  "UNKNOWN",
4932  "BUFFER",
4933  "IMAGE_UNKNOWN",
4934  "IMAGE_LINEAR",
4935  "IMAGE_OPTIMAL",
4936 };
4937 
4938 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4939 {
4940  json.BeginObject();
4941 
4942  json.WriteString("Blocks");
4943  json.WriteNumber(stat.blockCount);
4944 
4945  json.WriteString("Allocations");
4946  json.WriteNumber(stat.allocationCount);
4947 
4948  json.WriteString("UnusedRanges");
4949  json.WriteNumber(stat.unusedRangeCount);
4950 
4951  json.WriteString("UsedBytes");
4952  json.WriteNumber(stat.usedBytes);
4953 
4954  json.WriteString("UnusedBytes");
4955  json.WriteNumber(stat.unusedBytes);
4956 
4957  if(stat.allocationCount > 1)
4958  {
4959  json.WriteString("AllocationSize");
4960  json.BeginObject(true);
4961  json.WriteString("Min");
4962  json.WriteNumber(stat.allocationSizeMin);
4963  json.WriteString("Avg");
4964  json.WriteNumber(stat.allocationSizeAvg);
4965  json.WriteString("Max");
4966  json.WriteNumber(stat.allocationSizeMax);
4967  json.EndObject();
4968  }
4969 
4970  if(stat.unusedRangeCount > 1)
4971  {
4972  json.WriteString("UnusedRangeSize");
4973  json.BeginObject(true);
4974  json.WriteString("Min");
4975  json.WriteNumber(stat.unusedRangeSizeMin);
4976  json.WriteString("Avg");
4977  json.WriteNumber(stat.unusedRangeSizeAvg);
4978  json.WriteString("Max");
4979  json.WriteNumber(stat.unusedRangeSizeMax);
4980  json.EndObject();
4981  }
4982 
4983  json.EndObject();
4984 }
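// Editorial sketch of the JSON shape emitted above (values illustrative;
// "AllocationSize"/"UnusedRangeSize" appear only when the respective count
// exceeds 1):
//
//   {
//    "Blocks": 1, "Allocations": 2, "UnusedRanges": 2,
//    "UsedBytes": 65536, "UnusedBytes": 4096,
//    "AllocationSize": { "Min": 16384, "Avg": 32768, "Max": 49152 },
//    "UnusedRangeSize": { "Min": 1024, "Avg": 2048, "Max": 3072 }
//   }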
4985 
4986 #endif // #if VMA_STATS_STRING_ENABLED
4987 
4988 struct VmaSuballocationItemSizeLess
4989 {
4990  bool operator()(
4991  const VmaSuballocationList::iterator lhs,
4992  const VmaSuballocationList::iterator rhs) const
4993  {
4994  return lhs->size < rhs->size;
4995  }
4996  bool operator()(
4997  const VmaSuballocationList::iterator lhs,
4998  VkDeviceSize rhsSize) const
4999  {
5000  return lhs->size < rhsSize;
5001  }
5002 };
5003 
5005 // class VmaBlockMetadata
5006 
5007 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5008  m_Size(0),
5009  m_FreeCount(0),
5010  m_SumFreeSize(0),
5011  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5012  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5013 {
5014 }
5015 
5016 VmaBlockMetadata::~VmaBlockMetadata()
5017 {
5018 }
5019 
5020 void VmaBlockMetadata::Init(VkDeviceSize size)
5021 {
5022  m_Size = size;
5023  m_FreeCount = 1;
5024  m_SumFreeSize = size;
5025 
5026  VmaSuballocation suballoc = {};
5027  suballoc.offset = 0;
5028  suballoc.size = size;
5029  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5030  suballoc.hAllocation = VK_NULL_HANDLE;
5031 
5032  m_Suballocations.push_back(suballoc);
5033  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5034  --suballocItem;
5035  m_FreeSuballocationsBySize.push_back(suballocItem);
5036 }
5037 
5038 bool VmaBlockMetadata::Validate() const
5039 {
5040  if(m_Suballocations.empty())
5041  {
5042  return false;
5043  }
5044 
5045  // Expected offset of the next suballocation, as calculated from the previous ones.
5046  VkDeviceSize calculatedOffset = 0;
5047  // Expected number of free suballocations as calculated from traversing their list.
5048  uint32_t calculatedFreeCount = 0;
5049  // Expected sum size of free suballocations as calculated from traversing their list.
5050  VkDeviceSize calculatedSumFreeSize = 0;
5051  // Expected number of free suballocations that should be registered in
5052  // m_FreeSuballocationsBySize calculated from traversing their list.
5053  size_t freeSuballocationsToRegister = 0;
5054  // True if the previously visited suballocation was free.
5055  bool prevFree = false;
5056 
5057  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5058  suballocItem != m_Suballocations.cend();
5059  ++suballocItem)
5060  {
5061  const VmaSuballocation& subAlloc = *suballocItem;
5062 
5063  // Actual offset of this suballocation doesn't match the expected one.
5064  if(subAlloc.offset != calculatedOffset)
5065  {
5066  return false;
5067  }
5068 
5069  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5070  // Two adjacent free suballocations are invalid. They should be merged.
5071  if(prevFree && currFree)
5072  {
5073  return false;
5074  }
5075 
5076  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5077  {
5078  return false;
5079  }
5080 
5081  if(currFree)
5082  {
5083  calculatedSumFreeSize += subAlloc.size;
5084  ++calculatedFreeCount;
5085  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5086  {
5087  ++freeSuballocationsToRegister;
5088  }
5089  }
5090  else
5091  {
5092  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5093  {
5094  return false;
5095  }
5096  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5097  {
5098  return false;
5099  }
5100  }
5101 
5102  calculatedOffset += subAlloc.size;
5103  prevFree = currFree;
5104  }
5105 
5106  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5107  // match the expected one.
5108  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5109  {
5110  return false;
5111  }
5112 
5113  VkDeviceSize lastSize = 0;
5114  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5115  {
5116  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5117 
5118  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5119  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5120  {
5121  return false;
5122  }
5123  // They must be sorted by size ascending.
5124  if(suballocItem->size < lastSize)
5125  {
5126  return false;
5127  }
5128 
5129  lastSize = suballocItem->size;
5130  }
5131 
5132  // Check if totals match the calculated values.
5133  if(!ValidateFreeSuballocationList() ||
5134  (calculatedOffset != m_Size) ||
5135  (calculatedSumFreeSize != m_SumFreeSize) ||
5136  (calculatedFreeCount != m_FreeCount))
5137  {
5138  return false;
5139  }
5140 
5141  return true;
5142 }
5143 
5144 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5145 {
5146  if(!m_FreeSuballocationsBySize.empty())
5147  {
5148  return m_FreeSuballocationsBySize.back()->size;
5149  }
5150  else
5151  {
5152  return 0;
5153  }
5154 }
5155 
5156 bool VmaBlockMetadata::IsEmpty() const
5157 {
5158  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5159 }
5160 
5161 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5162 {
5163  outInfo.blockCount = 1;
5164 
5165  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5166  outInfo.allocationCount = rangeCount - m_FreeCount;
5167  outInfo.unusedRangeCount = m_FreeCount;
5168 
5169  outInfo.unusedBytes = m_SumFreeSize;
5170  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5171 
5172  outInfo.allocationSizeMin = UINT64_MAX;
5173  outInfo.allocationSizeMax = 0;
5174  outInfo.unusedRangeSizeMin = UINT64_MAX;
5175  outInfo.unusedRangeSizeMax = 0;
5176 
5177  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5178  suballocItem != m_Suballocations.cend();
5179  ++suballocItem)
5180  {
5181  const VmaSuballocation& suballoc = *suballocItem;
5182  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5183  {
5184  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5185  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5186  }
5187  else
5188  {
5189  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5190  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5191  }
5192  }
5193 }
5194 
5195 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5196 {
5197  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5198 
5199  inoutStats.size += m_Size;
5200  inoutStats.unusedSize += m_SumFreeSize;
5201  inoutStats.allocationCount += rangeCount - m_FreeCount;
5202  inoutStats.unusedRangeCount += m_FreeCount;
5203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5204 }
5205 
5206 #if VMA_STATS_STRING_ENABLED
5207 
5208 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5209 {
5210  json.BeginObject();
5211 
5212  json.WriteString("TotalBytes");
5213  json.WriteNumber(m_Size);
5214 
5215  json.WriteString("UnusedBytes");
5216  json.WriteNumber(m_SumFreeSize);
5217 
5218  json.WriteString("Allocations");
5219  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5220 
5221  json.WriteString("UnusedRanges");
5222  json.WriteNumber(m_FreeCount);
5223 
5224  json.WriteString("Suballocations");
5225  json.BeginArray();
5226  size_t i = 0;
5227  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5228  suballocItem != m_Suballocations.cend();
5229  ++suballocItem, ++i)
5230  {
5231  json.BeginObject(true);
5232 
5233  json.WriteString("Type");
5234  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5235 
5236  json.WriteString("Size");
5237  json.WriteNumber(suballocItem->size);
5238 
5239  json.WriteString("Offset");
5240  json.WriteNumber(suballocItem->offset);
5241 
5242  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5243  {
5244  const void* pUserData = suballocItem->hAllocation->GetUserData();
5245  if(pUserData != VMA_NULL)
5246  {
5247  json.WriteString("UserData");
5248  if(suballocItem->hAllocation->IsUserDataString())
5249  {
5250  json.WriteString((const char*)pUserData);
5251  }
5252  else
5253  {
5254  json.BeginString();
5255  json.ContinueString_Pointer(pUserData);
5256  json.EndString();
5257  }
5258  }
5259  }
5260 
5261  json.EndObject();
5262  }
5263  json.EndArray();
5264 
5265  json.EndObject();
5266 }
5267 
5268 #endif // #if VMA_STATS_STRING_ENABLED
5269 
5270 /*
5271 How many suitable free suballocations to analyze before choosing the best one.
5272 - Set to 1 to use the First-Fit algorithm - the first suitable free
5273  suballocation will be chosen.
5274 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable
5275  free suballocations will be analyzed and the best one will be chosen.
5276 - Any other value is also acceptable.
5277 */
5278 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
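// Editorial sketch, assuming VmaBinaryFindFirstNotLess (used below) behaves
// like std::lower_bound: it returns a pointer to the first element of the
// sorted range for which cmp(element, key) is false, i.e. the first free
// suballocation whose size is not less than allocSize.
#if 0 // Illustrative only.
template<typename IterT, typename KeyT, typename CmpT>
static IterT* SketchBinaryFindFirstNotLess(IterT* beg, IterT* end, const KeyT& key, CmpT cmp)
{
    size_t down = 0, up = (size_t)(end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg + mid), key)) // element < key: keep searching to the right.
            down = mid + 1;
        else
            up = mid;
    }
    return beg + down;
}
#endif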
5279 
5280 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5281 {
5282  VMA_ASSERT(IsEmpty());
5283  pAllocationRequest->offset = 0;
5284  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5285  pAllocationRequest->sumItemSize = 0;
5286  pAllocationRequest->item = m_Suballocations.begin();
5287  pAllocationRequest->itemsToMakeLostCount = 0;
5288 }
5289 
5290 bool VmaBlockMetadata::CreateAllocationRequest(
5291  uint32_t currentFrameIndex,
5292  uint32_t frameInUseCount,
5293  VkDeviceSize bufferImageGranularity,
5294  VkDeviceSize allocSize,
5295  VkDeviceSize allocAlignment,
5296  VmaSuballocationType allocType,
5297  bool canMakeOtherLost,
5298  VmaAllocationRequest* pAllocationRequest)
5299 {
5300  VMA_ASSERT(allocSize > 0);
5301  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5302  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5303  VMA_HEAVY_ASSERT(Validate());
5304 
5305  // There is not enough total free space in this block to fulfill the request: Early return.
5306  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5307  {
5308  return false;
5309  }
5310 
5311  // New algorithm: efficiently search m_FreeSuballocationsBySize.
5312  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5313  if(freeSuballocCount > 0)
5314  {
5315  if(VMA_BEST_FIT)
5316  {
5317  // Find first free suballocation with size not less than allocSize.
5318  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5319  m_FreeSuballocationsBySize.data(),
5320  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5321  allocSize,
5322  VmaSuballocationItemSizeLess());
5323  size_t index = it - m_FreeSuballocationsBySize.data();
5324  for(; index < freeSuballocCount; ++index)
5325  {
5326  if(CheckAllocation(
5327  currentFrameIndex,
5328  frameInUseCount,
5329  bufferImageGranularity,
5330  allocSize,
5331  allocAlignment,
5332  allocType,
5333  m_FreeSuballocationsBySize[index],
5334  false, // canMakeOtherLost
5335  &pAllocationRequest->offset,
5336  &pAllocationRequest->itemsToMakeLostCount,
5337  &pAllocationRequest->sumFreeSize,
5338  &pAllocationRequest->sumItemSize))
5339  {
5340  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5341  return true;
5342  }
5343  }
5344  }
5345  else
5346  {
5347  // Search starting from the biggest suballocations.
5348  for(size_t index = freeSuballocCount; index--; )
5349  {
5350  if(CheckAllocation(
5351  currentFrameIndex,
5352  frameInUseCount,
5353  bufferImageGranularity,
5354  allocSize,
5355  allocAlignment,
5356  allocType,
5357  m_FreeSuballocationsBySize[index],
5358  false, // canMakeOtherLost
5359  &pAllocationRequest->offset,
5360  &pAllocationRequest->itemsToMakeLostCount,
5361  &pAllocationRequest->sumFreeSize,
5362  &pAllocationRequest->sumItemSize))
5363  {
5364  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5365  return true;
5366  }
5367  }
5368  }
5369  }
5370 
5371  if(canMakeOtherLost)
5372  {
5373  // Brute-force algorithm. TODO: Come up with something better.
5374 
5375  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5376  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5377 
5378  VmaAllocationRequest tmpAllocRequest = {};
5379  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5380  suballocIt != m_Suballocations.end();
5381  ++suballocIt)
5382  {
5383  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5384  suballocIt->hAllocation->CanBecomeLost())
5385  {
5386  if(CheckAllocation(
5387  currentFrameIndex,
5388  frameInUseCount,
5389  bufferImageGranularity,
5390  allocSize,
5391  allocAlignment,
5392  allocType,
5393  suballocIt,
5394  canMakeOtherLost,
5395  &tmpAllocRequest.offset,
5396  &tmpAllocRequest.itemsToMakeLostCount,
5397  &tmpAllocRequest.sumFreeSize,
5398  &tmpAllocRequest.sumItemSize))
5399  {
5400  tmpAllocRequest.item = suballocIt;
5401 
5402  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5403  {
5404  *pAllocationRequest = tmpAllocRequest;
5405  }
5406  }
5407  }
5408  }
5409 
5410  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5411  {
5412  return true;
5413  }
5414  }
5415 
5416  return false;
5417 }
5418 
5419 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5420  uint32_t currentFrameIndex,
5421  uint32_t frameInUseCount,
5422  VmaAllocationRequest* pAllocationRequest)
5423 {
5424  while(pAllocationRequest->itemsToMakeLostCount > 0)
5425  {
5426  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5427  {
5428  ++pAllocationRequest->item;
5429  }
5430  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5431  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5432  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5433  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5434  {
5435  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5436  --pAllocationRequest->itemsToMakeLostCount;
5437  }
5438  else
5439  {
5440  return false;
5441  }
5442  }
5443 
5444  VMA_HEAVY_ASSERT(Validate());
5445  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5446  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5447 
5448  return true;
5449 }
5450 
5451 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5452 {
5453  uint32_t lostAllocationCount = 0;
5454  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5455  it != m_Suballocations.end();
5456  ++it)
5457  {
5458  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5459  it->hAllocation->CanBecomeLost() &&
5460  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5461  {
5462  it = FreeSuballocation(it);
5463  ++lostAllocationCount;
5464  }
5465  }
5466  return lostAllocationCount;
5467 }
5468 
5469 void VmaBlockMetadata::Alloc(
5470  const VmaAllocationRequest& request,
5471  VmaSuballocationType type,
5472  VkDeviceSize allocSize,
5473  VmaAllocation hAllocation)
5474 {
5475  VMA_ASSERT(request.item != m_Suballocations.end());
5476  VmaSuballocation& suballoc = *request.item;
5477  // The given suballocation must be free.
5478  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5479  // The given offset must lie inside this suballocation.
5480  VMA_ASSERT(request.offset >= suballoc.offset);
5481  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5482  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5483  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5484 
5485  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5486  // it to become used.
5487  UnregisterFreeSuballocation(request.item);
5488 
5489  suballoc.offset = request.offset;
5490  suballoc.size = allocSize;
5491  suballoc.type = type;
5492  suballoc.hAllocation = hAllocation;
5493 
5494  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5495  if(paddingEnd)
5496  {
5497  VmaSuballocation paddingSuballoc = {};
5498  paddingSuballoc.offset = request.offset + allocSize;
5499  paddingSuballoc.size = paddingEnd;
5500  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5501  VmaSuballocationList::iterator next = request.item;
5502  ++next;
5503  const VmaSuballocationList::iterator paddingEndItem =
5504  m_Suballocations.insert(next, paddingSuballoc);
5505  RegisterFreeSuballocation(paddingEndItem);
5506  }
5507 
5508  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5509  if(paddingBegin)
5510  {
5511  VmaSuballocation paddingSuballoc = {};
5512  paddingSuballoc.offset = request.offset - paddingBegin;
5513  paddingSuballoc.size = paddingBegin;
5514  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5515  const VmaSuballocationList::iterator paddingBeginItem =
5516  m_Suballocations.insert(request.item, paddingSuballoc);
5517  RegisterFreeSuballocation(paddingBeginItem);
5518  }
5519 
5520  // Update totals.
5521  m_FreeCount = m_FreeCount - 1;
5522  if(paddingBegin > 0)
5523  {
5524  ++m_FreeCount;
5525  }
5526  if(paddingEnd > 0)
5527  {
5528  ++m_FreeCount;
5529  }
5530  m_SumFreeSize -= allocSize;
5531 }
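
Alloc() above carves one free suballocation into up to three pieces: optional free padding before the new allocation, the allocation itself, and optional free padding after it; each nonzero padding is re-registered as a VMA_SUBALLOCATION_TYPE_FREE entry. A minimal sketch of the split arithmetic, with illustrative names that are not part of this header:

    #include <cassert>
    #include <cstdint>

    struct SplitResult { uint64_t paddingBegin; uint64_t paddingEnd; };

    // Free range [freeOffset, freeOffset+freeSize), allocation placed at
    // [allocOffset, allocOffset+allocSize) inside it.
    SplitResult SplitFreeRange(
        uint64_t freeOffset, uint64_t freeSize,
        uint64_t allocOffset, uint64_t allocSize)
    {
        assert(allocOffset >= freeOffset);
        const uint64_t paddingBegin = allocOffset - freeOffset;
        assert(freeSize >= paddingBegin + allocSize);
        return { paddingBegin, freeSize - paddingBegin - allocSize };
    }

    // Example: a 1024-byte free range at offset 0 with a 256-byte allocation
    // placed at offset 64 leaves paddingBegin = 64 and paddingEnd = 704.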
5532 
5533 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5534 {
5535  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5536  suballocItem != m_Suballocations.end();
5537  ++suballocItem)
5538  {
5539  VmaSuballocation& suballoc = *suballocItem;
5540  if(suballoc.hAllocation == allocation)
5541  {
5542  FreeSuballocation(suballocItem);
5543  VMA_HEAVY_ASSERT(Validate());
5544  return;
5545  }
5546  }
5547  VMA_ASSERT(0 && "Not found!");
5548 }
5549 
5550 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5551 {
5552  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5553  suballocItem != m_Suballocations.end();
5554  ++suballocItem)
5555  {
5556  VmaSuballocation& suballoc = *suballocItem;
5557  if(suballoc.offset == offset)
5558  {
5559  FreeSuballocation(suballocItem);
5560  return;
5561  }
5562  }
5563  VMA_ASSERT(0 && "Not found!");
5564 }
5565 
5566 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5567 {
5568  VkDeviceSize lastSize = 0;
5569  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5570  {
5571  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5572 
5573  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5574  {
5575  VMA_ASSERT(0);
5576  return false;
5577  }
5578  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5579  {
5580  VMA_ASSERT(0);
5581  return false;
5582  }
5583  if(it->size < lastSize)
5584  {
5585  VMA_ASSERT(0);
5586  return false;
5587  }
5588 
5589  lastSize = it->size;
5590  }
5591  return true;
5592 }
5593 
5594 bool VmaBlockMetadata::CheckAllocation(
5595  uint32_t currentFrameIndex,
5596  uint32_t frameInUseCount,
5597  VkDeviceSize bufferImageGranularity,
5598  VkDeviceSize allocSize,
5599  VkDeviceSize allocAlignment,
5600  VmaSuballocationType allocType,
5601  VmaSuballocationList::const_iterator suballocItem,
5602  bool canMakeOtherLost,
5603  VkDeviceSize* pOffset,
5604  size_t* itemsToMakeLostCount,
5605  VkDeviceSize* pSumFreeSize,
5606  VkDeviceSize* pSumItemSize) const
5607 {
5608  VMA_ASSERT(allocSize > 0);
5609  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5610  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5611  VMA_ASSERT(pOffset != VMA_NULL);
5612 
5613  *itemsToMakeLostCount = 0;
5614  *pSumFreeSize = 0;
5615  *pSumItemSize = 0;
5616 
5617  if(canMakeOtherLost)
5618  {
5619  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5620  {
5621  *pSumFreeSize = suballocItem->size;
5622  }
5623  else
5624  {
5625  if(suballocItem->hAllocation->CanBecomeLost() &&
5626  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5627  {
5628  ++*itemsToMakeLostCount;
5629  *pSumItemSize = suballocItem->size;
5630  }
5631  else
5632  {
5633  return false;
5634  }
5635  }
5636 
5637  // Remaining size is too small for this request: Early return.
5638  if(m_Size - suballocItem->offset < allocSize)
5639  {
5640  return false;
5641  }
5642 
5643  // Start from offset equal to beginning of this suballocation.
5644  *pOffset = suballocItem->offset;
5645 
5646  // Apply VMA_DEBUG_MARGIN at the beginning.
5647  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5648  {
5649  *pOffset += VMA_DEBUG_MARGIN;
5650  }
5651 
5652  // Apply alignment.
5653  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5654  *pOffset = VmaAlignUp(*pOffset, alignment);
5655 
5656  // Check previous suballocations for BufferImageGranularity conflicts.
5657  // Make bigger alignment if necessary.
5658  if(bufferImageGranularity > 1)
5659  {
5660  bool bufferImageGranularityConflict = false;
5661  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5662  while(prevSuballocItem != m_Suballocations.cbegin())
5663  {
5664  --prevSuballocItem;
5665  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5666  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5667  {
5668  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5669  {
5670  bufferImageGranularityConflict = true;
5671  break;
5672  }
5673  }
5674  else
5675  // Already on previous page.
5676  break;
5677  }
5678  if(bufferImageGranularityConflict)
5679  {
5680  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5681  }
5682  }
5683 
5684  // Now that we have final *pOffset, check if we are past suballocItem.
5685  // If yes, return false - this function should be called for another suballocItem as starting point.
5686  if(*pOffset >= suballocItem->offset + suballocItem->size)
5687  {
5688  return false;
5689  }
5690 
5691  // Calculate padding at the beginning based on current offset.
5692  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5693 
5694  // Calculate required margin at the end if this is not last suballocation.
5695  VmaSuballocationList::const_iterator next = suballocItem;
5696  ++next;
5697  const VkDeviceSize requiredEndMargin =
5698  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5699 
5700  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5701  // Another early return check.
5702  if(suballocItem->offset + totalSize > m_Size)
5703  {
5704  return false;
5705  }
5706 
5707  // Advance lastSuballocItem until desired size is reached.
5708  // Update itemsToMakeLostCount.
5709  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5710  if(totalSize > suballocItem->size)
5711  {
5712  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5713  while(remainingSize > 0)
5714  {
5715  ++lastSuballocItem;
5716  if(lastSuballocItem == m_Suballocations.cend())
5717  {
5718  return false;
5719  }
5720  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5721  {
5722  *pSumFreeSize += lastSuballocItem->size;
5723  }
5724  else
5725  {
5726  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5727  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5728  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5729  {
5730  ++*itemsToMakeLostCount;
5731  *pSumItemSize += lastSuballocItem->size;
5732  }
5733  else
5734  {
5735  return false;
5736  }
5737  }
5738  remainingSize = (lastSuballocItem->size < remainingSize) ?
5739  remainingSize - lastSuballocItem->size : 0;
5740  }
5741  }
5742 
5743  // Check next suballocations for BufferImageGranularity conflicts.
5744  // If conflict exists, we must mark more allocations lost or fail.
5745  if(bufferImageGranularity > 1)
5746  {
5747  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5748  ++nextSuballocItem;
5749  while(nextSuballocItem != m_Suballocations.cend())
5750  {
5751  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5752  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5753  {
5754  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5755  {
5756  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5757  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5758  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5759  {
5760  ++*itemsToMakeLostCount;
5761  }
5762  else
5763  {
5764  return false;
5765  }
5766  }
5767  }
5768  else
5769  {
5770  // Already on next page.
5771  break;
5772  }
5773  ++nextSuballocItem;
5774  }
5775  }
5776  }
5777  else
5778  {
5779  const VmaSuballocation& suballoc = *suballocItem;
5780  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5781 
5782  *pSumFreeSize = suballoc.size;
5783 
5784  // Size of this suballocation is too small for this request: Early return.
5785  if(suballoc.size < allocSize)
5786  {
5787  return false;
5788  }
5789 
5790  // Start from offset equal to beginning of this suballocation.
5791  *pOffset = suballoc.offset;
5792 
5793  // Apply VMA_DEBUG_MARGIN at the beginning.
5794  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5795  {
5796  *pOffset += VMA_DEBUG_MARGIN;
5797  }
5798 
5799  // Apply alignment.
5800  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5801  *pOffset = VmaAlignUp(*pOffset, alignment);
5802 
5803  // Check previous suballocations for BufferImageGranularity conflicts.
5804  // Make bigger alignment if necessary.
5805  if(bufferImageGranularity > 1)
5806  {
5807  bool bufferImageGranularityConflict = false;
5808  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5809  while(prevSuballocItem != m_Suballocations.cbegin())
5810  {
5811  --prevSuballocItem;
5812  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5813  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5814  {
5815  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5816  {
5817  bufferImageGranularityConflict = true;
5818  break;
5819  }
5820  }
5821  else
5822  // Already on previous page.
5823  break;
5824  }
5825  if(bufferImageGranularityConflict)
5826  {
5827  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5828  }
5829  }
5830 
5831  // Calculate padding at the beginning based on current offset.
5832  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5833 
5834  // Calculate required margin at the end if this is not last suballocation.
5835  VmaSuballocationList::const_iterator next = suballocItem;
5836  ++next;
5837  const VkDeviceSize requiredEndMargin =
5838  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5839 
5840  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5841  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5842  {
5843  return false;
5844  }
5845 
5846  // Check next suballocations for BufferImageGranularity conflicts.
5847  // If conflict exists, allocation cannot be made here.
5848  if(bufferImageGranularity > 1)
5849  {
5850  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5851  ++nextSuballocItem;
5852  while(nextSuballocItem != m_Suballocations.cend())
5853  {
5854  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5855  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5856  {
5857  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5858  {
5859  return false;
5860  }
5861  }
5862  else
5863  {
5864  // Already on next page.
5865  break;
5866  }
5867  ++nextSuballocItem;
5868  }
5869  }
5870  }
5871 
5872  // All tests passed: Success. pOffset is already filled.
5873  return true;
5874 }
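
The offset adjustment in CheckAllocation() is plain round-up arithmetic: the candidate offset is aligned to the larger of the requested alignment and VMA_DEBUG_ALIGNMENT, then additionally to bufferImageGranularity when a neighboring suballocation of a conflicting type lies on the same granularity "page". An illustrative stand-in for VmaAlignUp (an assumption about its behavior, not a copy of it):

    #include <cstdint>

    // Round value up to the nearest multiple of alignment (alignment > 0).
    static uint64_t AlignUp(uint64_t value, uint64_t alignment)
    {
        return (value + alignment - 1) / alignment * alignment;
    }

    // Example: AlignUp(70, 64) == 128. If a granularity conflict with the
    // previous suballocation is then detected and bufferImageGranularity is
    // 1024, AlignUp(128, 1024) == 1024 becomes the final candidate offset.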
5875 
5876 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5877 {
5878  VMA_ASSERT(item != m_Suballocations.end());
5879  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5880 
5881  VmaSuballocationList::iterator nextItem = item;
5882  ++nextItem;
5883  VMA_ASSERT(nextItem != m_Suballocations.end());
5884  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5885 
5886  item->size += nextItem->size;
5887  --m_FreeCount;
5888  m_Suballocations.erase(nextItem);
5889 }
5890 
5891 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5892 {
5893  // Change this suballocation to be marked as free.
5894  VmaSuballocation& suballoc = *suballocItem;
5895  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5896  suballoc.hAllocation = VK_NULL_HANDLE;
5897 
5898  // Update totals.
5899  ++m_FreeCount;
5900  m_SumFreeSize += suballoc.size;
5901 
5902  // Merge with previous and/or next suballocation if it's also free.
5903  bool mergeWithNext = false;
5904  bool mergeWithPrev = false;
5905 
5906  VmaSuballocationList::iterator nextItem = suballocItem;
5907  ++nextItem;
5908  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5909  {
5910  mergeWithNext = true;
5911  }
5912 
5913  VmaSuballocationList::iterator prevItem = suballocItem;
5914  if(suballocItem != m_Suballocations.begin())
5915  {
5916  --prevItem;
5917  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5918  {
5919  mergeWithPrev = true;
5920  }
5921  }
5922 
5923  if(mergeWithNext)
5924  {
5925  UnregisterFreeSuballocation(nextItem);
5926  MergeFreeWithNext(suballocItem);
5927  }
5928 
5929  if(mergeWithPrev)
5930  {
5931  UnregisterFreeSuballocation(prevItem);
5932  MergeFreeWithNext(prevItem);
5933  RegisterFreeSuballocation(prevItem);
5934  return prevItem;
5935  }
5936  else
5937  {
5938  RegisterFreeSuballocation(suballocItem);
5939  return suballocItem;
5940  }
5941 }
5942 
5943 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5944 {
5945  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5946  VMA_ASSERT(item->size > 0);
5947 
 5948  // You may want to enable this validation at the beginning or at the end of
 5949  // this function, depending on what you want to check.
5950  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5951 
5952  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5953  {
5954  if(m_FreeSuballocationsBySize.empty())
5955  {
5956  m_FreeSuballocationsBySize.push_back(item);
5957  }
5958  else
5959  {
5960  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5961  }
5962  }
5963 
5964  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5965 }
5966 
5967 
5968 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5969 {
5970  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5971  VMA_ASSERT(item->size > 0);
5972 
 5973  // You may want to enable this validation at the beginning or at the end of
 5974  // this function, depending on what you want to check.
5975  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5976 
5977  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5978  {
5979  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5980  m_FreeSuballocationsBySize.data(),
5981  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5982  item,
5983  VmaSuballocationItemSizeLess());
5984  for(size_t index = it - m_FreeSuballocationsBySize.data();
5985  index < m_FreeSuballocationsBySize.size();
5986  ++index)
5987  {
5988  if(m_FreeSuballocationsBySize[index] == item)
5989  {
5990  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5991  return;
5992  }
5993  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5994  }
5995  VMA_ASSERT(0 && "Not found.");
5996  }
5997 
5998  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5999 }
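
Register/UnregisterFreeSuballocation() keep m_FreeSuballocationsBySize sorted by size so best-fit lookups can use binary search; entries of equal size then need only a short linear scan, which is what the loop above does. The same bookkeeping expressed with standard containers (an illustrative sketch, not the library's actual types):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using SizeVector = std::vector<uint64_t>;

    void RegisterBySize(SizeVector& bySize, uint64_t size)
    {
        // Insert while keeping the vector sorted ascending by size.
        bySize.insert(std::lower_bound(bySize.begin(), bySize.end(), size), size);
    }

    void UnregisterBySize(SizeVector& bySize, uint64_t size)
    {
        // lower_bound finds the first entry not less than size; equal-sized
        // entries follow contiguously, mirroring the linear scan above.
        SizeVector::iterator it = std::lower_bound(bySize.begin(), bySize.end(), size);
        if(it != bySize.end() && *it == size)
        {
            bySize.erase(it);
        }
    }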
6000 
6002 // class VmaDeviceMemoryMapping
6003 
6004 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
6005  m_MapCount(0),
6006  m_pMappedData(VMA_NULL)
6007 {
6008 }
6009 
6010 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
6011 {
6012  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6013 }
6014 
6015 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
6016 {
6017  if(count == 0)
6018  {
6019  return VK_SUCCESS;
6020  }
6021 
6022  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6023  if(m_MapCount != 0)
6024  {
6025  m_MapCount += count;
6026  VMA_ASSERT(m_pMappedData != VMA_NULL);
6027  if(ppData != VMA_NULL)
6028  {
6029  *ppData = m_pMappedData;
6030  }
6031  return VK_SUCCESS;
6032  }
6033  else
6034  {
6035  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6036  hAllocator->m_hDevice,
6037  hMemory,
6038  0, // offset
6039  VK_WHOLE_SIZE,
6040  0, // flags
6041  &m_pMappedData);
6042  if(result == VK_SUCCESS)
6043  {
6044  if(ppData != VMA_NULL)
6045  {
6046  *ppData = m_pMappedData;
6047  }
6048  m_MapCount = count;
6049  }
6050  return result;
6051  }
6052 }
6053 
6054 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6055 {
6056  if(count == 0)
6057  {
6058  return;
6059  }
6060 
6061  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6062  if(m_MapCount >= count)
6063  {
6064  m_MapCount -= count;
6065  if(m_MapCount == 0)
6066  {
6067  m_pMappedData = VMA_NULL;
6068  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6069  }
6070  }
6071  else
6072  {
6073  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6074  }
6075 }
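
This per-block reference counting is what makes nested vmaMapMemory()/vmaUnmapMemory() calls cheap: only the transitions 0 -> 1 and 1 -> 0 touch Vulkan. A hedged usage sketch, assuming allocator and alloc were created earlier with the public API:

    void* pData1 = nullptr;
    if(vmaMapMemory(allocator, alloc, &pData1) == VK_SUCCESS)
    {
        void* pData2 = nullptr;
        vmaMapMemory(allocator, alloc, &pData2); // count 1 -> 2, no vkMapMemory call
        // pData1 == pData2: the underlying block is mapped exactly once.
        vmaUnmapMemory(allocator, alloc);        // count 2 -> 1
        vmaUnmapMemory(allocator, alloc);        // count 1 -> 0, calls vkUnmapMemory
    }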
6076 
6078 // class VmaDeviceMemoryBlock
6079 
6080 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6081  m_MemoryTypeIndex(UINT32_MAX),
6082  m_hMemory(VK_NULL_HANDLE),
6083  m_Metadata(hAllocator)
6084 {
6085 }
6086 
6087 void VmaDeviceMemoryBlock::Init(
6088  uint32_t newMemoryTypeIndex,
6089  VkDeviceMemory newMemory,
6090  VkDeviceSize newSize)
6091 {
6092  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6093 
6094  m_MemoryTypeIndex = newMemoryTypeIndex;
6095  m_hMemory = newMemory;
6096 
6097  m_Metadata.Init(newSize);
6098 }
6099 
6100 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6101 {
6102  // This is the most important assert in the entire library.
6103  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6104  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6105 
6106  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6107  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6108  m_hMemory = VK_NULL_HANDLE;
6109 }
6110 
6111 bool VmaDeviceMemoryBlock::Validate() const
6112 {
6113  if((m_hMemory == VK_NULL_HANDLE) ||
6114  (m_Metadata.GetSize() == 0))
6115  {
6116  return false;
6117  }
6118 
6119  return m_Metadata.Validate();
6120 }
6121 
6122 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6123 {
6124  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6125 }
6126 
6127 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6128 {
6129  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6130 }
6131 
6132 static void InitStatInfo(VmaStatInfo& outInfo)
6133 {
6134  memset(&outInfo, 0, sizeof(outInfo));
6135  outInfo.allocationSizeMin = UINT64_MAX;
6136  outInfo.unusedRangeSizeMin = UINT64_MAX;
6137 }
6138 
6139 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6140 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6141 {
6142  inoutInfo.blockCount += srcInfo.blockCount;
6143  inoutInfo.allocationCount += srcInfo.allocationCount;
6144  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6145  inoutInfo.usedBytes += srcInfo.usedBytes;
6146  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6147  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6148  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6149  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6150  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6151 }
6152 
6153 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6154 {
6155  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6156  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6157  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6158  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6159 }
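
These helpers aggregate per-block VmaStatInfo values into the totals returned by the public vmaCalculateStats(). A short usage sketch (assumes a valid allocator):

    #include <cstdio>

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("used %llu B in %u allocations, unused %llu B in %u ranges\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.unusedRangeCount);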
6160 
6161 VmaPool_T::VmaPool_T(
6162  VmaAllocator hAllocator,
6163  const VmaPoolCreateInfo& createInfo) :
6164  m_BlockVector(
6165  hAllocator,
6166  createInfo.memoryTypeIndex,
6167  createInfo.blockSize,
6168  createInfo.minBlockCount,
6169  createInfo.maxBlockCount,
6170  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6171  createInfo.frameInUseCount,
6172  true) // isCustomPool
6173 {
6174 }
6175 
6176 VmaPool_T::~VmaPool_T()
6177 {
6178 }
6179 
6180 #if VMA_STATS_STRING_ENABLED
6181 
6182 #endif // #if VMA_STATS_STRING_ENABLED
6183 
6184 VmaBlockVector::VmaBlockVector(
6185  VmaAllocator hAllocator,
6186  uint32_t memoryTypeIndex,
6187  VkDeviceSize preferredBlockSize,
6188  size_t minBlockCount,
6189  size_t maxBlockCount,
6190  VkDeviceSize bufferImageGranularity,
6191  uint32_t frameInUseCount,
6192  bool isCustomPool) :
6193  m_hAllocator(hAllocator),
6194  m_MemoryTypeIndex(memoryTypeIndex),
6195  m_PreferredBlockSize(preferredBlockSize),
6196  m_MinBlockCount(minBlockCount),
6197  m_MaxBlockCount(maxBlockCount),
6198  m_BufferImageGranularity(bufferImageGranularity),
6199  m_FrameInUseCount(frameInUseCount),
6200  m_IsCustomPool(isCustomPool),
6201  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6202  m_HasEmptyBlock(false),
6203  m_pDefragmentator(VMA_NULL)
6204 {
6205 }
6206 
6207 VmaBlockVector::~VmaBlockVector()
6208 {
6209  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6210 
6211  for(size_t i = m_Blocks.size(); i--; )
6212  {
6213  m_Blocks[i]->Destroy(m_hAllocator);
6214  vma_delete(m_hAllocator, m_Blocks[i]);
6215  }
6216 }
6217 
6218 VkResult VmaBlockVector::CreateMinBlocks()
6219 {
6220  for(size_t i = 0; i < m_MinBlockCount; ++i)
6221  {
6222  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6223  if(res != VK_SUCCESS)
6224  {
6225  return res;
6226  }
6227  }
6228  return VK_SUCCESS;
6229 }
6230 
6231 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6232 {
6233  pStats->size = 0;
6234  pStats->unusedSize = 0;
6235  pStats->allocationCount = 0;
6236  pStats->unusedRangeCount = 0;
6237  pStats->unusedRangeSizeMax = 0;
6238 
6239  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6240 
6241  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6242  {
6243  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6244  VMA_ASSERT(pBlock);
6245  VMA_HEAVY_ASSERT(pBlock->Validate());
6246  pBlock->m_Metadata.AddPoolStats(*pStats);
6247  }
6248 }
6249 
6250 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6251 
6252 VkResult VmaBlockVector::Allocate(
6253  VmaPool hCurrentPool,
6254  uint32_t currentFrameIndex,
6255  const VkMemoryRequirements& vkMemReq,
6256  const VmaAllocationCreateInfo& createInfo,
6257  VmaSuballocationType suballocType,
6258  VmaAllocation* pAllocation)
6259 {
6260  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6261  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6262 
6263  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6264 
6265  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6266  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6267  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6268  {
6269  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6270  VMA_ASSERT(pCurrBlock);
6271  VmaAllocationRequest currRequest = {};
6272  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6273  currentFrameIndex,
6274  m_FrameInUseCount,
6275  m_BufferImageGranularity,
6276  vkMemReq.size,
6277  vkMemReq.alignment,
6278  suballocType,
6279  false, // canMakeOtherLost
6280  &currRequest))
6281  {
6282  // Allocate from pCurrBlock.
6283  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6284 
6285  if(mapped)
6286  {
6287  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6288  if(res != VK_SUCCESS)
6289  {
6290  return res;
6291  }
6292  }
6293 
6294  // We no longer have an empty Allocation.
6295  if(pCurrBlock->m_Metadata.IsEmpty())
6296  {
6297  m_HasEmptyBlock = false;
6298  }
6299 
6300  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6301  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6302  (*pAllocation)->InitBlockAllocation(
6303  hCurrentPool,
6304  pCurrBlock,
6305  currRequest.offset,
6306  vkMemReq.alignment,
6307  vkMemReq.size,
6308  suballocType,
6309  mapped,
6310  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6311  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6312  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6313  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6314  return VK_SUCCESS;
6315  }
6316  }
6317 
6318  const bool canCreateNewBlock =
6319  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6320  (m_Blocks.size() < m_MaxBlockCount);
6321 
6322  // 2. Try to create new block.
6323  if(canCreateNewBlock)
6324  {
6325  // Calculate optimal size for new block.
6326  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6327  uint32_t newBlockSizeShift = 0;
6328  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6329 
6330  // Allocating blocks of other sizes is allowed only in default pools.
6331  // In custom pools block size is fixed.
6332  if(m_IsCustomPool == false)
6333  {
6334  // Allocate 1/8, 1/4, 1/2 as first blocks.
6335  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6336  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6337  {
6338  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6339  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6340  {
6341  newBlockSize = smallerNewBlockSize;
6342  ++newBlockSizeShift;
6343  }
6344  else
6345  {
6346  break;
6347  }
6348  }
6349  }
6350 
6351  size_t newBlockIndex = 0;
6352  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6353  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6354  if(m_IsCustomPool == false)
6355  {
6356  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6357  {
6358  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6359  if(smallerNewBlockSize >= vkMemReq.size)
6360  {
6361  newBlockSize = smallerNewBlockSize;
6362  ++newBlockSizeShift;
6363  res = CreateBlock(newBlockSize, &newBlockIndex);
6364  }
6365  else
6366  {
6367  break;
6368  }
6369  }
6370  }
6371 
6372  if(res == VK_SUCCESS)
6373  {
6374  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6375  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6376 
6377  if(mapped)
6378  {
6379  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6380  if(res != VK_SUCCESS)
6381  {
6382  return res;
6383  }
6384  }
6385 
 6386  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6387  VmaAllocationRequest allocRequest;
6388  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6389  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6390  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6391  (*pAllocation)->InitBlockAllocation(
6392  hCurrentPool,
6393  pBlock,
6394  allocRequest.offset,
6395  vkMemReq.alignment,
6396  vkMemReq.size,
6397  suballocType,
6398  mapped,
6399  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6400  VMA_HEAVY_ASSERT(pBlock->Validate());
 6401  VMA_DEBUG_LOG(" Created new allocation Size=%llu", newBlockSize);
6402  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6403  return VK_SUCCESS;
6404  }
6405  }
6406 
6407  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6408 
6409  // 3. Try to allocate from existing blocks with making other allocations lost.
6410  if(canMakeOtherLost)
6411  {
6412  uint32_t tryIndex = 0;
6413  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6414  {
6415  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6416  VmaAllocationRequest bestRequest = {};
6417  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6418 
6419  // 1. Search existing allocations.
6420  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6421  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6422  {
6423  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6424  VMA_ASSERT(pCurrBlock);
6425  VmaAllocationRequest currRequest = {};
6426  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6427  currentFrameIndex,
6428  m_FrameInUseCount,
6429  m_BufferImageGranularity,
6430  vkMemReq.size,
6431  vkMemReq.alignment,
6432  suballocType,
6433  canMakeOtherLost,
6434  &currRequest))
6435  {
6436  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6437  if(pBestRequestBlock == VMA_NULL ||
6438  currRequestCost < bestRequestCost)
6439  {
6440  pBestRequestBlock = pCurrBlock;
6441  bestRequest = currRequest;
6442  bestRequestCost = currRequestCost;
6443 
6444  if(bestRequestCost == 0)
6445  {
6446  break;
6447  }
6448  }
6449  }
6450  }
6451 
6452  if(pBestRequestBlock != VMA_NULL)
6453  {
6454  if(mapped)
6455  {
6456  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6457  if(res != VK_SUCCESS)
6458  {
6459  return res;
6460  }
6461  }
6462 
6463  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6464  currentFrameIndex,
6465  m_FrameInUseCount,
6466  &bestRequest))
6467  {
6468  // We no longer have an empty Allocation.
6469  if(pBestRequestBlock->m_Metadata.IsEmpty())
6470  {
6471  m_HasEmptyBlock = false;
6472  }
6473  // Allocate from this pBlock.
6474  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6475  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6476  (*pAllocation)->InitBlockAllocation(
6477  hCurrentPool,
6478  pBestRequestBlock,
6479  bestRequest.offset,
6480  vkMemReq.alignment,
6481  vkMemReq.size,
6482  suballocType,
6483  mapped,
6484  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6485  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
 6486  VMA_DEBUG_LOG(" Returned from existing block");
6487  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6488  return VK_SUCCESS;
6489  }
6490  // else: Some allocations must have been touched while we are here. Next try.
6491  }
6492  else
6493  {
6494  // Could not find place in any of the blocks - break outer loop.
6495  break;
6496  }
6497  }
 6498  /* Maximum number of tries exceeded - a very unlikely event, possible only when many
 6499  other threads are simultaneously touching allocations, making it impossible to make
 6500  them lost at the same time as we try to allocate. */
6501  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6502  {
6503  return VK_ERROR_TOO_MANY_OBJECTS;
6504  }
6505  }
6506 
6507  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6508 }
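
The three stages of Allocate() - reuse an existing block, create a new (possibly shrunken) block, then make other allocations lost - are steered by VmaAllocationCreateFlags. A hedged sketch, assuming allocator and a filled VkBufferCreateInfo bufCreateInfo:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // With this flag stage 2 is skipped: only existing blocks are searched,
    // and VK_ERROR_OUT_OF_DEVICE_MEMORY is returned if none can fit.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;
    // Adding VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT would enable
    // stage 3 against allocations created with CAN_BECOME_LOST.

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);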
6509 
6510 void VmaBlockVector::Free(
6511  VmaAllocation hAllocation)
6512 {
6513  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6514 
6515  // Scope for lock.
6516  {
6517  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6518 
6519  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6520 
6521  if(hAllocation->IsPersistentMap())
6522  {
6523  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6524  }
6525 
6526  pBlock->m_Metadata.Free(hAllocation);
6527  VMA_HEAVY_ASSERT(pBlock->Validate());
6528 
 6529  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6530 
6531  // pBlock became empty after this deallocation.
6532  if(pBlock->m_Metadata.IsEmpty())
6533  {
 6534  // We already have an empty Allocation. We don't want to have two, so delete this one.
6535  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6536  {
6537  pBlockToDelete = pBlock;
6538  Remove(pBlock);
6539  }
 6540  // We now have the first empty Allocation.
6541  else
6542  {
6543  m_HasEmptyBlock = true;
6544  }
6545  }
6546  // pBlock didn't become empty, but we have another empty block - find and free that one.
 6547  // (This is an optional heuristic.)
6548  else if(m_HasEmptyBlock)
6549  {
6550  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6551  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6552  {
6553  pBlockToDelete = pLastBlock;
6554  m_Blocks.pop_back();
6555  m_HasEmptyBlock = false;
6556  }
6557  }
6558 
6559  IncrementallySortBlocks();
6560  }
6561 
 6562  // Destruction of a free Allocation. Deferred until this point, outside of the mutex
 6563  // lock, for performance reasons.
6564  if(pBlockToDelete != VMA_NULL)
6565  {
6566  VMA_DEBUG_LOG(" Deleted empty allocation");
6567  pBlockToDelete->Destroy(m_hAllocator);
6568  vma_delete(m_hAllocator, pBlockToDelete);
6569  }
6570 }
6571 
6572 size_t VmaBlockVector::CalcMaxBlockSize() const
6573 {
6574  size_t result = 0;
6575  for(size_t i = m_Blocks.size(); i--; )
6576  {
6577  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6578  if(result >= m_PreferredBlockSize)
6579  {
6580  break;
6581  }
6582  }
6583  return result;
6584 }
6585 
6586 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6587 {
6588  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6589  {
6590  if(m_Blocks[blockIndex] == pBlock)
6591  {
6592  VmaVectorRemove(m_Blocks, blockIndex);
6593  return;
6594  }
6595  }
6596  VMA_ASSERT(0);
6597 }
6598 
6599 void VmaBlockVector::IncrementallySortBlocks()
6600 {
6601  // Bubble sort only until first swap.
6602  for(size_t i = 1; i < m_Blocks.size(); ++i)
6603  {
6604  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6605  {
6606  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6607  return;
6608  }
6609  }
6610 }
6611 
6612 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6613 {
6614  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6615  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6616  allocInfo.allocationSize = blockSize;
6617  VkDeviceMemory mem = VK_NULL_HANDLE;
6618  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6619  if(res < 0)
6620  {
6621  return res;
6622  }
6623 
6624  // New VkDeviceMemory successfully created.
6625 
 6626  // Create a new block object for it.
6627  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6628  pBlock->Init(
6629  m_MemoryTypeIndex,
6630  mem,
6631  allocInfo.allocationSize);
6632 
6633  m_Blocks.push_back(pBlock);
6634  if(pNewBlockIndex != VMA_NULL)
6635  {
6636  *pNewBlockIndex = m_Blocks.size() - 1;
6637  }
6638 
6639  return VK_SUCCESS;
6640 }
6641 
6642 #if VMA_STATS_STRING_ENABLED
6643 
6644 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6645 {
6646  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6647 
6648  json.BeginObject();
6649 
6650  if(m_IsCustomPool)
6651  {
6652  json.WriteString("MemoryTypeIndex");
6653  json.WriteNumber(m_MemoryTypeIndex);
6654 
6655  json.WriteString("BlockSize");
6656  json.WriteNumber(m_PreferredBlockSize);
6657 
6658  json.WriteString("BlockCount");
6659  json.BeginObject(true);
6660  if(m_MinBlockCount > 0)
6661  {
6662  json.WriteString("Min");
6663  json.WriteNumber((uint64_t)m_MinBlockCount);
6664  }
6665  if(m_MaxBlockCount < SIZE_MAX)
6666  {
6667  json.WriteString("Max");
6668  json.WriteNumber((uint64_t)m_MaxBlockCount);
6669  }
6670  json.WriteString("Cur");
6671  json.WriteNumber((uint64_t)m_Blocks.size());
6672  json.EndObject();
6673 
6674  if(m_FrameInUseCount > 0)
6675  {
6676  json.WriteString("FrameInUseCount");
6677  json.WriteNumber(m_FrameInUseCount);
6678  }
6679  }
6680  else
6681  {
6682  json.WriteString("PreferredBlockSize");
6683  json.WriteNumber(m_PreferredBlockSize);
6684  }
6685 
6686  json.WriteString("Blocks");
6687  json.BeginArray();
6688  for(size_t i = 0; i < m_Blocks.size(); ++i)
6689  {
6690  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6691  }
6692  json.EndArray();
6693 
6694  json.EndObject();
6695 }
6696 
6697 #endif // #if VMA_STATS_STRING_ENABLED
6698 
6699 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6700  VmaAllocator hAllocator,
6701  uint32_t currentFrameIndex)
6702 {
6703  if(m_pDefragmentator == VMA_NULL)
6704  {
6705  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6706  hAllocator,
6707  this,
6708  currentFrameIndex);
6709  }
6710 
6711  return m_pDefragmentator;
6712 }
6713 
6714 VkResult VmaBlockVector::Defragment(
6715  VmaDefragmentationStats* pDefragmentationStats,
6716  VkDeviceSize& maxBytesToMove,
6717  uint32_t& maxAllocationsToMove)
6718 {
6719  if(m_pDefragmentator == VMA_NULL)
6720  {
6721  return VK_SUCCESS;
6722  }
6723 
6724  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6725 
6726  // Defragment.
6727  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6728 
6729  // Accumulate statistics.
6730  if(pDefragmentationStats != VMA_NULL)
6731  {
6732  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6733  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6734  pDefragmentationStats->bytesMoved += bytesMoved;
6735  pDefragmentationStats->allocationsMoved += allocationsMoved;
6736  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6737  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6738  maxBytesToMove -= bytesMoved;
6739  maxAllocationsToMove -= allocationsMoved;
6740  }
6741 
6742  // Free empty blocks.
6743  m_HasEmptyBlock = false;
6744  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6745  {
6746  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6747  if(pBlock->m_Metadata.IsEmpty())
6748  {
6749  if(m_Blocks.size() > m_MinBlockCount)
6750  {
6751  if(pDefragmentationStats != VMA_NULL)
6752  {
6753  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6754  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6755  }
6756 
6757  VmaVectorRemove(m_Blocks, blockIndex);
6758  pBlock->Destroy(m_hAllocator);
6759  vma_delete(m_hAllocator, pBlock);
6760  }
6761  else
6762  {
6763  m_HasEmptyBlock = true;
6764  }
6765  }
6766  }
6767 
6768  return result;
6769 }
6770 
6771 void VmaBlockVector::DestroyDefragmentator()
6772 {
6773  if(m_pDefragmentator != VMA_NULL)
6774  {
6775  vma_delete(m_hAllocator, m_pDefragmentator);
6776  m_pDefragmentator = VMA_NULL;
6777  }
6778 }
6779 
6780 void VmaBlockVector::MakePoolAllocationsLost(
6781  uint32_t currentFrameIndex,
6782  size_t* pLostAllocationCount)
6783 {
6784  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6785  size_t lostAllocationCount = 0;
6786  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6787  {
6788  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6789  VMA_ASSERT(pBlock);
6790  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6791  }
6792  if(pLostAllocationCount != VMA_NULL)
6793  {
6794  *pLostAllocationCount = lostAllocationCount;
6795  }
6796 }
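
MakeAllocationsLost() runs per block; from the application side the machinery is driven by advancing the frame index and checking allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. A hedged per-frame sketch:

    vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, alloc, &allocInfo);
    if(allocInfo.deviceMemory == VK_NULL_HANDLE)
    {
        // The allocation has become lost: its memory was reused. Recreate
        // the resource (or bind it to a fresh allocation) before use.
    }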
6797 
6798 void VmaBlockVector::AddStats(VmaStats* pStats)
6799 {
6800  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6801  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6802 
6803  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6804 
6805  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6806  {
6807  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6808  VMA_ASSERT(pBlock);
6809  VMA_HEAVY_ASSERT(pBlock->Validate());
6810  VmaStatInfo allocationStatInfo;
6811  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6812  VmaAddStatInfo(pStats->total, allocationStatInfo);
6813  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6814  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6815  }
6816 }
6817 
6819 // VmaDefragmentator members definition
6820 
6821 VmaDefragmentator::VmaDefragmentator(
6822  VmaAllocator hAllocator,
6823  VmaBlockVector* pBlockVector,
6824  uint32_t currentFrameIndex) :
6825  m_hAllocator(hAllocator),
6826  m_pBlockVector(pBlockVector),
6827  m_CurrentFrameIndex(currentFrameIndex),
6828  m_BytesMoved(0),
6829  m_AllocationsMoved(0),
6830  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6831  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6832 {
6833 }
6834 
6835 VmaDefragmentator::~VmaDefragmentator()
6836 {
6837  for(size_t i = m_Blocks.size(); i--; )
6838  {
6839  vma_delete(m_hAllocator, m_Blocks[i]);
6840  }
6841 }
6842 
6843 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6844 {
6845  AllocationInfo allocInfo;
6846  allocInfo.m_hAllocation = hAlloc;
6847  allocInfo.m_pChanged = pChanged;
6848  m_Allocations.push_back(allocInfo);
6849 }
6850 
6851 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6852 {
6853  // It has already been mapped for defragmentation.
6854  if(m_pMappedDataForDefragmentation)
6855  {
6856  *ppMappedData = m_pMappedDataForDefragmentation;
6857  return VK_SUCCESS;
6858  }
6859 
6860  // It is originally mapped.
6861  if(m_pBlock->m_Mapping.GetMappedData())
6862  {
6863  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6864  return VK_SUCCESS;
6865  }
6866 
6867  // Map on first usage.
6868  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6869  *ppMappedData = m_pMappedDataForDefragmentation;
6870  return res;
6871 }
6872 
6873 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6874 {
6875  if(m_pMappedDataForDefragmentation != VMA_NULL)
6876  {
6877  m_pBlock->Unmap(hAllocator, 1);
6878  }
6879 }
6880 
6881 VkResult VmaDefragmentator::DefragmentRound(
6882  VkDeviceSize maxBytesToMove,
6883  uint32_t maxAllocationsToMove)
6884 {
6885  if(m_Blocks.empty())
6886  {
6887  return VK_SUCCESS;
6888  }
6889 
6890  size_t srcBlockIndex = m_Blocks.size() - 1;
6891  size_t srcAllocIndex = SIZE_MAX;
6892  for(;;)
6893  {
6894  // 1. Find next allocation to move.
6895  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6896  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6897  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6898  {
6899  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6900  {
6901  // Finished: no more allocations to process.
6902  if(srcBlockIndex == 0)
6903  {
6904  return VK_SUCCESS;
6905  }
6906  else
6907  {
6908  --srcBlockIndex;
6909  srcAllocIndex = SIZE_MAX;
6910  }
6911  }
6912  else
6913  {
6914  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6915  }
6916  }
6917 
6918  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6919  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6920 
6921  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6922  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6923  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6924  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6925 
6926  // 2. Try to find new place for this allocation in preceding or current block.
6927  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6928  {
6929  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6930  VmaAllocationRequest dstAllocRequest;
6931  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6932  m_CurrentFrameIndex,
6933  m_pBlockVector->GetFrameInUseCount(),
6934  m_pBlockVector->GetBufferImageGranularity(),
6935  size,
6936  alignment,
6937  suballocType,
6938  false, // canMakeOtherLost
6939  &dstAllocRequest) &&
6940  MoveMakesSense(
6941  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6942  {
6943  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6944 
6945  // Reached limit on number of allocations or bytes to move.
6946  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6947  (m_BytesMoved + size > maxBytesToMove))
6948  {
6949  return VK_INCOMPLETE;
6950  }
6951 
6952  void* pDstMappedData = VMA_NULL;
6953  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6954  if(res != VK_SUCCESS)
6955  {
6956  return res;
6957  }
6958 
6959  void* pSrcMappedData = VMA_NULL;
6960  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6961  if(res != VK_SUCCESS)
6962  {
6963  return res;
6964  }
6965 
6966  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6967  memcpy(
6968  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6969  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6970  static_cast<size_t>(size));
6971 
6972  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6973  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6974 
6975  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6976 
6977  if(allocInfo.m_pChanged != VMA_NULL)
6978  {
6979  *allocInfo.m_pChanged = VK_TRUE;
6980  }
6981 
6982  ++m_AllocationsMoved;
6983  m_BytesMoved += size;
6984 
6985  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6986 
6987  break;
6988  }
6989  }
6990 
 6991  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6992 
6993  if(srcAllocIndex > 0)
6994  {
6995  --srcAllocIndex;
6996  }
6997  else
6998  {
6999  if(srcBlockIndex > 0)
7000  {
7001  --srcBlockIndex;
7002  srcAllocIndex = SIZE_MAX;
7003  }
7004  else
7005  {
7006  return VK_SUCCESS;
7007  }
7008  }
7009  }
7010 }
7011 
7012 VkResult VmaDefragmentator::Defragment(
7013  VkDeviceSize maxBytesToMove,
7014  uint32_t maxAllocationsToMove)
7015 {
7016  if(m_Allocations.empty())
7017  {
7018  return VK_SUCCESS;
7019  }
7020 
7021  // Create block info for each block.
7022  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7023  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7024  {
7025  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7026  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7027  m_Blocks.push_back(pBlockInfo);
7028  }
7029 
7030  // Sort them by m_pBlock pointer value.
7031  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7032 
 7033  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
7034  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7035  {
7036  AllocationInfo& allocInfo = m_Allocations[blockIndex];
 7037  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check that this allocation was not lost.
7038  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7039  {
7040  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7041  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7042  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7043  {
7044  (*it)->m_Allocations.push_back(allocInfo);
7045  }
7046  else
7047  {
7048  VMA_ASSERT(0);
7049  }
7050  }
7051  }
7052  m_Allocations.clear();
7053 
7054  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7055  {
7056  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7057  pBlockInfo->CalcHasNonMovableAllocations();
7058  pBlockInfo->SortAllocationsBySizeDescecnding();
7059  }
7060 
 7061  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7062  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7063 
7064  // Execute defragmentation rounds (the main part).
7065  VkResult result = VK_SUCCESS;
7066  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7067  {
7068  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7069  }
7070 
7071  // Unmap blocks that were mapped for defragmentation.
7072  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7073  {
7074  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7075  }
7076 
7077  return result;
7078 }
7079 
7080 bool VmaDefragmentator::MoveMakesSense(
7081  size_t dstBlockIndex, VkDeviceSize dstOffset,
7082  size_t srcBlockIndex, VkDeviceSize srcOffset)
7083 {
7084  if(dstBlockIndex < srcBlockIndex)
7085  {
7086  return true;
7087  }
7088  if(dstBlockIndex > srcBlockIndex)
7089  {
7090  return false;
7091  }
7092  if(dstOffset < srcOffset)
7093  {
7094  return true;
7095  }
7096  return false;
7097 }
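
The defragmentator is reached through the public vmaDefragment(), which fills a VmaDefragmentationStats and flags every moved allocation. A hedged usage sketch, assuming allocs holds allocations whose memory is not currently in use by the GPU:

    #include <vector>

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = 64ull * 1024 * 1024; // stop after 64 MiB moved
    defragInfo.maxAllocationsToMove = 128;

    std::vector<VkBool32> changed(allocs.size());
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocs.data(), allocs.size(), changed.data(), &defragInfo, &stats);
    // Wherever changed[i] == VK_TRUE, the allocation was moved: buffers or
    // images bound to it must be destroyed, recreated and rebound.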
7098 
7100 // VmaAllocator_T
7101 
7102 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7103  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7104  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7105  m_hDevice(pCreateInfo->device),
7106  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7107  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7108  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7109  m_PreferredLargeHeapBlockSize(0),
7110  m_PhysicalDevice(pCreateInfo->physicalDevice),
7111  m_CurrentFrameIndex(0),
7112  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7113 {
7114  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7115 
7116  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
7117  memset(&m_MemProps, 0, sizeof(m_MemProps));
7118  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7119 
7120  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7121  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7122 
7123  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7124  {
7125  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7126  }
7127 
7128  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7129  {
7130  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7131  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7132  }
7133 
7134  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7135 
7136  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7137  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7138 
7139  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7140  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7141 
7142  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7143  {
7144  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7145  {
7146  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7147  if(limit != VK_WHOLE_SIZE)
7148  {
7149  m_HeapSizeLimit[heapIndex] = limit;
7150  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7151  {
7152  m_MemProps.memoryHeaps[heapIndex].size = limit;
7153  }
7154  }
7155  }
7156  }
7157 
7158  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7159  {
7160  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7161 
7162  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7163  this,
7164  memTypeIndex,
7165  preferredBlockSize,
7166  0,
7167  SIZE_MAX,
7168  GetBufferImageGranularity(),
7169  pCreateInfo->frameInUseCount,
7170  false); // isCustomPool
 7171  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
 7172  // because minBlockCount is 0.
7173  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7174  }
7175 }
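
The clamping of m_HeapSizeLimit above is the implementation of VmaAllocatorCreateInfo::pHeapSizeLimit. A hedged setup sketch, assuming a device with two memory heaps (the array needs one entry per heap, with VK_WHOLE_SIZE meaning "no limit"):

    VkDeviceSize heapLimits[] = { 256ull * 1024 * 1024, VK_WHOLE_SIZE };

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits; // heap 0 capped at 256 MiB
    allocatorInfo.frameInUseCount = 1;         // this frame and the previous one count as "in use"

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);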
7176 
7177 VmaAllocator_T::~VmaAllocator_T()
7178 {
7179  VMA_ASSERT(m_Pools.empty());
7180 
7181  for(size_t i = GetMemoryTypeCount(); i--; )
7182  {
7183  vma_delete(this, m_pDedicatedAllocations[i]);
7184  vma_delete(this, m_pBlockVectors[i]);
7185  }
7186 }
7187 
7188 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7189 {
7190 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7191  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7192  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7193  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7194  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7195  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7196  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7197  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7198  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7199  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7200  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7201  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7202  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7203  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7204  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7205  if(m_UseKhrDedicatedAllocation)
7206  {
7207  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7208  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7209  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7210  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7211  }
7212 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7213 
7214 #define VMA_COPY_IF_NOT_NULL(funcName) \
7215  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7216 
7217  if(pVulkanFunctions != VMA_NULL)
7218  {
7219  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7220  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7221  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7222  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7223  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7224  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7225  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7226  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7227  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7228  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7229  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7230  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7231  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7232  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7233  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7234  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7235  }
7236 
7237 #undef VMA_COPY_IF_NOT_NULL
7238 
7239  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7240  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7241  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7242  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7243  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7244  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7245  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7246  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7247  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7248  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7249  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7250  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7251  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7252  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7253  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7254  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7255  if(m_UseKhrDedicatedAllocation)
7256  {
7257  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7258  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7259  }
7260 }
7261 
7262 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7263 {
7264  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7265  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7266  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7267  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7268 }
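// Editor's note (illustrative, not part of the library): worked example of the
// heuristic above. A heap at or below VMA_SMALL_HEAP_MAX_SIZE gets blocks of
// 1/8 of its size, so a 256 MiB "small" heap yields 32 MiB blocks; a larger
// heap uses m_PreferredLargeHeapBlockSize, which comes from
// VmaAllocatorCreateInfo::preferredLargeHeapBlockSize (or a library default).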
7269 
7270 VkResult VmaAllocator_T::AllocateMemoryOfType(
7271  const VkMemoryRequirements& vkMemReq,
7272  bool dedicatedAllocation,
7273  VkBuffer dedicatedBuffer,
7274  VkImage dedicatedImage,
7275  const VmaAllocationCreateInfo& createInfo,
7276  uint32_t memTypeIndex,
7277  VmaSuballocationType suballocType,
7278  VmaAllocation* pAllocation)
7279 {
7280  VMA_ASSERT(pAllocation != VMA_NULL);
7281  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7282 
7283  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7284 
7285  // If memory type is not HOST_VISIBLE, disable MAPPED.
7286  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7287  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7288  {
7289  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7290  }
7291 
7292  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7293  VMA_ASSERT(blockVector);
7294 
7295  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7296  bool preferDedicatedMemory =
7297  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7298  dedicatedAllocation ||
7299  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7300  vkMemReq.size > preferredBlockSize / 2;
7301 
7302  if(preferDedicatedMemory &&
7303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7304  finalCreateInfo.pool == VK_NULL_HANDLE)
7305  {
7306  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7307  }
7308 
7309  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7310  {
7311  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7312  {
7313  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7314  }
7315  else
7316  {
7317  return AllocateDedicatedMemory(
7318  vkMemReq.size,
7319  suballocType,
7320  memTypeIndex,
7321  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7322  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7323  finalCreateInfo.pUserData,
7324  dedicatedBuffer,
7325  dedicatedImage,
7326  pAllocation);
7327  }
7328  }
7329  else
7330  {
7331  VkResult res = blockVector->Allocate(
7332  VK_NULL_HANDLE, // hCurrentPool
7333  m_CurrentFrameIndex.load(),
7334  vkMemReq,
7335  finalCreateInfo,
7336  suballocType,
7337  pAllocation);
7338  if(res == VK_SUCCESS)
7339  {
7340  return res;
7341  }
7342 
7343  // Block allocation failed: try dedicated memory as a fallback.
7344  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7345  {
7346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7347  }
7348  else
7349  {
7350  res = AllocateDedicatedMemory(
7351  vkMemReq.size,
7352  suballocType,
7353  memTypeIndex,
7354  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7355  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7356  finalCreateInfo.pUserData,
7357  dedicatedBuffer,
7358  dedicatedImage,
7359  pAllocation);
7360  if(res == VK_SUCCESS)
7361  {
7362  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
7363  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7364  return VK_SUCCESS;
7365  }
7366  else
7367  {
7368  // Everything failed: Return error code.
7369  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7370  return res;
7371  }
7372  }
7373  }
7374 }
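// Usage sketch (illustrative, not part of the library): both ends of the
// dedicated-vs-block heuristic above can be forced from user code.
/*
    VmaAllocationCreateInfo allocCreateInfo = {};
    // Always use a separate VkDeviceMemory object for this allocation:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    // ...or never call vkAllocateMemory, only suballocate from existing blocks
    // (per the code above, the allocation then fails with
    // VK_ERROR_OUT_OF_DEVICE_MEMORY if no block has room):
    // allocCreateInfo.flags = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;
*/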
7375 
7376 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7377  VkDeviceSize size,
7378  VmaSuballocationType suballocType,
7379  uint32_t memTypeIndex,
7380  bool map,
7381  bool isUserDataString,
7382  void* pUserData,
7383  VkBuffer dedicatedBuffer,
7384  VkImage dedicatedImage,
7385  VmaAllocation* pAllocation)
7386 {
7387  VMA_ASSERT(pAllocation);
7388 
7389  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7390  allocInfo.memoryTypeIndex = memTypeIndex;
7391  allocInfo.allocationSize = size;
7392 
7393  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7394  if(m_UseKhrDedicatedAllocation)
7395  {
7396  if(dedicatedBuffer != VK_NULL_HANDLE)
7397  {
7398  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7399  dedicatedAllocInfo.buffer = dedicatedBuffer;
7400  allocInfo.pNext = &dedicatedAllocInfo;
7401  }
7402  else if(dedicatedImage != VK_NULL_HANDLE)
7403  {
7404  dedicatedAllocInfo.image = dedicatedImage;
7405  allocInfo.pNext = &dedicatedAllocInfo;
7406  }
7407  }
7408 
7409  // Allocate VkDeviceMemory.
7410  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7411  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7412  if(res < 0)
7413  {
7414  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7415  return res;
7416  }
7417 
7418  void* pMappedData = VMA_NULL;
7419  if(map)
7420  {
7421  res = (*m_VulkanFunctions.vkMapMemory)(
7422  m_hDevice,
7423  hMemory,
7424  0,
7425  VK_WHOLE_SIZE,
7426  0,
7427  &pMappedData);
7428  if(res < 0)
7429  {
7430  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7431  FreeVulkanMemory(memTypeIndex, size, hMemory);
7432  return res;
7433  }
7434  }
7435 
7436  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7437  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7438  (*pAllocation)->SetUserData(this, pUserData);
7439 
7440  // Register it in m_pDedicatedAllocations.
7441  {
7442  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7443  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7444  VMA_ASSERT(pDedicatedAllocations);
7445  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7446  }
7447 
7448  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7449 
7450  return VK_SUCCESS;
7451 }
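// Illustrative sketch (not part of the library): the raw Vulkan equivalent of
// the pNext chaining performed above when VK_KHR_dedicated_allocation is used.
// `device`, `buffer`, `memReq`, and `memoryTypeIndex` are assumed to exist in
// the caller's code.
/*
    VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
        { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    dedicatedInfo.buffer = buffer; // The buffer this memory is dedicated to.

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.pNext = &dedicatedInfo; // Chain the dedicated-allocation info.
    allocInfo.allocationSize = memReq.size;
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkResult res = vkAllocateMemory(device, &allocInfo, NULL, &memory);
*/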
7452 
7453 void VmaAllocator_T::GetBufferMemoryRequirements(
7454  VkBuffer hBuffer,
7455  VkMemoryRequirements& memReq,
7456  bool& requiresDedicatedAllocation,
7457  bool& prefersDedicatedAllocation) const
7458 {
7459  if(m_UseKhrDedicatedAllocation)
7460  {
7461  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7462  memReqInfo.buffer = hBuffer;
7463 
7464  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7465 
7466  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7467  memReq2.pNext = &memDedicatedReq;
7468 
7469  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7470 
7471  memReq = memReq2.memoryRequirements;
7472  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7473  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7474  }
7475  else
7476  {
7477  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7478  requiresDedicatedAllocation = false;
7479  prefersDedicatedAllocation = false;
7480  }
7481 }
7482 
7483 void VmaAllocator_T::GetImageMemoryRequirements(
7484  VkImage hImage,
7485  VkMemoryRequirements& memReq,
7486  bool& requiresDedicatedAllocation,
7487  bool& prefersDedicatedAllocation) const
7488 {
7489  if(m_UseKhrDedicatedAllocation)
7490  {
7491  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7492  memReqInfo.image = hImage;
7493 
7494  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7495 
7496  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7497  memReq2.pNext = &memDedicatedReq;
7498 
7499  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7500 
7501  memReq = memReq2.memoryRequirements;
7502  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7503  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7504  }
7505  else
7506  {
7507  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7508  requiresDedicatedAllocation = false;
7509  prefersDedicatedAllocation = false;
7510  }
7511 }
7512 
7513 VkResult VmaAllocator_T::AllocateMemory(
7514  const VkMemoryRequirements& vkMemReq,
7515  bool requiresDedicatedAllocation,
7516  bool prefersDedicatedAllocation,
7517  VkBuffer dedicatedBuffer,
7518  VkImage dedicatedImage,
7519  const VmaAllocationCreateInfo& createInfo,
7520  VmaSuballocationType suballocType,
7521  VmaAllocation* pAllocation)
7522 {
7523  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7524  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7525  {
7526  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7527  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7528  }
7529  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7530  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7531  {
7532  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7534  }
7535  if(requiresDedicatedAllocation)
7536  {
7537  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7538  {
7539  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7541  }
7542  if(createInfo.pool != VK_NULL_HANDLE)
7543  {
7544  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7546  }
7547  }
7548  if((createInfo.pool != VK_NULL_HANDLE) &&
7549  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7550  {
7551  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7552  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7553  }
7554 
7555  if(createInfo.pool != VK_NULL_HANDLE)
7556  {
7557  return createInfo.pool->m_BlockVector.Allocate(
7558  createInfo.pool,
7559  m_CurrentFrameIndex.load(),
7560  vkMemReq,
7561  createInfo,
7562  suballocType,
7563  pAllocation);
7564  }
7565  else
7566  {
7567  // Bit mask of Vulkan memory types acceptable for this allocation.
7568  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7569  uint32_t memTypeIndex = UINT32_MAX;
7570  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7571  if(res == VK_SUCCESS)
7572  {
7573  res = AllocateMemoryOfType(
7574  vkMemReq,
7575  requiresDedicatedAllocation || prefersDedicatedAllocation,
7576  dedicatedBuffer,
7577  dedicatedImage,
7578  createInfo,
7579  memTypeIndex,
7580  suballocType,
7581  pAllocation);
7582  // Succeeded on first try.
7583  if(res == VK_SUCCESS)
7584  {
7585  return res;
7586  }
7587  // Allocation from this memory type failed. Try other compatible memory types.
7588  else
7589  {
7590  for(;;)
7591  {
7592  // Remove old memTypeIndex from list of possibilities.
7593  memoryTypeBits &= ~(1u << memTypeIndex);
7594  // Find alternative memTypeIndex.
7595  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7596  if(res == VK_SUCCESS)
7597  {
7598  res = AllocateMemoryOfType(
7599  vkMemReq,
7600  requiresDedicatedAllocation || prefersDedicatedAllocation,
7601  dedicatedBuffer,
7602  dedicatedImage,
7603  createInfo,
7604  memTypeIndex,
7605  suballocType,
7606  pAllocation);
7607  // Allocation from this alternative memory type succeeded.
7608  if(res == VK_SUCCESS)
7609  {
7610  return res;
7611  }
7612  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7613  }
7614  // No other matching memory type index could be found.
7615  else
7616  {
7617  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7619  }
7620  }
7621  }
7622  }
7623  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7624  else
7625  return res;
7626  }
7627 }
7628 
7629 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7630 {
7631  VMA_ASSERT(allocation);
7632 
7633  if(allocation->CanBecomeLost() == false ||
7634  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7635  {
7636  switch(allocation->GetType())
7637  {
7638  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7639  {
7640  VmaBlockVector* pBlockVector = VMA_NULL;
7641  VmaPool hPool = allocation->GetPool();
7642  if(hPool != VK_NULL_HANDLE)
7643  {
7644  pBlockVector = &hPool->m_BlockVector;
7645  }
7646  else
7647  {
7648  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7649  pBlockVector = m_pBlockVectors[memTypeIndex];
7650  }
7651  pBlockVector->Free(allocation);
7652  }
7653  break;
7654  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7655  FreeDedicatedMemory(allocation);
7656  break;
7657  default:
7658  VMA_ASSERT(0);
7659  }
7660  }
7661 
7662  allocation->SetUserData(this, VMA_NULL);
7663  vma_delete(this, allocation);
7664 }
7665 
7666 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7667 {
7668  // Initialize.
7669  InitStatInfo(pStats->total);
7670  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7671  InitStatInfo(pStats->memoryType[i]);
7672  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7673  InitStatInfo(pStats->memoryHeap[i]);
7674 
7675  // Process default pools.
7676  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7677  {
7678  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7679  VMA_ASSERT(pBlockVector);
7680  pBlockVector->AddStats(pStats);
7681  }
7682 
7683  // Process custom pools.
7684  {
7685  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7686  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7687  {
7688  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7689  }
7690  }
7691 
7692  // Process dedicated allocations.
7693  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7694  {
7695  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7696  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7697  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7698  VMA_ASSERT(pDedicatedAllocVector);
7699  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7700  {
7701  VmaStatInfo allocationStatInfo;
7702  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7703  VmaAddStatInfo(pStats->total, allocationStatInfo);
7704  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7705  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7706  }
7707  }
7708 
7709  // Postprocess.
7710  VmaPostprocessCalcStatInfo(pStats->total);
7711  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7712  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7713  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7714  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7715 }
7716 
7717 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7718 
7719 VkResult VmaAllocator_T::Defragment(
7720  VmaAllocation* pAllocations,
7721  size_t allocationCount,
7722  VkBool32* pAllocationsChanged,
7723  const VmaDefragmentationInfo* pDefragmentationInfo,
7724  VmaDefragmentationStats* pDefragmentationStats)
7725 {
7726  if(pAllocationsChanged != VMA_NULL)
7727  {
7728  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
7729  }
7730  if(pDefragmentationStats != VMA_NULL)
7731  {
7732  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7733  }
7734 
7735  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7736 
7737  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7738 
7739  const size_t poolCount = m_Pools.size();
7740 
7741  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7742  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7743  {
7744  VmaAllocation hAlloc = pAllocations[allocIndex];
7745  VMA_ASSERT(hAlloc);
7746  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7747  // DedicatedAlloc cannot be defragmented.
7748  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7749  // Only HOST_VISIBLE memory types can be defragmented.
7750  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7751  // Lost allocation cannot be defragmented.
7752  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7753  {
7754  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7755 
7756  const VmaPool hAllocPool = hAlloc->GetPool();
7757  // This allocation belongs to custom pool.
7758  if(hAllocPool != VK_NULL_HANDLE)
7759  {
7760  pAllocBlockVector = &hAllocPool->GetBlockVector();
7761  }
7762  // This allocation belongs to general pool.
7763  else
7764  {
7765  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7766  }
7767 
7768  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7769 
7770  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7771  &pAllocationsChanged[allocIndex] : VMA_NULL;
7772  pDefragmentator->AddAllocation(hAlloc, pChanged);
7773  }
7774  }
7775 
7776  VkResult result = VK_SUCCESS;
7777 
7778  // ======== Main processing.
7779 
7780  VkDeviceSize maxBytesToMove = SIZE_MAX;
7781  uint32_t maxAllocationsToMove = UINT32_MAX;
7782  if(pDefragmentationInfo != VMA_NULL)
7783  {
7784  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7785  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7786  }
7787 
7788  // Process standard memory.
7789  for(uint32_t memTypeIndex = 0;
7790  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7791  ++memTypeIndex)
7792  {
7793  // Only HOST_VISIBLE memory types can be defragmented.
7794  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7795  {
7796  result = m_pBlockVectors[memTypeIndex]->Defragment(
7797  pDefragmentationStats,
7798  maxBytesToMove,
7799  maxAllocationsToMove);
7800  }
7801  }
7802 
7803  // Process custom pools.
7804  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7805  {
7806  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7807  pDefragmentationStats,
7808  maxBytesToMove,
7809  maxAllocationsToMove);
7810  }
7811 
7812  // ======== Destroy defragmentators.
7813 
7814  // Process custom pools.
7815  for(size_t poolIndex = poolCount; poolIndex--; )
7816  {
7817  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7818  }
7819 
7820  // Process standard memory.
7821  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7822  {
7823  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7824  {
7825  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7826  }
7827  }
7828 
7829  return result;
7830 }
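// Usage sketch (illustrative, not part of the library): defragmenting a set of
// allocations; `allocator` and a std::vector<VmaAllocation> named `allocations`
// are assumed to exist. Buffers/images bound to allocations reported as changed
// must be re-bound or recreated by the caller afterwards.
/*
    std::vector<VkBool32> allocationsChanged(allocations.size());
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations.data(),
        allocations.size(),
        allocationsChanged.data(),
        VMA_NULL, // pDefragmentationInfo: use default move limits.
        &defragStats);
*/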
7831 
7832 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7833 {
7834  if(hAllocation->CanBecomeLost())
7835  {
7836  /*
7837  Warning: This is a carefully designed algorithm.
7838  Do not modify unless you really know what you're doing :)
7839  */
7840  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7841  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7842  for(;;)
7843  {
7844  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7845  {
7846  pAllocationInfo->memoryType = UINT32_MAX;
7847  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7848  pAllocationInfo->offset = 0;
7849  pAllocationInfo->size = hAllocation->GetSize();
7850  pAllocationInfo->pMappedData = VMA_NULL;
7851  pAllocationInfo->pUserData = hAllocation->GetUserData();
7852  return;
7853  }
7854  else if(localLastUseFrameIndex == localCurrFrameIndex)
7855  {
7856  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7857  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7858  pAllocationInfo->offset = hAllocation->GetOffset();
7859  pAllocationInfo->size = hAllocation->GetSize();
7860  pAllocationInfo->pMappedData = VMA_NULL;
7861  pAllocationInfo->pUserData = hAllocation->GetUserData();
7862  return;
7863  }
7864  else // Last use time earlier than current time.
7865  {
7866  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7867  {
7868  localLastUseFrameIndex = localCurrFrameIndex;
7869  }
7870  }
7871  }
7872  }
7873  else
7874  {
7875  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7876  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7877  pAllocationInfo->offset = hAllocation->GetOffset();
7878  pAllocationInfo->size = hAllocation->GetSize();
7879  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7880  pAllocationInfo->pUserData = hAllocation->GetUserData();
7881  }
7882 }
7883 
7884 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7885 {
7886  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7887  if(hAllocation->CanBecomeLost())
7888  {
7889  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7890  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7891  for(;;)
7892  {
7893  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7894  {
7895  return false;
7896  }
7897  else if(localLastUseFrameIndex == localCurrFrameIndex)
7898  {
7899  return true;
7900  }
7901  else // Last use time earlier than current time.
7902  {
7903  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7904  {
7905  localLastUseFrameIndex = localCurrFrameIndex;
7906  }
7907  }
7908  }
7909  }
7910  else
7911  {
7912  return true;
7913  }
7914 }
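// Usage sketch (illustrative, not part of the library): typical per-frame
// handling of an allocation created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT; `allocator`, `alloc`, and
// `frameIndex` are assumed to exist in the caller's code.
/*
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // The allocation was reclaimed: free it and recreate the resource.
    }
*/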
7915 
7916 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7917 {
7918  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7919 
7920  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7921 
7922  if(newCreateInfo.maxBlockCount == 0)
7923  {
7924  newCreateInfo.maxBlockCount = SIZE_MAX;
7925  }
7926  if(newCreateInfo.blockSize == 0)
7927  {
7928  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7929  }
7930 
7931  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7932 
7933  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7934  if(res != VK_SUCCESS)
7935  {
7936  vma_delete(this, *pPool);
7937  *pPool = VMA_NULL;
7938  return res;
7939  }
7940 
7941  // Add to m_Pools.
7942  {
7943  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7944  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7945  }
7946 
7947  return VK_SUCCESS;
7948 }
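// Usage sketch (illustrative, not part of the library): creating a custom pool.
// As CreatePool() shows above, blockSize == 0 selects CalcPreferredBlockSize()
// and maxBlockCount == 0 means "no limit"; `allocator` and `memTypeIndex` are
// assumed to exist in the caller's code.
/*
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // E.g. from vmaFindMemoryTypeIndex().
    poolCreateInfo.blockSize = 0;                  // 0 = preferred block size.
    poolCreateInfo.minBlockCount = 1;              // Keep at least one block allocated.
    poolCreateInfo.maxBlockCount = 0;              // 0 = unlimited.

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/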
7949 
7950 void VmaAllocator_T::DestroyPool(VmaPool pool)
7951 {
7952  // Remove from m_Pools.
7953  {
7954  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7955  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7956  VMA_ASSERT(success && "Pool not found in Allocator.");
7957  }
7958 
7959  vma_delete(this, pool);
7960 }
7961 
7962 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7963 {
7964  pool->m_BlockVector.GetPoolStats(pPoolStats);
7965 }
7966 
7967 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7968 {
7969  m_CurrentFrameIndex.store(frameIndex);
7970 }
7971 
7972 void VmaAllocator_T::MakePoolAllocationsLost(
7973  VmaPool hPool,
7974  size_t* pLostAllocationCount)
7975 {
7976  hPool->m_BlockVector.MakePoolAllocationsLost(
7977  m_CurrentFrameIndex.load(),
7978  pLostAllocationCount);
7979 }
7980 
7981 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7982 {
7983  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7984  (*pAllocation)->InitLost();
7985 }
7986 
7987 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7988 {
7989  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7990 
7991  VkResult res;
7992  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7993  {
7994  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7995  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7996  {
7997  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7998  if(res == VK_SUCCESS)
7999  {
8000  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8001  }
8002  }
8003  else
8004  {
8005  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8006  }
8007  }
8008  else
8009  {
8010  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8011  }
8012 
8013  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8014  {
8015  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8016  }
8017 
8018  return res;
8019 }
8020 
8021 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8022 {
8023  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8024  {
8025  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8026  }
8027 
8028  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8029 
8030  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8031  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8032  {
8033  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8034  m_HeapSizeLimit[heapIndex] += size;
8035  }
8036 }
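// Illustrative sketch (not part of the library): the m_HeapSizeLimit budget
// maintained above is configured through VmaAllocatorCreateInfo::pHeapSizeLimit,
// where VK_WHOLE_SIZE means "no limit". For example, to cap heap 0 at 1 GiB:
/*
    VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit on this heap.
    }
    heapSizeLimit[0] = 1024ull * 1024 * 1024; // 1 GiB budget for heap 0.

    VmaAllocatorCreateInfo allocatorCreateInfo = {};
    allocatorCreateInfo.physicalDevice = physicalDevice; // Assumed to exist.
    allocatorCreateInfo.device = device;                 // Assumed to exist.
    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
*/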
8037 
8038 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8039 {
8040  if(hAllocation->CanBecomeLost())
8041  {
8042  return VK_ERROR_MEMORY_MAP_FAILED;
8043  }
8044 
8045  switch(hAllocation->GetType())
8046  {
8047  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8048  {
8049  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8050  char *pBytes = VMA_NULL;
8051  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8052  if(res == VK_SUCCESS)
8053  {
8054  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8055  hAllocation->BlockAllocMap();
8056  }
8057  return res;
8058  }
8059  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8060  return hAllocation->DedicatedAllocMap(this, ppData);
8061  default:
8062  VMA_ASSERT(0);
8063  return VK_ERROR_MEMORY_MAP_FAILED;
8064  }
8065 }
8066 
8067 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8068 {
8069  switch(hAllocation->GetType())
8070  {
8071  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8072  {
8073  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8074  hAllocation->BlockAllocUnmap();
8075  pBlock->Unmap(this, 1);
8076  }
8077  break;
8078  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8079  hAllocation->DedicatedAllocUnmap(this);
8080  break;
8081  default:
8082  VMA_ASSERT(0);
8083  }
8084 }
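// Usage sketch (illustrative, not part of the library): Map()/Unmap() above are
// reference-counted per memory block, so mapping is safe even when several
// allocations share one VkDeviceMemory. Typical use through the public API,
// with `allocator`, `alloc`, `srcData`, and `srcSize` assumed to exist:
/*
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, alloc);
    }
*/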
8085 
8086 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8087 {
8088  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8089 
8090  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8091  {
8092  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8093  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8094  VMA_ASSERT(pDedicatedAllocations);
8095  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8096  VMA_ASSERT(success);
8097  }
8098 
8099  VkDeviceMemory hMemory = allocation->GetMemory();
8100 
8101  if(allocation->GetMappedData() != VMA_NULL)
8102  {
8103  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8104  }
8105 
8106  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8107 
8108  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8109 }
8110 
8111 #if VMA_STATS_STRING_ENABLED
8112 
8113 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8114 {
8115  bool dedicatedAllocationsStarted = false;
8116  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8117  {
8118  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8119  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8120  VMA_ASSERT(pDedicatedAllocVector);
8121  if(pDedicatedAllocVector->empty() == false)
8122  {
8123  if(dedicatedAllocationsStarted == false)
8124  {
8125  dedicatedAllocationsStarted = true;
8126  json.WriteString("DedicatedAllocations");
8127  json.BeginObject();
8128  }
8129 
8130  json.BeginString("Type ");
8131  json.ContinueString(memTypeIndex);
8132  json.EndString();
8133 
8134  json.BeginArray();
8135 
8136  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8137  {
8138  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8139  json.BeginObject(true);
8140 
8141  json.WriteString("Type");
8142  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8143 
8144  json.WriteString("Size");
8145  json.WriteNumber(hAlloc->GetSize());
8146 
8147  const void* pUserData = hAlloc->GetUserData();
8148  if(pUserData != VMA_NULL)
8149  {
8150  json.WriteString("UserData");
8151  if(hAlloc->IsUserDataString())
8152  {
8153  json.WriteString((const char*)pUserData);
8154  }
8155  else
8156  {
8157  json.BeginString();
8158  json.ContinueString_Pointer(pUserData);
8159  json.EndString();
8160  }
8161  }
8162 
8163  json.EndObject();
8164  }
8165 
8166  json.EndArray();
8167  }
8168  }
8169  if(dedicatedAllocationsStarted)
8170  {
8171  json.EndObject();
8172  }
8173 
8174  {
8175  bool allocationsStarted = false;
8176  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8177  {
8178  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8179  {
8180  if(allocationsStarted == false)
8181  {
8182  allocationsStarted = true;
8183  json.WriteString("DefaultPools");
8184  json.BeginObject();
8185  }
8186 
8187  json.BeginString("Type ");
8188  json.ContinueString(memTypeIndex);
8189  json.EndString();
8190 
8191  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8192  }
8193  }
8194  if(allocationsStarted)
8195  {
8196  json.EndObject();
8197  }
8198  }
8199 
8200  {
8201  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8202  const size_t poolCount = m_Pools.size();
8203  if(poolCount > 0)
8204  {
8205  json.WriteString("Pools");
8206  json.BeginArray();
8207  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8208  {
8209  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8210  }
8211  json.EndArray();
8212  }
8213  }
8214 }
8215 
8216 #endif // #if VMA_STATS_STRING_ENABLED
8217 
8218 static VkResult AllocateMemoryForImage(
8219  VmaAllocator allocator,
8220  VkImage image,
8221  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8222  VmaSuballocationType suballocType,
8223  VmaAllocation* pAllocation)
8224 {
8225  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8226 
8227  VkMemoryRequirements vkMemReq = {};
8228  bool requiresDedicatedAllocation = false;
8229  bool prefersDedicatedAllocation = false;
8230  allocator->GetImageMemoryRequirements(image, vkMemReq,
8231  requiresDedicatedAllocation, prefersDedicatedAllocation);
8232 
8233  return allocator->AllocateMemory(
8234  vkMemReq,
8235  requiresDedicatedAllocation,
8236  prefersDedicatedAllocation,
8237  VK_NULL_HANDLE, // dedicatedBuffer
8238  image, // dedicatedImage
8239  *pAllocationCreateInfo,
8240  suballocType,
8241  pAllocation);
8242 }
8243 
8244 ////////////////////////////////////////////////////////////////////////////////
8245 // Public interface
8246 
8247 VkResult vmaCreateAllocator(
8248  const VmaAllocatorCreateInfo* pCreateInfo,
8249  VmaAllocator* pAllocator)
8250 {
8251  VMA_ASSERT(pCreateInfo && pAllocator);
8252  VMA_DEBUG_LOG("vmaCreateAllocator");
8253  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8254  return VK_SUCCESS;
8255 }
8256 
8257 void vmaDestroyAllocator(
8258  VmaAllocator allocator)
8259 {
8260  if(allocator != VK_NULL_HANDLE)
8261  {
8262  VMA_DEBUG_LOG("vmaDestroyAllocator");
8263  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8264  vma_delete(&allocationCallbacks, allocator);
8265  }
8266 }
8267 
8268 void vmaGetPhysicalDeviceProperties(
8269  VmaAllocator allocator,
8270  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8271 {
8272  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8273  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8274 }
8275 
8276 void vmaGetMemoryProperties(
8277  VmaAllocator allocator,
8278  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8279 {
8280  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8281  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8282 }
8283 
8284 void vmaGetMemoryTypeProperties(
8285  VmaAllocator allocator,
8286  uint32_t memoryTypeIndex,
8287  VkMemoryPropertyFlags* pFlags)
8288 {
8289  VMA_ASSERT(allocator && pFlags);
8290  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8291  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8292 }
8293 
8294 void vmaSetCurrentFrameIndex(
8295  VmaAllocator allocator,
8296  uint32_t frameIndex)
8297 {
8298  VMA_ASSERT(allocator);
8299  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8300 
8301  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8302 
8303  allocator->SetCurrentFrameIndex(frameIndex);
8304 }
8305 
8306 void vmaCalculateStats(
8307  VmaAllocator allocator,
8308  VmaStats* pStats)
8309 {
8310  VMA_ASSERT(allocator && pStats);
8311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8312  allocator->CalculateStats(pStats);
8313 }
8314 
8315 #if VMA_STATS_STRING_ENABLED
8316 
8317 void vmaBuildStatsString(
8318  VmaAllocator allocator,
8319  char** ppStatsString,
8320  VkBool32 detailedMap)
8321 {
8322  VMA_ASSERT(allocator && ppStatsString);
8323  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8324 
8325  VmaStringBuilder sb(allocator);
8326  {
8327  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8328  json.BeginObject();
8329 
8330  VmaStats stats;
8331  allocator->CalculateStats(&stats);
8332 
8333  json.WriteString("Total");
8334  VmaPrintStatInfo(json, stats.total);
8335 
8336  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8337  {
8338  json.BeginString("Heap ");
8339  json.ContinueString(heapIndex);
8340  json.EndString();
8341  json.BeginObject();
8342 
8343  json.WriteString("Size");
8344  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8345 
8346  json.WriteString("Flags");
8347  json.BeginArray(true);
8348  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8349  {
8350  json.WriteString("DEVICE_LOCAL");
8351  }
8352  json.EndArray();
8353 
8354  if(stats.memoryHeap[heapIndex].blockCount > 0)
8355  {
8356  json.WriteString("Stats");
8357  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8358  }
8359 
8360  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8361  {
8362  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8363  {
8364  json.BeginString("Type ");
8365  json.ContinueString(typeIndex);
8366  json.EndString();
8367 
8368  json.BeginObject();
8369 
8370  json.WriteString("Flags");
8371  json.BeginArray(true);
8372  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8373  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8374  {
8375  json.WriteString("DEVICE_LOCAL");
8376  }
8377  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8378  {
8379  json.WriteString("HOST_VISIBLE");
8380  }
8381  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8382  {
8383  json.WriteString("HOST_COHERENT");
8384  }
8385  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8386  {
8387  json.WriteString("HOST_CACHED");
8388  }
8389  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8390  {
8391  json.WriteString("LAZILY_ALLOCATED");
8392  }
8393  json.EndArray();
8394 
8395  if(stats.memoryType[typeIndex].blockCount > 0)
8396  {
8397  json.WriteString("Stats");
8398  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8399  }
8400 
8401  json.EndObject();
8402  }
8403  }
8404 
8405  json.EndObject();
8406  }
8407  if(detailedMap == VK_TRUE)
8408  {
8409  allocator->PrintDetailedMap(json);
8410  }
8411 
8412  json.EndObject();
8413  }
8414 
8415  const size_t len = sb.GetLength();
8416  char* const pChars = vma_new_array(allocator, char, len + 1);
8417  if(len > 0)
8418  {
8419  memcpy(pChars, sb.GetData(), len);
8420  }
8421  pChars[len] = '\0';
8422  *ppStatsString = pChars;
8423 }
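// Usage sketch (illustrative, not part of the library): dumping the JSON
// statistics built above. Every successful vmaBuildStatsString() call must be
// paired with vmaFreeStatsString().
/*
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
    printf("%s\n", statsString); // Or write to a file for offline inspection.
    vmaFreeStatsString(allocator, statsString);
*/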
8424 
8425 void vmaFreeStatsString(
8426  VmaAllocator allocator,
8427  char* pStatsString)
8428 {
8429  if(pStatsString != VMA_NULL)
8430  {
8431  VMA_ASSERT(allocator);
8432  size_t len = strlen(pStatsString);
8433  vma_delete_array(allocator, pStatsString, len + 1);
8434  }
8435 }
8436 
8437 #endif // #if VMA_STATS_STRING_ENABLED
8438 
8439 /*
8440 This function is not protected by any mutex because it just reads immutable data.
8441 */
8442 VkResult vmaFindMemoryTypeIndex(
8443  VmaAllocator allocator,
8444  uint32_t memoryTypeBits,
8445  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8446  uint32_t* pMemoryTypeIndex)
8447 {
8448  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8449  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8450  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8451 
8452  if(pAllocationCreateInfo->memoryTypeBits != 0)
8453  {
8454  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8455  }
8456 
8457  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8458  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8459 
8460  // Convert usage to requiredFlags and preferredFlags.
8461  switch(pAllocationCreateInfo->usage)
8462  {
8463  case VMA_MEMORY_USAGE_UNKNOWN:
8464  break;
8465  case VMA_MEMORY_USAGE_GPU_ONLY:
8466  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8467  break;
8468  case VMA_MEMORY_USAGE_CPU_ONLY:
8469  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8470  break;
8471  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8472  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8473  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8474  break;
8475  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8476  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8477  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8478  break;
8479  default:
8480  break;
8481  }
8482 
8483  *pMemoryTypeIndex = UINT32_MAX;
8484  uint32_t minCost = UINT32_MAX;
8485  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8486  memTypeIndex < allocator->GetMemoryTypeCount();
8487  ++memTypeIndex, memTypeBit <<= 1)
8488  {
8489  // This memory type is acceptable according to memoryTypeBits bitmask.
8490  if((memTypeBit & memoryTypeBits) != 0)
8491  {
8492  const VkMemoryPropertyFlags currFlags =
8493  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8494  // This memory type contains requiredFlags.
8495  if((requiredFlags & ~currFlags) == 0)
8496  {
8497  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8498  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8499  // Remember memory type with lowest cost.
8500  if(currCost < minCost)
8501  {
8502  *pMemoryTypeIndex = memTypeIndex;
8503  if(currCost == 0)
8504  {
8505  return VK_SUCCESS;
8506  }
8507  minCost = currCost;
8508  }
8509  }
8510  }
8511  }
8512  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8513 }
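// Usage sketch (illustrative, not part of the library): the cost metric above
// returns a type that contains all requiredFlags and misses as few
// preferredFlags bits as possible. E.g. picking a type for a staging buffer,
// with `memReq` obtained from vkGetBufferMemoryRequirements():
/*
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // HOST_VISIBLE | HOST_COHERENT required.

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/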
8514 
8515 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8516  VmaAllocator allocator,
8517  const VkBufferCreateInfo* pBufferCreateInfo,
8518  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8519  uint32_t* pMemoryTypeIndex)
8520 {
8521  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8522  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8523  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8524  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8525 
8526  const VkDevice hDev = allocator->m_hDevice;
8527  VkBuffer hBuffer = VK_NULL_HANDLE;
8528  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8529  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8530  if(res == VK_SUCCESS)
8531  {
8532  VkMemoryRequirements memReq = {};
8533  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8534  hDev, hBuffer, &memReq);
8535 
8536  res = vmaFindMemoryTypeIndex(
8537  allocator,
8538  memReq.memoryTypeBits,
8539  pAllocationCreateInfo,
8540  pMemoryTypeIndex);
8541 
8542  allocator->GetVulkanFunctions().vkDestroyBuffer(
8543  hDev, hBuffer, allocator->GetAllocationCallbacks());
8544  }
8545  return res;
8546 }
8547 
8548 VkResult vmaFindMemoryTypeIndexForImageInfo(
8549  VmaAllocator allocator,
8550  const VkImageCreateInfo* pImageCreateInfo,
8551  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8552  uint32_t* pMemoryTypeIndex)
8553 {
8554  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8555  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8556  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8557  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8558 
8559  const VkDevice hDev = allocator->m_hDevice;
8560  VkImage hImage = VK_NULL_HANDLE;
8561  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8562  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8563  if(res == VK_SUCCESS)
8564  {
8565  VkMemoryRequirements memReq = {};
8566  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8567  hDev, hImage, &memReq);
8568 
8569  res = vmaFindMemoryTypeIndex(
8570  allocator,
8571  memReq.memoryTypeBits,
8572  pAllocationCreateInfo,
8573  pMemoryTypeIndex);
8574 
8575  allocator->GetVulkanFunctions().vkDestroyImage(
8576  hDev, hImage, allocator->GetAllocationCallbacks());
8577  }
8578  return res;
8579 }
8580 
8581 VkResult vmaCreatePool(
8582  VmaAllocator allocator,
8583  const VmaPoolCreateInfo* pCreateInfo,
8584  VmaPool* pPool)
8585 {
8586  VMA_ASSERT(allocator && pCreateInfo && pPool);
8587 
8588  VMA_DEBUG_LOG("vmaCreatePool");
8589 
8590  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8591 
8592  return allocator->CreatePool(pCreateInfo, pPool);
8593 }
8594 
8595 void vmaDestroyPool(
8596  VmaAllocator allocator,
8597  VmaPool pool)
8598 {
8599  VMA_ASSERT(allocator);
8600 
8601  if(pool == VK_NULL_HANDLE)
8602  {
8603  return;
8604  }
8605 
8606  VMA_DEBUG_LOG("vmaDestroyPool");
8607 
8608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8609 
8610  allocator->DestroyPool(pool);
8611 }
8612 
8613 void vmaGetPoolStats(
8614  VmaAllocator allocator,
8615  VmaPool pool,
8616  VmaPoolStats* pPoolStats)
8617 {
8618  VMA_ASSERT(allocator && pool && pPoolStats);
8619 
8620  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8621 
8622  allocator->GetPoolStats(pool, pPoolStats);
8623 }
8624 
8625 void vmaMakePoolAllocationsLost(
8626  VmaAllocator allocator,
8627  VmaPool pool,
8628  size_t* pLostAllocationCount)
8629 {
8630  VMA_ASSERT(allocator && pool);
8631 
8632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8633 
8634  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8635 }
8636 
8637 VkResult vmaAllocateMemory(
8638  VmaAllocator allocator,
8639  const VkMemoryRequirements* pVkMemoryRequirements,
8640  const VmaAllocationCreateInfo* pCreateInfo,
8641  VmaAllocation* pAllocation,
8642  VmaAllocationInfo* pAllocationInfo)
8643 {
8644  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8645 
8646  VMA_DEBUG_LOG("vmaAllocateMemory");
8647 
8648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8649 
8650  VkResult result = allocator->AllocateMemory(
8651  *pVkMemoryRequirements,
8652  false, // requiresDedicatedAllocation
8653  false, // prefersDedicatedAllocation
8654  VK_NULL_HANDLE, // dedicatedBuffer
8655  VK_NULL_HANDLE, // dedicatedImage
8656  *pCreateInfo,
8657  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8658  pAllocation);
8659 
8660  if(pAllocationInfo && result == VK_SUCCESS)
8661  {
8662  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8663  }
8664 
8665  return result;
8666 }
8667 
8668 VkResult vmaAllocateMemoryForBuffer(
8669  VmaAllocator allocator,
8670  VkBuffer buffer,
8671  const VmaAllocationCreateInfo* pCreateInfo,
8672  VmaAllocation* pAllocation,
8673  VmaAllocationInfo* pAllocationInfo)
8674 {
8675  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8676 
8677  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8678 
8679  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8680 
8681  VkMemoryRequirements vkMemReq = {};
8682  bool requiresDedicatedAllocation = false;
8683  bool prefersDedicatedAllocation = false;
8684  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8685  requiresDedicatedAllocation,
8686  prefersDedicatedAllocation);
8687 
8688  VkResult result = allocator->AllocateMemory(
8689  vkMemReq,
8690  requiresDedicatedAllocation,
8691  prefersDedicatedAllocation,
8692  buffer, // dedicatedBuffer
8693  VK_NULL_HANDLE, // dedicatedImage
8694  *pCreateInfo,
8695  VMA_SUBALLOCATION_TYPE_BUFFER,
8696  pAllocation);
8697 
8698  if(pAllocationInfo && result == VK_SUCCESS)
8699  {
8700  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8701  }
8702 
8703  return result;
8704 }
8705 
8706 VkResult vmaAllocateMemoryForImage(
8707  VmaAllocator allocator,
8708  VkImage image,
8709  const VmaAllocationCreateInfo* pCreateInfo,
8710  VmaAllocation* pAllocation,
8711  VmaAllocationInfo* pAllocationInfo)
8712 {
8713  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8714 
8715  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8716 
8717  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8718 
8719  VkResult result = AllocateMemoryForImage(
8720  allocator,
8721  image,
8722  pCreateInfo,
8723  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8724  pAllocation);
8725 
8726  if(pAllocationInfo && result == VK_SUCCESS)
8727  {
8728  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8729  }
8730 
8731  return result;
8732 }
8733 
8734 void vmaFreeMemory(
8735  VmaAllocator allocator,
8736  VmaAllocation allocation)
8737 {
8738  VMA_ASSERT(allocator && allocation);
8739 
8740  VMA_DEBUG_LOG("vmaFreeMemory");
8741 
8742  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8743 
8744  allocator->FreeMemory(allocation);
8745 }
8746 
8747 void vmaGetAllocationInfo(
8748  VmaAllocator allocator,
8749  VmaAllocation allocation,
8750  VmaAllocationInfo* pAllocationInfo)
8751 {
8752  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8753 
8754  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8755 
8756  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8757 }
8758 
8759 VkBool32 vmaTouchAllocation(
8760  VmaAllocator allocator,
8761  VmaAllocation allocation)
8762 {
8763  VMA_ASSERT(allocator && allocation);
8764 
8765  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8766 
8767  return allocator->TouchAllocation(allocation);
8768 }
8769 
8770 void vmaSetAllocationUserData(
8771  VmaAllocator allocator,
8772  VmaAllocation allocation,
8773  void* pUserData)
8774 {
8775  VMA_ASSERT(allocator && allocation);
8776 
8777  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8778 
8779  allocation->SetUserData(allocator, pUserData);
8780 }
8781 
8782 void vmaCreateLostAllocation(
8783  VmaAllocator allocator,
8784  VmaAllocation* pAllocation)
8785 {
8786  VMA_ASSERT(allocator && pAllocation);
8787 
8788  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8789 
8790  allocator->CreateLostAllocation(pAllocation);
8791 }
8792 
8793 VkResult vmaMapMemory(
8794  VmaAllocator allocator,
8795  VmaAllocation allocation,
8796  void** ppData)
8797 {
8798  VMA_ASSERT(allocator && allocation && ppData);
8799 
8800  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8801 
8802  return allocator->Map(allocation, ppData);
8803 }
8804 
8805 void vmaUnmapMemory(
8806  VmaAllocator allocator,
8807  VmaAllocation allocation)
8808 {
8809  VMA_ASSERT(allocator && allocation);
8810 
8811  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8812 
8813  allocator->Unmap(allocation);
8814 }
8815 
8816 VkResult vmaDefragment(
8817  VmaAllocator allocator,
8818  VmaAllocation* pAllocations,
8819  size_t allocationCount,
8820  VkBool32* pAllocationsChanged,
8821  const VmaDefragmentationInfo *pDefragmentationInfo,
8822  VmaDefragmentationStats* pDefragmentationStats)
8823 {
8824  VMA_ASSERT(allocator && pAllocations);
8825 
8826  VMA_DEBUG_LOG("vmaDefragment");
8827 
8828  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8829 
8830  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8831 }
8832 
8833 VkResult vmaCreateBuffer(
8834  VmaAllocator allocator,
8835  const VkBufferCreateInfo* pBufferCreateInfo,
8836  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8837  VkBuffer* pBuffer,
8838  VmaAllocation* pAllocation,
8839  VmaAllocationInfo* pAllocationInfo)
8840 {
8841  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8842 
8843  VMA_DEBUG_LOG("vmaCreateBuffer");
8844 
8845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8846 
8847  *pBuffer = VK_NULL_HANDLE;
8848  *pAllocation = VK_NULL_HANDLE;
8849 
8850  // 1. Create VkBuffer.
8851  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8852  allocator->m_hDevice,
8853  pBufferCreateInfo,
8854  allocator->GetAllocationCallbacks(),
8855  pBuffer);
8856  if(res >= 0)
8857  {
8858  // 2. vkGetBufferMemoryRequirements.
8859  VkMemoryRequirements vkMemReq = {};
8860  bool requiresDedicatedAllocation = false;
8861  bool prefersDedicatedAllocation = false;
8862  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8863  requiresDedicatedAllocation, prefersDedicatedAllocation);
8864 
8865  // Make sure alignment requirements for specific buffer usages reported
8866  // in Physical Device Properties are included in alignment reported by memory requirements.
8867  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8868  {
8869  VMA_ASSERT(vkMemReq.alignment %
8870  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8871  }
8872  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8873  {
8874  VMA_ASSERT(vkMemReq.alignment %
8875  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8876  }
8877  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8878  {
8879  VMA_ASSERT(vkMemReq.alignment %
8880  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8881  }
8882 
8883  // 3. Allocate memory using allocator.
8884  res = allocator->AllocateMemory(
8885  vkMemReq,
8886  requiresDedicatedAllocation,
8887  prefersDedicatedAllocation,
8888  *pBuffer, // dedicatedBuffer
8889  VK_NULL_HANDLE, // dedicatedImage
8890  *pAllocationCreateInfo,
8891  VMA_SUBALLOCATION_TYPE_BUFFER,
8892  pAllocation);
8893  if(res >= 0)
8894  {
8895  // 4. Bind buffer with memory.
8896  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8897  allocator->m_hDevice,
8898  *pBuffer,
8899  (*pAllocation)->GetMemory(),
8900  (*pAllocation)->GetOffset());
8901  if(res >= 0)
8902  {
8903  // All steps succeeded.
8904  if(pAllocationInfo != VMA_NULL)
8905  {
8906  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8907  }
8908  return VK_SUCCESS;
8909  }
8910  allocator->FreeMemory(*pAllocation);
8911  *pAllocation = VK_NULL_HANDLE;
8912  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8913  *pBuffer = VK_NULL_HANDLE;
8914  return res;
8915  }
8916  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8917  *pBuffer = VK_NULL_HANDLE;
8918  return res;
8919  }
8920  return res;
8921 }
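// Usage sketch (illustrative, not part of the library): the create-allocate-bind
// sequence wrapped by vmaCreateBuffer(), here for a GPU-only vertex buffer;
// `allocator` is assumed to exist in the caller's code.
/*
    VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferCreateInfo.size = 65536;
    bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator,
        &bufferCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation); // Frees both buffer and memory.
*/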
8922 
8923 void vmaDestroyBuffer(
8924  VmaAllocator allocator,
8925  VkBuffer buffer,
8926  VmaAllocation allocation)
8927 {
8928  if(buffer != VK_NULL_HANDLE)
8929  {
8930  VMA_ASSERT(allocator);
8931 
8932  VMA_DEBUG_LOG("vmaDestroyBuffer");
8933 
8934  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8935 
8936  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8937 
8938  allocator->FreeMemory(allocation);
8939  }
8940 }
8941 
8942 VkResult vmaCreateImage(
8943  VmaAllocator allocator,
8944  const VkImageCreateInfo* pImageCreateInfo,
8945  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8946  VkImage* pImage,
8947  VmaAllocation* pAllocation,
8948  VmaAllocationInfo* pAllocationInfo)
8949 {
8950  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8951 
8952  VMA_DEBUG_LOG("vmaCreateImage");
8953 
8954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8955 
8956  *pImage = VK_NULL_HANDLE;
8957  *pAllocation = VK_NULL_HANDLE;
8958 
8959  // 1. Create VkImage.
8960  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8961  allocator->m_hDevice,
8962  pImageCreateInfo,
8963  allocator->GetAllocationCallbacks(),
8964  pImage);
8965  if(res >= 0)
8966  {
8967  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8968  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8969  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8970 
8971  // 2. Allocate memory using allocator.
8972  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8973  if(res >= 0)
8974  {
8975  // 3. Bind image with memory.
8976  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8977  allocator->m_hDevice,
8978  *pImage,
8979  (*pAllocation)->GetMemory(),
8980  (*pAllocation)->GetOffset());
8981  if(res >= 0)
8982  {
8983  // All steps succeeded.
8984  if(pAllocationInfo != VMA_NULL)
8985  {
8986  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8987  }
8988  return VK_SUCCESS;
8989  }
8990  allocator->FreeMemory(*pAllocation);
8991  *pAllocation = VK_NULL_HANDLE;
8992  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8993  *pImage = VK_NULL_HANDLE;
8994  return res;
8995  }
8996  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8997  *pImage = VK_NULL_HANDLE;
8998  return res;
8999  }
9000  return res;
9001 }
9002 
9003 void vmaDestroyImage(
9004  VmaAllocator allocator,
9005  VkImage image,
9006  VmaAllocation allocation)
9007 {
9008  if(image != VK_NULL_HANDLE)
9009  {
9010  VMA_ASSERT(allocator);
9011 
9012  VMA_DEBUG_LOG("vmaDestroyImage");
9013 
9014  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9015 
9016  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9017 
9018  allocator->FreeMemory(allocation);
9019  }
9020 }
9021 
9022 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:1417
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1453
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:954
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1404
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1152
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1712
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1730
Definition: vk_mem_alloc.h:1191
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1300
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:969
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1120
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:904
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:925
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:930
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1732
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1287
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1463
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:964
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1103
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1412
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:917
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1261
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1116
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:921
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1407
Definition: vk_mem_alloc.h:1200
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1282
Definition: vk_mem_alloc.h:1273
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1106
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:966
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1425
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1000
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1456
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1271
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1306
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1038
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1122
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1241
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1115
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:975
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:919
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:974
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1439
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1547
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:994
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1115
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1112
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1444
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1528
Definition: vk_mem_alloc.h:1269
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1728
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:962
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:977
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1110
Definition: vk_mem_alloc.h:1157
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1397
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1108
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:972
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:976
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1228
Definition: vk_mem_alloc.h:1184
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1542
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:952
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:965
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1509
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1375
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1116
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1123
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1450
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1116
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1514