// Vulkan Memory Allocator — vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
869 #include <vulkan/vulkan.h>
870 
871 VK_DEFINE_HANDLE(VmaAllocator)
872 
/// Informative user callback invoked when the allocator allocates a block of
/// VkDeviceMemory of the given memory type and size (see VmaDeviceMemoryCallbacks).
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Informative user callback invoked when the allocator frees a block of
/// VkDeviceMemory of the given memory type and size (see VmaDeviceMemoryCallbacks).
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
885 
893 typedef struct VmaDeviceMemoryCallbacks {
899 
929 
932 typedef VkFlags VmaAllocatorCreateFlags;
933 
938 typedef struct VmaVulkanFunctions {
939  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
940  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
941  PFN_vkAllocateMemory vkAllocateMemory;
942  PFN_vkFreeMemory vkFreeMemory;
943  PFN_vkMapMemory vkMapMemory;
944  PFN_vkUnmapMemory vkUnmapMemory;
945  PFN_vkBindBufferMemory vkBindBufferMemory;
946  PFN_vkBindImageMemory vkBindImageMemory;
947  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
948  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
949  PFN_vkCreateBuffer vkCreateBuffer;
950  PFN_vkDestroyBuffer vkDestroyBuffer;
951  PFN_vkCreateImage vkCreateImage;
952  PFN_vkDestroyImage vkDestroyImage;
953  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
954  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
956 
959 {
961  VmaAllocatorCreateFlags flags;
963 
964  VkPhysicalDevice physicalDevice;
966 
967  VkDevice device;
969 
972 
973  const VkAllocationCallbacks* pAllocationCallbacks;
975 
990  uint32_t frameInUseCount;
1014  const VkDeviceSize* pHeapSizeLimit;
1028 
/// Creates a VmaAllocator object according to *pCreateInfo and writes the
/// resulting handle to *pAllocator.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys an allocator object previously created with vmaCreateAllocator().
void vmaDestroyAllocator(
    VmaAllocator allocator);
1037 
1043  VmaAllocator allocator,
1044  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1045 
1051  VmaAllocator allocator,
1052  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1053 
1061  VmaAllocator allocator,
1062  uint32_t memoryTypeIndex,
1063  VkMemoryPropertyFlags* pFlags);
1064 
1074  VmaAllocator allocator,
1075  uint32_t frameIndex);
1076 
1079 typedef struct VmaStatInfo
1080 {
1082  uint32_t blockCount;
1088  VkDeviceSize usedBytes;
1090  VkDeviceSize unusedBytes;
1091  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1092  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1093 } VmaStatInfo;
1094 
1096 typedef struct VmaStats
1097 {
1098  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1099  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1101 } VmaStats;
1102 
1104 void vmaCalculateStats(
1105  VmaAllocator allocator,
1106  VmaStats* pStats);
1107 
1108 #define VMA_STATS_STRING_ENABLED 1
1109 
1110 #if VMA_STATS_STRING_ENABLED
1111 
1113 
1115 void vmaBuildStatsString(
1116  VmaAllocator allocator,
1117  char** ppStatsString,
1118  VkBool32 detailedMap);
1119 
1120 void vmaFreeStatsString(
1121  VmaAllocator allocator,
1122  char* pStatsString);
1123 
1124 #endif // #if VMA_STATS_STRING_ENABLED
1125 
1126 VK_DEFINE_HANDLE(VmaPool)
1127 
1128 typedef enum VmaMemoryUsage
1129 {
1178 } VmaMemoryUsage;
1179 
1194 
1244 
1248 
1250 {
1252  VmaAllocationCreateFlags flags;
1263  VkMemoryPropertyFlags requiredFlags;
1268  VkMemoryPropertyFlags preferredFlags;
1276  uint32_t memoryTypeBits;
1282  VmaPool pool;
1289  void* pUserData;
1291 
1308 VkResult vmaFindMemoryTypeIndex(
1309  VmaAllocator allocator,
1310  uint32_t memoryTypeBits,
1311  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1312  uint32_t* pMemoryTypeIndex);
1313 
1327  VmaAllocator allocator,
1328  const VkBufferCreateInfo* pBufferCreateInfo,
1329  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1330  uint32_t* pMemoryTypeIndex);
1331 
1345  VmaAllocator allocator,
1346  const VkImageCreateInfo* pImageCreateInfo,
1347  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1348  uint32_t* pMemoryTypeIndex);
1349 
1370 
1373 typedef VkFlags VmaPoolCreateFlags;
1374 
1377 typedef struct VmaPoolCreateInfo {
1383  VmaPoolCreateFlags flags;
1388  VkDeviceSize blockSize;
1417 
1420 typedef struct VmaPoolStats {
1423  VkDeviceSize size;
1426  VkDeviceSize unusedSize;
1439  VkDeviceSize unusedRangeSizeMax;
1440 } VmaPoolStats;
1441 
1448 VkResult vmaCreatePool(
1449  VmaAllocator allocator,
1450  const VmaPoolCreateInfo* pCreateInfo,
1451  VmaPool* pPool);
1452 
1455 void vmaDestroyPool(
1456  VmaAllocator allocator,
1457  VmaPool pool);
1458 
1465 void vmaGetPoolStats(
1466  VmaAllocator allocator,
1467  VmaPool pool,
1468  VmaPoolStats* pPoolStats);
1469 
1477  VmaAllocator allocator,
1478  VmaPool pool,
1479  size_t* pLostAllocationCount);
1480 
1481 VK_DEFINE_HANDLE(VmaAllocation)
1482 
1483 
1485 typedef struct VmaAllocationInfo {
1490  uint32_t memoryType;
1499  VkDeviceMemory deviceMemory;
1504  VkDeviceSize offset;
1509  VkDeviceSize size;
1523  void* pUserData;
1525 
1536 VkResult vmaAllocateMemory(
1537  VmaAllocator allocator,
1538  const VkMemoryRequirements* pVkMemoryRequirements,
1539  const VmaAllocationCreateInfo* pCreateInfo,
1540  VmaAllocation* pAllocation,
1541  VmaAllocationInfo* pAllocationInfo);
1542 
1550  VmaAllocator allocator,
1551  VkBuffer buffer,
1552  const VmaAllocationCreateInfo* pCreateInfo,
1553  VmaAllocation* pAllocation,
1554  VmaAllocationInfo* pAllocationInfo);
1555 
1557 VkResult vmaAllocateMemoryForImage(
1558  VmaAllocator allocator,
1559  VkImage image,
1560  const VmaAllocationCreateInfo* pCreateInfo,
1561  VmaAllocation* pAllocation,
1562  VmaAllocationInfo* pAllocationInfo);
1563 
1565 void vmaFreeMemory(
1566  VmaAllocator allocator,
1567  VmaAllocation allocation);
1568 
1586  VmaAllocator allocator,
1587  VmaAllocation allocation,
1588  VmaAllocationInfo* pAllocationInfo);
1589 
1604 VkBool32 vmaTouchAllocation(
1605  VmaAllocator allocator,
1606  VmaAllocation allocation);
1607 
1622  VmaAllocator allocator,
1623  VmaAllocation allocation,
1624  void* pUserData);
1625 
1637  VmaAllocator allocator,
1638  VmaAllocation* pAllocation);
1639 
1674 VkResult vmaMapMemory(
1675  VmaAllocator allocator,
1676  VmaAllocation allocation,
1677  void** ppData);
1678 
1683 void vmaUnmapMemory(
1684  VmaAllocator allocator,
1685  VmaAllocation allocation);
1686 
1688 typedef struct VmaDefragmentationInfo {
1693  VkDeviceSize maxBytesToMove;
1700 
1702 typedef struct VmaDefragmentationStats {
1704  VkDeviceSize bytesMoved;
1706  VkDeviceSize bytesFreed;
1712 
1795 VkResult vmaDefragment(
1796  VmaAllocator allocator,
1797  VmaAllocation* pAllocations,
1798  size_t allocationCount,
1799  VkBool32* pAllocationsChanged,
1800  const VmaDefragmentationInfo *pDefragmentationInfo,
1801  VmaDefragmentationStats* pDefragmentationStats);
1802 
/// Creates a VkBuffer together with a memory allocation for it.
/// Outputs the buffer handle and its VmaAllocation; pAllocationInfo receives
/// details of the allocation (presumably optional/nullable — confirm against
/// the implementation).
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys the buffer and frees its allocation — counterpart of vmaCreateBuffer().
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Image analogue of vmaCreateBuffer(): creates a VkImage together with a
/// memory allocation for it.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys the image and frees its allocation — counterpart of vmaCreateImage().
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);
1877 
1878 #ifdef __cplusplus
1879 }
1880 #endif
1881 
1882 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1883 
1884 // For Visual Studio IntelliSense.
1885 #ifdef __INTELLISENSE__
1886 #define VMA_IMPLEMENTATION
1887 #endif
1888 
1889 #ifdef VMA_IMPLEMENTATION
1890 #undef VMA_IMPLEMENTATION
1891 
1892 #include <cstdint>
1893 #include <cstdlib>
1894 #include <cstring>
1895 
1896 /*******************************************************************************
1897 CONFIGURATION SECTION
1898 
1899 Define some of these macros before each #include of this header or change them
1900 here if you need other then default behavior depending on your environment.
1901 */
1902 
1903 /*
1904 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1905 internally, like:
1906 
1907  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1908 
1909 Define to 0 if you are going to provide you own pointers to Vulkan functions via
1910 VmaAllocatorCreateInfo::pVulkanFunctions.
1911 */
1912 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1913 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1914 #endif
1915 
1916 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1917 //#define VMA_USE_STL_CONTAINERS 1
1918 
1919 /* Set this macro to 1 to make the library including and using STL containers:
1920 std::pair, std::vector, std::list, std::unordered_map.
1921 
1922 Set it to 0 or undefined to make the library using its own implementation of
1923 the containers.
1924 */
1925 #if VMA_USE_STL_CONTAINERS
1926  #define VMA_USE_STL_VECTOR 1
1927  #define VMA_USE_STL_UNORDERED_MAP 1
1928  #define VMA_USE_STL_LIST 1
1929 #endif
1930 
1931 #if VMA_USE_STL_VECTOR
1932  #include <vector>
1933 #endif
1934 
1935 #if VMA_USE_STL_UNORDERED_MAP
1936  #include <unordered_map>
1937 #endif
1938 
1939 #if VMA_USE_STL_LIST
1940  #include <list>
1941 #endif
1942 
1943 /*
1944 Following headers are used in this CONFIGURATION section only, so feel free to
1945 remove them if not needed.
1946 */
1947 #include <cassert> // for assert
1948 #include <algorithm> // for min, max
1949 #include <mutex> // for std::mutex
1950 #include <atomic> // for std::atomic
1951 
1952 #if !defined(_WIN32) && !defined(__APPLE__)
1953  #include <malloc.h> // for aligned_alloc()
1954 #endif
1955 
1956 #ifndef VMA_NULL
1957  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1958  #define VMA_NULL nullptr
1959 #endif
1960 
1961 #if defined(__APPLE__) || defined(__ANDROID__)
1962 #include <cstdlib>
1963 void *aligned_alloc(size_t alignment, size_t size)
1964 {
1965  // alignment must be >= sizeof(void*)
1966  if(alignment < sizeof(void*))
1967  {
1968  alignment = sizeof(void*);
1969  }
1970 
1971  void *pointer;
1972  if(posix_memalign(&pointer, alignment, size) == 0)
1973  return pointer;
1974  return VMA_NULL;
1975 }
1976 #endif
1977 
1978 // Normal assert to check for programmer's errors, especially in Debug configuration.
1979 #ifndef VMA_ASSERT
1980  #ifdef _DEBUG
1981  #define VMA_ASSERT(expr) assert(expr)
1982  #else
1983  #define VMA_ASSERT(expr)
1984  #endif
1985 #endif
1986 
1987 // Assert that will be called very often, like inside data structures e.g. operator[].
1988 // Making it non-empty can make program slow.
1989 #ifndef VMA_HEAVY_ASSERT
1990  #ifdef _DEBUG
1991  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1992  #else
1993  #define VMA_HEAVY_ASSERT(expr)
1994  #endif
1995 #endif
1996 
1997 #ifndef VMA_ALIGN_OF
1998  #define VMA_ALIGN_OF(type) (__alignof(type))
1999 #endif
2000 
2001 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2002  #if defined(_WIN32)
2003  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2004  #else
2005  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2006  #endif
2007 #endif
2008 
2009 #ifndef VMA_SYSTEM_FREE
2010  #if defined(_WIN32)
2011  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2012  #else
2013  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2014  #endif
2015 #endif
2016 
2017 #ifndef VMA_MIN
2018  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2019 #endif
2020 
2021 #ifndef VMA_MAX
2022  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2023 #endif
2024 
2025 #ifndef VMA_SWAP
2026  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2027 #endif
2028 
2029 #ifndef VMA_SORT
2030  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2031 #endif
2032 
2033 #ifndef VMA_DEBUG_LOG
2034  #define VMA_DEBUG_LOG(format, ...)
2035  /*
2036  #define VMA_DEBUG_LOG(format, ...) do { \
2037  printf(format, __VA_ARGS__); \
2038  printf("\n"); \
2039  } while(false)
2040  */
2041 #endif
2042 
2043 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2044 #if VMA_STATS_STRING_ENABLED
2045  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2046  {
2047  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2048  }
2049  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2050  {
2051  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2052  }
2053  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2054  {
2055  snprintf(outStr, strLen, "%p", ptr);
2056  }
2057 #endif
2058 
2059 #ifndef VMA_MUTEX
    // Default mutex implementation used when the user does not provide a
    // custom VMA_MUTEX. Thin wrapper over std::mutex with the Lock()/Unlock()
    // interface expected by VmaMutexLock.
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
2070  #define VMA_MUTEX VmaMutex
2071 #endif
2072 
2073 /*
2074 If providing your own implementation, you need to implement a subset of std::atomic:
2075 
2076 - Constructor(uint32_t desired)
2077 - uint32_t load() const
2078 - void store(uint32_t desired)
2079 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2080 */
2081 #ifndef VMA_ATOMIC_UINT32
2082  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2083 #endif
2084 
2085 #ifndef VMA_BEST_FIT
2086 
2098  #define VMA_BEST_FIT (1)
2099 #endif
2100 
2101 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2102 
2106  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2107 #endif
2108 
2109 #ifndef VMA_DEBUG_ALIGNMENT
2110 
2114  #define VMA_DEBUG_ALIGNMENT (1)
2115 #endif
2116 
2117 #ifndef VMA_DEBUG_MARGIN
2118 
2122  #define VMA_DEBUG_MARGIN (0)
2123 #endif
2124 
2125 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2126 
2130  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2131 #endif
2132 
2133 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2134 
2138  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2139 #endif
2140 
2141 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2142  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2144 #endif
2145 
2146 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2147  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2149 #endif
2150 
2151 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2152 
2153 /*******************************************************************************
2154 END OF CONFIGURATION
2155 */
2156 
2157 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2158  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2159 
2160 // Returns number of bits set to 1 in (v).
// Returns the number of bits set to 1 in v (population count).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2170 
2171 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
2172 // Use types like uint32_t, uint64_t as T.
// Rounds val up to the nearest multiple of align. E.g. VmaAlignUp(11, 8) == 16.
// Works for any positive align, not only powers of two. Use unsigned integer
// types such as uint32_t or uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T blocks = (val + align - 1) / align;
    return blocks * align;
}
2178 
2179 // Division with mathematical rounding to nearest number.
// Integer division of x by y with rounding to the nearest whole number
// (rather than truncation toward zero).
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
2185 
2186 #ifndef VMA_SORT
2187 
// Lomuto partition used by the VMA_SORT fallback below.
// Takes the last element as the pivot, moves all elements that compare less
// than the pivot before it, and returns an iterator to the pivot's final
// position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // pivot = last element
    Iterator insertIndex = beg;                // next slot for a "less than pivot" element
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Place the pivot between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
2210 
2211 template<typename Iterator, typename Compare>
2212 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2213 {
2214  if(beg < end)
2215  {
2216  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2217  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2218  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2219  }
2220 }
2221 
2222 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2223 
2224 #endif // #ifndef VMA_SORT
2225 
2226 /*
2227 Returns true if two memory blocks occupy overlapping pages.
2228 ResourceA must be in less memory offset than ResourceB.
2229 
2230 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2231 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2232 */
2233 static inline bool VmaBlocksOnSamePage(
2234  VkDeviceSize resourceAOffset,
2235  VkDeviceSize resourceASize,
2236  VkDeviceSize resourceBOffset,
2237  VkDeviceSize pageSize)
2238 {
2239  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2240  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2241  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2242  VkDeviceSize resourceBStart = resourceBOffset;
2243  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2244  return resourceAEndPage == resourceBStartPage;
2245 }
2246 
// Category of the resource occupying a suballocation. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown — treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with tiling not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
2257 
2258 /*
2259 Returns true if given suballocation types could conflict and must respect
2260 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2261 or linear image and another one is optimal image. If type is unknown, behave
2262 conservatively.
2263 */
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Normalize so that suballocType1 <= suballocType2; the table below then
    // only has to cover ordered pairs.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with optimal images (and images of unknown tiling).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: conflicts with everything except plain buffers.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // suballocType2 is also optimal here (>= type1); two optimal images
        // do not conflict with each other.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
2298 
2299 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2300 struct VmaMutexLock
2301 {
2302 public:
2303  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2304  m_pMutex(useMutex ? &mutex : VMA_NULL)
2305  {
2306  if(m_pMutex)
2307  {
2308  m_pMutex->Lock();
2309  }
2310  }
2311 
2312  ~VmaMutexLock()
2313  {
2314  if(m_pMutex)
2315  {
2316  m_pMutex->Unlock();
2317  }
2318  }
2319 
2320 private:
2321  VMA_MUTEX* m_pMutex;
2322 };
2323 
2324 #if VMA_DEBUG_GLOBAL_MUTEX
2325  static VMA_MUTEX gDebugGlobalMutex;
2326  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2327 #else
2328  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2329 #endif
2330 
2331 // Minimum size of a free suballocation to register it in the free suballocation collection.
2332 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2333 
2334 /*
2335 Performs binary search and returns iterator to first element that is greater or
2336 equal to (key), according to comparison (cmp).
2337 
2338 Cmp should return true if first argument is less than second argument.
2339 
2340 Returned value is the found element, if present in the collection or place where
2341 new element with value (key) should be inserted.
2342 */
// Binary search: returns an iterator to the first element that is NOT less
// than key according to cmp (i.e. a lower bound). Cmp(a, b) must return true
// when a < b. If no such element exists, returns end — which is also the
// position where key should be inserted to keep the range sorted.
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1;
        }
        else
        {
            hi = mid;
        }
    }
    return beg + lo;
}
2361 
2363 // Memory allocation
2364 
2365 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2366 {
2367  if((pAllocationCallbacks != VMA_NULL) &&
2368  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2369  {
2370  return (*pAllocationCallbacks->pfnAllocation)(
2371  pAllocationCallbacks->pUserData,
2372  size,
2373  alignment,
2374  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2375  }
2376  else
2377  {
2378  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2379  }
2380 }
2381 
2382 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2383 {
2384  if((pAllocationCallbacks != VMA_NULL) &&
2385  (pAllocationCallbacks->pfnFree != VMA_NULL))
2386  {
2387  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2388  }
2389  else
2390  {
2391  VMA_SYSTEM_FREE(ptr);
2392  }
2393 }
2394 
2395 template<typename T>
2396 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2397 {
2398  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2399 }
2400 
2401 template<typename T>
2402 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2403 {
2404  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2405 }
2406 
// Allocates and value-constructs a single object via VkAllocationCallbacks.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// NOTE(review): the placement-new below constructs only ONE element at the
// start of the allocated array; the remaining (count - 1) slots are left raw.
// Callers appear to rely on T being trivially constructible POD — verify
// against call sites before using with non-trivial types.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2410 
2411 template<typename T>
2412 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2413 {
2414  ptr->~T();
2415  VmaFree(pAllocationCallbacks, ptr);
2416 }
2417 
2418 template<typename T>
2419 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2420 {
2421  if(ptr != VMA_NULL)
2422  {
2423  for(size_t i = count; i--; )
2424  {
2425  ptr[i].~T();
2426  }
2427  VmaFree(pAllocationCallbacks, ptr);
2428  }
2429 }
2430 
2431 // STL-compatible allocator.
// STL-compatible allocator that routes every allocation through
// VkAllocationCallbacks (or the system aligned allocator when the callbacks
// pointer is null). Satisfies the minimal Allocator requirements used by the
// containers in this file.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // m_pCallbacks is const, so assignment cannot be meaningfully supported.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
2458 
2459 #if VMA_USE_STL_VECTOR
2460 
2461 #define VmaVector std::vector
2462 
// Inserts item into a std::vector at the given index.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
2468 
// Removes the element at the given index from a std::vector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
2474 
2475 #else // #if VMA_USE_STL_VECTOR
2476 
2477 /* Class with interface compatible with subset of std::vector.
2478 T must be POD because constructors and destructors are not called and memcpy is
2479 used for these objects. */
2480 template<typename T, typename AllocatorT>
2481 class VmaVector
2482 {
2483 public:
2484  typedef T value_type;
2485 
2486  VmaVector(const AllocatorT& allocator) :
2487  m_Allocator(allocator),
2488  m_pArray(VMA_NULL),
2489  m_Count(0),
2490  m_Capacity(0)
2491  {
2492  }
2493 
2494  VmaVector(size_t count, const AllocatorT& allocator) :
2495  m_Allocator(allocator),
2496  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2497  m_Count(count),
2498  m_Capacity(count)
2499  {
2500  }
2501 
2502  VmaVector(const VmaVector<T, AllocatorT>& src) :
2503  m_Allocator(src.m_Allocator),
2504  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2505  m_Count(src.m_Count),
2506  m_Capacity(src.m_Count)
2507  {
2508  if(m_Count != 0)
2509  {
2510  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2511  }
2512  }
2513 
2514  ~VmaVector()
2515  {
2516  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2517  }
2518 
2519  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2520  {
2521  if(&rhs != this)
2522  {
2523  resize(rhs.m_Count);
2524  if(m_Count != 0)
2525  {
2526  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2527  }
2528  }
2529  return *this;
2530  }
2531 
2532  bool empty() const { return m_Count == 0; }
2533  size_t size() const { return m_Count; }
2534  T* data() { return m_pArray; }
2535  const T* data() const { return m_pArray; }
2536 
2537  T& operator[](size_t index)
2538  {
2539  VMA_HEAVY_ASSERT(index < m_Count);
2540  return m_pArray[index];
2541  }
2542  const T& operator[](size_t index) const
2543  {
2544  VMA_HEAVY_ASSERT(index < m_Count);
2545  return m_pArray[index];
2546  }
2547 
2548  T& front()
2549  {
2550  VMA_HEAVY_ASSERT(m_Count > 0);
2551  return m_pArray[0];
2552  }
2553  const T& front() const
2554  {
2555  VMA_HEAVY_ASSERT(m_Count > 0);
2556  return m_pArray[0];
2557  }
2558  T& back()
2559  {
2560  VMA_HEAVY_ASSERT(m_Count > 0);
2561  return m_pArray[m_Count - 1];
2562  }
2563  const T& back() const
2564  {
2565  VMA_HEAVY_ASSERT(m_Count > 0);
2566  return m_pArray[m_Count - 1];
2567  }
2568 
2569  void reserve(size_t newCapacity, bool freeMemory = false)
2570  {
2571  newCapacity = VMA_MAX(newCapacity, m_Count);
2572 
2573  if((newCapacity < m_Capacity) && !freeMemory)
2574  {
2575  newCapacity = m_Capacity;
2576  }
2577 
2578  if(newCapacity != m_Capacity)
2579  {
2580  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
2581  if(m_Count != 0)
2582  {
2583  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2584  }
2585  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2586  m_Capacity = newCapacity;
2587  m_pArray = newArray;
2588  }
2589  }
2590 
2591  void resize(size_t newCount, bool freeMemory = false)
2592  {
2593  size_t newCapacity = m_Capacity;
2594  if(newCount > m_Capacity)
2595  {
2596  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2597  }
2598  else if(freeMemory)
2599  {
2600  newCapacity = newCount;
2601  }
2602 
2603  if(newCapacity != m_Capacity)
2604  {
2605  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2606  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2607  if(elementsToCopy != 0)
2608  {
2609  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2610  }
2611  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2612  m_Capacity = newCapacity;
2613  m_pArray = newArray;
2614  }
2615 
2616  m_Count = newCount;
2617  }
2618 
2619  void clear(bool freeMemory = false)
2620  {
2621  resize(0, freeMemory);
2622  }
2623 
2624  void insert(size_t index, const T& src)
2625  {
2626  VMA_HEAVY_ASSERT(index <= m_Count);
2627  const size_t oldCount = size();
2628  resize(oldCount + 1);
2629  if(index < oldCount)
2630  {
2631  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2632  }
2633  m_pArray[index] = src;
2634  }
2635 
2636  void remove(size_t index)
2637  {
2638  VMA_HEAVY_ASSERT(index < m_Count);
2639  const size_t oldCount = size();
2640  if(index < oldCount - 1)
2641  {
2642  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2643  }
2644  resize(oldCount - 1);
2645  }
2646 
2647  void push_back(const T& src)
2648  {
2649  const size_t newIndex = size();
2650  resize(newIndex + 1);
2651  m_pArray[newIndex] = src;
2652  }
2653 
2654  void pop_back()
2655  {
2656  VMA_HEAVY_ASSERT(m_Count > 0);
2657  resize(size() - 1);
2658  }
2659 
2660  void push_front(const T& src)
2661  {
2662  insert(0, src);
2663  }
2664 
2665  void pop_front()
2666  {
2667  VMA_HEAVY_ASSERT(m_Count > 0);
2668  remove(0);
2669  }
2670 
2671  typedef T* iterator;
2672 
2673  iterator begin() { return m_pArray; }
2674  iterator end() { return m_pArray + m_Count; }
2675 
2676 private:
2677  AllocatorT m_Allocator;
2678  T* m_pArray;
2679  size_t m_Count;
2680  size_t m_Capacity;
2681 };
2682 
// Inserts item into a VmaVector at the given index (parallels the std::vector
// overload so the rest of the library can use either container).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
2688 
// Removes the element at the given index from a VmaVector (parallels the
// std::vector overload).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
2694 
2695 #endif // #if VMA_USE_STL_VECTOR
2696 
// Inserts value into a sorted vector, keeping it sorted, and returns the index
// at which the value was placed.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const begPtr = vector.data();
    typename VectorT::value_type* const endPtr = begPtr + vector.size();
    typename VectorT::value_type* const pos = VmaBinaryFindFirstNotLess(
        begPtr,
        endPtr,
        value,
        CmpLess());
    const size_t indexToInsert = (size_t)(pos - begPtr);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
2708 
// Removes one element equivalent to value from a vector sorted by CmpLess.
// Returns true if an element was found and removed, false otherwise.
// "Equivalent" means neither side compares less than the other.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::iterator itEnd = vector.end();
    const typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        itEnd,
        value,
        comparator);
    // Not found, or found an element that is merely "not less" but not
    // equivalent: nothing to remove.
    if(it == itEnd || comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, size_t(it - vector.begin()));
    return true;
}
2726 
// Finds an element equivalent to value in a vector sorted by CmpLess.
// Returns its index, or vector.size() if not found.
//
// Fixed: the previous version compared the result iterator (a pointer)
// against vector.size() (an integer) and called non-const begin()/assigned
// const data() to a mutable iterator on a const vector — ill-formed code
// that failed to compile on first instantiation. Use const element pointers
// and compare against the real end pointer instead.
template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const pBeg = vector.data();
    const typename VectorT::value_type* const pEnd = pBeg + vector.size();
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        pBeg,
        pEnd,
        value,
        comparator);
    // Element counts as found only when it is equivalent to value
    // (neither compares less than the other).
    if((it != pEnd) && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - pBeg);
    }
    return vector.size();
}
2745 
2747 // class VmaPoolAllocator
2748 
2749 /*
2750 Allocator for objects of type T using a list of arrays (pools) to speed up
2751 allocation. Number of elements that can be allocated is not bounded because
2752 allocator can create multiple blocks.
2753 */
template<typename T>
class VmaPoolAllocator
{
public:
    // pAllocationCallbacks may be null; itemsPerBlock is the capacity of each
    // internally allocated block and must be > 0.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers returned by Alloc become invalid.
    void Clear();
    // Returns a pointer to an uninitialized T slot.
    T* Alloc();
    // Returns a slot previously obtained from Alloc back to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either live (Value) or on the free list (NextFreeIndex).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of Items plus the head of its intrusive free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX means the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a new full block and appends it to m_ItemBlocks.
    ItemBlock& CreateNewBlock();
};
2783 
2784 template<typename T>
2785 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2786  m_pAllocationCallbacks(pAllocationCallbacks),
2787  m_ItemsPerBlock(itemsPerBlock),
2788  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2789 {
2790  VMA_ASSERT(itemsPerBlock > 0);
2791 }
2792 
2793 template<typename T>
2794 VmaPoolAllocator<T>::~VmaPoolAllocator()
2795 {
2796  Clear();
2797 }
2798 
2799 template<typename T>
2800 void VmaPoolAllocator<T>::Clear()
2801 {
2802  for(size_t i = m_ItemBlocks.size(); i--; )
2803  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2804  m_ItemBlocks.clear();
2805 }
2806 
2807 template<typename T>
2808 T* VmaPoolAllocator<T>::Alloc()
2809 {
2810  for(size_t i = m_ItemBlocks.size(); i--; )
2811  {
2812  ItemBlock& block = m_ItemBlocks[i];
2813  // This block has some free items: Use first one.
2814  if(block.FirstFreeIndex != UINT32_MAX)
2815  {
2816  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2817  block.FirstFreeIndex = pItem->NextFreeIndex;
2818  return &pItem->Value;
2819  }
2820  }
2821 
2822  // No block has free item: Create new one and use it.
2823  ItemBlock& newBlock = CreateNewBlock();
2824  Item* const pItem = &newBlock.pItems[0];
2825  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2826  return &pItem->Value;
2827 }
2828 
2829 template<typename T>
2830 void VmaPoolAllocator<T>::Free(T* ptr)
2831 {
2832  // Search all memory blocks to find ptr.
2833  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2834  {
2835  ItemBlock& block = m_ItemBlocks[i];
2836 
2837  // Casting to union.
2838  Item* pItemPtr;
2839  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2840 
2841  // Check if pItemPtr is in address range of this block.
2842  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2843  {
2844  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2845  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2846  block.FirstFreeIndex = index;
2847  return;
2848  }
2849  }
2850  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2851 }
2852 
2853 template<typename T>
2854 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2855 {
2856  ItemBlock newBlock = {
2857  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2858 
2859  m_ItemBlocks.push_back(newBlock);
2860 
2861  // Setup singly-linked list of all free items in this block.
2862  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2863  newBlock.pItems[i].NextFreeIndex = i + 1;
2864  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2865  return m_ItemBlocks.back();
2866 }
2867 
2869 // class VmaRawList, VmaList
2870 
2871 #if VMA_USE_STL_LIST
2872 
2873 #define VmaList std::list
2874 
2875 #else // #if VMA_USE_STL_LIST
2876 
// Node of VmaRawList: doubly-linked, owning one payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the first node.
    VmaListItem* pNext; // Null for the last node.
    T Value;
};
2884 
2885 // Doubly linked list.
// Doubly linked list.
// Low-level list of VmaListItem nodes, allocated from an internal
// VmaPoolAllocator. Exposes raw nodes; VmaList wraps this with an
// STL-like iterator interface. Not copyable.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all nodes back to the pool and resets the list to empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front/Back are meaningful only while IsEmpty() == false.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value leave the new node's Value uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    // Unlinks pItem and returns it to the pool. pItem must belong to this list.
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Source of all nodes.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
2932 
2933 template<typename T>
2934 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2935  m_pAllocationCallbacks(pAllocationCallbacks),
2936  m_ItemAllocator(pAllocationCallbacks, 128),
2937  m_pFront(VMA_NULL),
2938  m_pBack(VMA_NULL),
2939  m_Count(0)
2940 {
2941 }
2942 
2943 template<typename T>
2944 VmaRawList<T>::~VmaRawList()
2945 {
2946  // Intentionally not calling Clear, because that would be unnecessary
2947  // computations to return all items to m_ItemAllocator as free.
2948 }
2949 
2950 template<typename T>
2951 void VmaRawList<T>::Clear()
2952 {
2953  if(IsEmpty() == false)
2954  {
2955  ItemType* pItem = m_pBack;
2956  while(pItem != VMA_NULL)
2957  {
2958  ItemType* const pPrevItem = pItem->pPrev;
2959  m_ItemAllocator.Free(pItem);
2960  pItem = pPrevItem;
2961  }
2962  m_pFront = VMA_NULL;
2963  m_pBack = VMA_NULL;
2964  m_Count = 0;
2965  }
2966 }
2967 
2968 template<typename T>
2969 VmaListItem<T>* VmaRawList<T>::PushBack()
2970 {
2971  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2972  pNewItem->pNext = VMA_NULL;
2973  if(IsEmpty())
2974  {
2975  pNewItem->pPrev = VMA_NULL;
2976  m_pFront = pNewItem;
2977  m_pBack = pNewItem;
2978  m_Count = 1;
2979  }
2980  else
2981  {
2982  pNewItem->pPrev = m_pBack;
2983  m_pBack->pNext = pNewItem;
2984  m_pBack = pNewItem;
2985  ++m_Count;
2986  }
2987  return pNewItem;
2988 }
2989 
2990 template<typename T>
2991 VmaListItem<T>* VmaRawList<T>::PushFront()
2992 {
2993  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2994  pNewItem->pPrev = VMA_NULL;
2995  if(IsEmpty())
2996  {
2997  pNewItem->pNext = VMA_NULL;
2998  m_pFront = pNewItem;
2999  m_pBack = pNewItem;
3000  m_Count = 1;
3001  }
3002  else
3003  {
3004  pNewItem->pNext = m_pFront;
3005  m_pFront->pPrev = pNewItem;
3006  m_pFront = pNewItem;
3007  ++m_Count;
3008  }
3009  return pNewItem;
3010 }
3011 
3012 template<typename T>
3013 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3014 {
3015  ItemType* const pNewItem = PushBack();
3016  pNewItem->Value = value;
3017  return pNewItem;
3018 }
3019 
3020 template<typename T>
3021 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3022 {
3023  ItemType* const pNewItem = PushFront();
3024  pNewItem->Value = value;
3025  return pNewItem;
3026 }
3027 
3028 template<typename T>
3029 void VmaRawList<T>::PopBack()
3030 {
3031  VMA_HEAVY_ASSERT(m_Count > 0);
3032  ItemType* const pBackItem = m_pBack;
3033  ItemType* const pPrevItem = pBackItem->pPrev;
3034  if(pPrevItem != VMA_NULL)
3035  {
3036  pPrevItem->pNext = VMA_NULL;
3037  }
3038  m_pBack = pPrevItem;
3039  m_ItemAllocator.Free(pBackItem);
3040  --m_Count;
3041 }
3042 
3043 template<typename T>
3044 void VmaRawList<T>::PopFront()
3045 {
3046  VMA_HEAVY_ASSERT(m_Count > 0);
3047  ItemType* const pFrontItem = m_pFront;
3048  ItemType* const pNextItem = pFrontItem->pNext;
3049  if(pNextItem != VMA_NULL)
3050  {
3051  pNextItem->pPrev = VMA_NULL;
3052  }
3053  m_pFront = pNextItem;
3054  m_ItemAllocator.Free(pFrontItem);
3055  --m_Count;
3056 }
3057 
3058 template<typename T>
3059 void VmaRawList<T>::Remove(ItemType* pItem)
3060 {
3061  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3062  VMA_HEAVY_ASSERT(m_Count > 0);
3063 
3064  if(pItem->pPrev != VMA_NULL)
3065  {
3066  pItem->pPrev->pNext = pItem->pNext;
3067  }
3068  else
3069  {
3070  VMA_HEAVY_ASSERT(m_pFront == pItem);
3071  m_pFront = pItem->pNext;
3072  }
3073 
3074  if(pItem->pNext != VMA_NULL)
3075  {
3076  pItem->pNext->pPrev = pItem->pPrev;
3077  }
3078  else
3079  {
3080  VMA_HEAVY_ASSERT(m_pBack == pItem);
3081  m_pBack = pItem->pPrev;
3082  }
3083 
3084  m_ItemAllocator.Free(pItem);
3085  --m_Count;
3086 }
3087 
3088 template<typename T>
3089 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3090 {
3091  if(pItem != VMA_NULL)
3092  {
3093  ItemType* const prevItem = pItem->pPrev;
3094  ItemType* const newItem = m_ItemAllocator.Alloc();
3095  newItem->pPrev = prevItem;
3096  newItem->pNext = pItem;
3097  pItem->pPrev = newItem;
3098  if(prevItem != VMA_NULL)
3099  {
3100  prevItem->pNext = newItem;
3101  }
3102  else
3103  {
3104  VMA_HEAVY_ASSERT(m_pFront == pItem);
3105  m_pFront = newItem;
3106  }
3107  ++m_Count;
3108  return newItem;
3109  }
3110  else
3111  return PushBack();
3112 }
3113 
3114 template<typename T>
3115 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3116 {
3117  if(pItem != VMA_NULL)
3118  {
3119  ItemType* const nextItem = pItem->pNext;
3120  ItemType* const newItem = m_ItemAllocator.Alloc();
3121  newItem->pNext = nextItem;
3122  newItem->pPrev = pItem;
3123  pItem->pNext = newItem;
3124  if(nextItem != VMA_NULL)
3125  {
3126  nextItem->pPrev = newItem;
3127  }
3128  else
3129  {
3130  VMA_HEAVY_ASSERT(m_pBack == pItem);
3131  m_pBack = newItem;
3132  }
3133  ++m_Count;
3134  return newItem;
3135  }
3136  else
3137  return PushFront();
3138 }
3139 
3140 template<typename T>
3141 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3142 {
3143  ItemType* const newItem = InsertBefore(pItem);
3144  newItem->Value = value;
3145  return newItem;
3146 }
3147 
3148 template<typename T>
3149 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3150 {
3151  ItemType* const newItem = InsertAfter(pItem);
3152  newItem->Value = value;
3153  return newItem;
3154 }
3155 
3156 template<typename T, typename AllocatorT>
3157 class VmaList
3158 {
3159 public:
3160  class iterator
3161  {
3162  public:
3163  iterator() :
3164  m_pList(VMA_NULL),
3165  m_pItem(VMA_NULL)
3166  {
3167  }
3168 
3169  T& operator*() const
3170  {
3171  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3172  return m_pItem->Value;
3173  }
3174  T* operator->() const
3175  {
3176  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3177  return &m_pItem->Value;
3178  }
3179 
3180  iterator& operator++()
3181  {
3182  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3183  m_pItem = m_pItem->pNext;
3184  return *this;
3185  }
3186  iterator& operator--()
3187  {
3188  if(m_pItem != VMA_NULL)
3189  {
3190  m_pItem = m_pItem->pPrev;
3191  }
3192  else
3193  {
3194  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3195  m_pItem = m_pList->Back();
3196  }
3197  return *this;
3198  }
3199 
3200  iterator operator++(int)
3201  {
3202  iterator result = *this;
3203  ++*this;
3204  return result;
3205  }
3206  iterator operator--(int)
3207  {
3208  iterator result = *this;
3209  --*this;
3210  return result;
3211  }
3212 
3213  bool operator==(const iterator& rhs) const
3214  {
3215  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3216  return m_pItem == rhs.m_pItem;
3217  }
3218  bool operator!=(const iterator& rhs) const
3219  {
3220  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3221  return m_pItem != rhs.m_pItem;
3222  }
3223 
3224  private:
3225  VmaRawList<T>* m_pList;
3226  VmaListItem<T>* m_pItem;
3227 
3228  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3229  m_pList(pList),
3230  m_pItem(pItem)
3231  {
3232  }
3233 
3234  friend class VmaList<T, AllocatorT>;
3235  };
3236 
3237  class const_iterator
3238  {
3239  public:
3240  const_iterator() :
3241  m_pList(VMA_NULL),
3242  m_pItem(VMA_NULL)
3243  {
3244  }
3245 
3246  const_iterator(const iterator& src) :
3247  m_pList(src.m_pList),
3248  m_pItem(src.m_pItem)
3249  {
3250  }
3251 
3252  const T& operator*() const
3253  {
3254  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3255  return m_pItem->Value;
3256  }
3257  const T* operator->() const
3258  {
3259  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3260  return &m_pItem->Value;
3261  }
3262 
3263  const_iterator& operator++()
3264  {
3265  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3266  m_pItem = m_pItem->pNext;
3267  return *this;
3268  }
3269  const_iterator& operator--()
3270  {
3271  if(m_pItem != VMA_NULL)
3272  {
3273  m_pItem = m_pItem->pPrev;
3274  }
3275  else
3276  {
3277  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3278  m_pItem = m_pList->Back();
3279  }
3280  return *this;
3281  }
3282 
3283  const_iterator operator++(int)
3284  {
3285  const_iterator result = *this;
3286  ++*this;
3287  return result;
3288  }
3289  const_iterator operator--(int)
3290  {
3291  const_iterator result = *this;
3292  --*this;
3293  return result;
3294  }
3295 
3296  bool operator==(const const_iterator& rhs) const
3297  {
3298  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3299  return m_pItem == rhs.m_pItem;
3300  }
3301  bool operator!=(const const_iterator& rhs) const
3302  {
3303  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3304  return m_pItem != rhs.m_pItem;
3305  }
3306 
3307  private:
3308  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3309  m_pList(pList),
3310  m_pItem(pItem)
3311  {
3312  }
3313 
3314  const VmaRawList<T>* m_pList;
3315  const VmaListItem<T>* m_pItem;
3316 
3317  friend class VmaList<T, AllocatorT>;
3318  };
3319 
3320  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3321 
3322  bool empty() const { return m_RawList.IsEmpty(); }
3323  size_t size() const { return m_RawList.GetCount(); }
3324 
3325  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3326  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3327 
3328  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3329  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3330 
3331  void clear() { m_RawList.Clear(); }
3332  void push_back(const T& value) { m_RawList.PushBack(value); }
3333  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3334  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3335 
3336 private:
3337  VmaRawList<T> m_RawList;
3338 };
3339 
3340 #endif // #if VMA_USE_STL_LIST
3341 
3343 // class VmaMap
3344 
3345 // Unused in this version.
3346 #if 0
3347 
3348 #if VMA_USE_STL_UNORDERED_MAP
3349 
3350 #define VmaPair std::pair
3351 
3352 #define VMA_MAP_TYPE(KeyT, ValueT) \
3353  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3354 
3355 #else // #if VMA_USE_STL_UNORDERED_MAP
3356 
// Minimal std::pair substitute for the VmaMap implementation below
// (this whole section is currently compiled out by #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
3366 
3367 /* Class compatible with subset of interface of std::unordered_map.
3368 KeyT, ValueT must be POD because they will be stored in VmaVector.
3369 */
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Despite the unordered_map-like interface, storage is a vector kept sorted
by key, so lookup is binary search rather than hashing.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at its sorted position (duplicates are not prevented).
    void insert(const PairType& pair);
    // Returns iterator to the pair with matching key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Sorted by PairType::first (see VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
3389 
3390 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3391 
// Orders VmaPairs by their first member; the heterogeneous overload lets
// binary search compare a stored pair directly against a bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
3404 
3405 template<typename KeyT, typename ValueT>
3406 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3407 {
3408  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3409  m_Vector.data(),
3410  m_Vector.data() + m_Vector.size(),
3411  pair,
3412  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3413  VmaVectorInsert(m_Vector, indexToInsert, pair);
3414 }
3415 
3416 template<typename KeyT, typename ValueT>
3417 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3418 {
3419  PairType* it = VmaBinaryFindFirstNotLess(
3420  m_Vector.data(),
3421  m_Vector.data() + m_Vector.size(),
3422  key,
3423  VmaPairFirstLess<KeyT, ValueT>());
3424  if((it != m_Vector.end()) && (it->first == key))
3425  {
3426  return it;
3427  }
3428  else
3429  {
3430  return m_Vector.end();
3431  }
3432 }
3433 
3434 template<typename KeyT, typename ValueT>
3435 void VmaMap<KeyT, ValueT>::erase(iterator it)
3436 {
3437  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3438 }
3439 
3440 #endif // #if VMA_USE_STL_UNORDERED_MAP
3441 
3442 #endif // #if 0
3443 
3445 
3446 class VmaDeviceMemoryBlock;
3447 
// Internal representation of a single allocation (VmaAllocation handle).
// Starts in ALLOCATION_TYPE_NONE and is initialized exactly once via
// InitBlockAllocation, InitLost or InitDedicatedAllocation; the block/
// dedicated payloads share storage in the union at the bottom.
struct VmaAllocation_T
{
private:
    // High bit of m_MapCount: allocation created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    // userDataString: pUserData is treated as an owned, copied string
    // rather than an opaque pointer.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        // Persistent-map bit is masked out: only user map/unmap calls count.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation of an existing memory block.
    // Must be called at most once, on a freshly constructed object.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is already lost:
    // block-type with a null block. Frame index must already be
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves this block allocation to a different block/offset (used by
    // defragmentation). Defined out of line.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were its own block.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap reference counting, split by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};
3641 
3642 /*
3643 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3644 allocated memory block or free.
3645 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Byte offset within the owning VkDeviceMemory block.
    VkDeviceSize size;
    VmaAllocation hAllocation; // Null when this range is free.
    VmaSuballocationType type;
};
3653 
3654 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3655 
3656 // Cost of one additional allocation lost, as equivalent in bytes.
3657 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3658 
3659 /*
3660 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3661 
3662 If canMakeOtherLost was false:
3663 - item points to a FREE suballocation.
3664 - itemsToMakeLostCount is 0.
3665 
3666 If canMakeOtherLost was true:
3667 - item points to first of sequence of suballocations, which are either FREE,
3668  or point to VmaAllocations that can become lost.
3669 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3670  the requested allocation to succeed.
3671 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset for the new allocation.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of accepting this request: bytes of live allocations
    // sacrificed, plus a fixed penalty per allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
3685 
3686 /*
3687 Data structure used for bookkeeping of allocations and unused ranges of memory
3688 in a single VkDeviceMemory block.
3689 */
// Bookkeeping for one VkDeviceMemory block: an ordered list of used/free
// suballocations plus a size-sorted index of the larger free ranges.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    // Must be called once after construction, with the block's full size.
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    // Free suballocations are list entries too, hence the subtraction.
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    // Actually marks lost the allocations that a prior CreateAllocationRequest
    // with canMakeOtherLost selected.
    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);
    void FreeAtOffset(VkDeviceSize offset);

private:
    VkDeviceSize m_Size;
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    // All suballocations (used and free) ordered by offset.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
3786 
3787 // Helper class that represents mapped memory. Synchronized internally.
class VmaDeviceMemoryMapping
{
public:
    VmaDeviceMemoryMapping();
    ~VmaDeviceMemoryMapping();

    // Returns the host pointer of the currently mapped memory, or null when unmapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Increases the mapping reference count by `count`. ppData can be null.
    // NOTE(review): definitions are out of view; presumably vkMapMemory happens
    // only on the 0 -> nonzero transition - confirm in the implementation.
    VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
    // Decreases the mapping reference count by `count`; presumably unmaps when it reaches zero.
    void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);

private:
    VMA_MUTEX m_Mutex;       // Guards the fields below - this class is internally synchronized.
    uint32_t m_MapCount;     // Outstanding map reference count.
    void* m_pMappedData;     // Host pointer while mapped; null otherwise.
};
3805 
3806 /*
3807 Represents a single block of device memory (`VkDeviceMemory`) with all the
3808 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3809 
3810 Thread-safety: This class must be externally synchronized.
3811 */
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;        // Vulkan memory type index this block belongs to.
    VkDeviceMemory m_hMemory;          // Owned device memory handle; released in Destroy().
    VmaDeviceMemoryMapping m_Mapping;  // Internally synchronized mapping state for m_hMemory.
    VmaBlockMetadata m_Metadata;       // Suballocation bookkeeping for this block.

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must run first - by destruction time the memory handle must be gone.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Increases the block's mapping reference count by `count`. ppData can be null.
    // NOTE(review): presumably forwards to m_Mapping - definition out of view.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    // Decreases the block's mapping reference count by `count`.
    void Unmap(VmaAllocator hAllocator, uint32_t count);
};
3842 
// Strict-weak-ordering functor that orders opaque pointers by address value.
// Used to keep pointer containers sorted for binary search.
struct VmaPointerLess
{
    bool operator()(const void* pLeft, const void* pRight) const
    {
        return pLeft < pRight;
    }
};
3850 
3851 class VmaDefragmentator;
3852 
3853 /*
3854 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3855 Vulkan memory type.
3856 
3857 Synchronized internally with a mutex.
3858 */
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so the pool meets its minimum size.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    // Allocates a region from an existing block or a newly created one.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Returns the existing defragmentator or lazily creates one.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;    // Guards mutable state below - struct is internally synchronized.
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VmaDefragmentator* m_pDefragmentator;

    // NOTE(review): returns size_t while block sizes are VkDeviceSize elsewhere -
    // potential truncation on 32-bit targets; confirm against the definition.
    size_t CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
3947 
// Implementation of the public VmaPool handle: a custom pool is simply one
// VmaBlockVector with the parameters given at pool creation.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
3965 
// Moves allocations between blocks of one VmaBlockVector to compact memory.
// Collect candidates with AddAllocation(), then run Defragment().
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;  // The vector being defragmented (friend access).
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;             // Running totals reported via getters below.
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus the caller's
    // optional "was it moved?" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending (largest first).
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations iff it contains allocations
        // that were not registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo for "Descending"; renaming
        // would touch callers out of view, so it is left as-is here.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries by the address of their underlying block;
    // the heterogeneous overload enables lookup by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of moving allocations, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged, if not
    // null, receives whether the allocation was actually moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
4093 
4094 // Main allocator object.
// Main allocator object.
struct VmaAllocator_T
{
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user-specified callbacks, or null to use the default allocator.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up to the debug minimum if configured.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    // Queries requirements, also reporting whether a dedicated allocation is
    // required/preferred (VK_KHR_dedicated_allocation).
    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers around vkAllocateMemory / vkFreeMemory.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VmaVulkanFunctions m_VulkanFunctions;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    // Allocation restricted to a single, already-chosen memory type.
    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
4247 
4249 // Memory allocation #2 after VmaAllocator_T definition
4250 
4251 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4252 {
4253  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4254 }
4255 
4256 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4257 {
4258  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4259 }
4260 
4261 template<typename T>
4262 static T* VmaAllocate(VmaAllocator hAllocator)
4263 {
4264  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4265 }
4266 
4267 template<typename T>
4268 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4269 {
4270  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4271 }
4272 
4273 template<typename T>
4274 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4275 {
4276  if(ptr != VMA_NULL)
4277  {
4278  ptr->~T();
4279  VmaFree(hAllocator, ptr);
4280  }
4281 }
4282 
4283 template<typename T>
4284 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4285 {
4286  if(ptr != VMA_NULL)
4287  {
4288  for(size_t i = count; i--; )
4289  ptr[i].~T();
4290  VmaFree(hAllocator, ptr);
4291  }
4292 }
4293 
4295 // VmaStringBuilder
4296 
4297 #if VMA_STATS_STRING_ENABLED
4298 
// Minimal append-only character buffer used to build statistics strings.
// The buffer is NOT null-terminated: use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Raw character storage; grows via the allocator's callbacks.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
4316 
4317 void VmaStringBuilder::Add(const char* pStr)
4318 {
4319  const size_t strLen = strlen(pStr);
4320  if(strLen > 0)
4321  {
4322  const size_t oldCount = m_Data.size();
4323  m_Data.resize(oldCount + strLen);
4324  memcpy(m_Data.data() + oldCount, pStr, strLen);
4325  }
4326 }
4327 
// Appends the decimal representation of a 32-bit unsigned integer.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // Max uint32_t has 10 decimal digits; +1 for the null terminator.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the decimal representation of a 64-bit unsigned integer.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // Max uint64_t has 20 decimal digits; +1 for the null terminator.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends a textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    // Buffer sized for a 64-bit pointer rendering; VmaPtrToStr's exact
    // format is defined elsewhere in the file.
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
4348 
4349 #endif // #if VMA_STATS_STRING_ENABLED
4350 
4352 // VmaJsonWriter
4353 
4354 #if VMA_STATS_STRING_ENABLED
4355 
// Streaming JSON writer over a VmaStringBuilder. Objects/arrays are tracked on
// an explicit stack; inside an object, values alternate key (string), value.
// Debug asserts enforce well-formed usage; invalid call sequences are not
// recovered from in release builds.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // BeginString/ContinueString/EndString allow composing one string value
    // from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One stack entry per currently-open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // Values written so far; in objects, keys count too.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;   // True between BeginString and EndString.

    // Emits the separator/indent that must precede the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
4403 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
4405 
// Binds the writer to an output string builder; the stack uses the given
// allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    // The document must be complete: no open string, no open object/array.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
4418 
// Opens a JSON object ('{') and pushes it on the collection stack.
void VmaJsonWriter::BeginObject(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    // The object itself is a value in the enclosing collection.
    BeginValue(false);
    m_SB.Add('{');

    StackItem item;
    item.type = COLLECTION_TYPE_OBJECT;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

// Closes the innermost JSON object ('}') and pops it off the stack.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // Closing brace is indented one level less than the object's contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
4443 
// Opens a JSON array ('[') and pushes it on the collection stack.
void VmaJsonWriter::BeginArray(bool singleLine)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(false);
    m_SB.Add('[');

    StackItem item;
    item.type = COLLECTION_TYPE_ARRAY;
    item.valueCount = 0;
    item.singleLineMode = singleLine;
    m_Stack.push_back(item);
}

// Closes the innermost JSON array (']') and pops it off the stack.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
4468 
// Writes a complete quoted, escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}

// Opens a string value (emits the opening quote); optionally appends an
// initial fragment. Must be paired with EndString().
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
4487 
4488 void VmaJsonWriter::ContinueString(const char* pStr)
4489 {
4490  VMA_ASSERT(m_InsideString);
4491 
4492  const size_t strLen = strlen(pStr);
4493  for(size_t i = 0; i < strLen; ++i)
4494  {
4495  char ch = pStr[i];
4496  if(ch == '\'')
4497  {
4498  m_SB.Add("\\\\");
4499  }
4500  else if(ch == '"')
4501  {
4502  m_SB.Add("\\\"");
4503  }
4504  else if(ch >= 32)
4505  {
4506  m_SB.Add(ch);
4507  }
4508  else switch(ch)
4509  {
4510  case '\b':
4511  m_SB.Add("\\b");
4512  break;
4513  case '\f':
4514  m_SB.Add("\\f");
4515  break;
4516  case '\n':
4517  m_SB.Add("\\n");
4518  break;
4519  case '\r':
4520  m_SB.Add("\\r");
4521  break;
4522  case '\t':
4523  m_SB.Add("\\t");
4524  break;
4525  default:
4526  VMA_ASSERT(0 && "Character not currently supported.");
4527  break;
4528  }
4529  }
4530 }
4531 
// Appends a decimal uint32 to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a decimal uint64 to the currently open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer rendering to the currently open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}

// Optionally appends a final fragment, then closes the string value with the
// trailing quote.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
4560 
// Writes an unquoted uint32 number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes an unquoted uint64 number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes the literal true/false.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the literal null.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
4588 
4589 void VmaJsonWriter::BeginValue(bool isString)
4590 {
4591  if(!m_Stack.empty())
4592  {
4593  StackItem& currItem = m_Stack.back();
4594  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4595  currItem.valueCount % 2 == 0)
4596  {
4597  VMA_ASSERT(isString);
4598  }
4599 
4600  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4601  currItem.valueCount % 2 != 0)
4602  {
4603  m_SB.Add(": ");
4604  }
4605  else if(currItem.valueCount > 0)
4606  {
4607  m_SB.Add(", ");
4608  WriteIndent();
4609  }
4610  else
4611  {
4612  WriteIndent();
4613  }
4614  ++currItem.valueCount;
4615  }
4616 }
4617 
4618 void VmaJsonWriter::WriteIndent(bool oneLess)
4619 {
4620  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4621  {
4622  m_SB.AddNewLine();
4623 
4624  size_t count = m_Stack.size();
4625  if(count > 0 && oneLess)
4626  {
4627  --count;
4628  }
4629  for(size_t i = 0; i < count; ++i)
4630  {
4631  m_SB.Add(INDENT);
4632  }
4633  }
4634 }
4635 
4636 #endif // #if VMA_STATS_STRING_ENABLED
4637 
4639 
4640 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4641 {
4642  if(IsUserDataString())
4643  {
4644  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4645 
4646  FreeUserDataString(hAllocator);
4647 
4648  if(pUserData != VMA_NULL)
4649  {
4650  const char* const newStrSrc = (char*)pUserData;
4651  const size_t newStrLen = strlen(newStrSrc);
4652  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4653  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4654  m_pUserData = newStrDst;
4655  }
4656  }
4657  else
4658  {
4659  m_pUserData = pUserData;
4660  }
4661 }
4662 
4663 void VmaAllocation_T::ChangeBlockAllocation(
4664  VmaAllocator hAllocator,
4665  VmaDeviceMemoryBlock* block,
4666  VkDeviceSize offset)
4667 {
4668  VMA_ASSERT(block != VMA_NULL);
4669  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4670 
4671  // Move mapping reference counter from old block to new block.
4672  if(block != m_BlockAllocation.m_Block)
4673  {
4674  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4675  if(IsPersistentMap())
4676  ++mapRefCount;
4677  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4678  block->Map(hAllocator, mapRefCount, VMA_NULL);
4679  }
4680 
4681  m_BlockAllocation.m_Block = block;
4682  m_BlockAllocation.m_Offset = offset;
4683 }
4684 
// Offset of the allocation within its block; dedicated allocations own the
// whole VkDeviceMemory, so their offset is always 0.
VkDeviceSize VmaAllocation_T::GetOffset() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Offset;
    case ALLOCATION_TYPE_DEDICATED:
        return 0;
    default:
        VMA_ASSERT(0);
        return 0;
    }
}

// The VkDeviceMemory this allocation lives in (shared block or dedicated).
VkDeviceMemory VmaAllocation_T::GetMemory() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->m_hMemory;
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hMemory;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}

// Vulkan memory type index of the underlying memory; UINT32_MAX on corruption.
uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_MemoryTypeIndex;
    default:
        VMA_ASSERT(0);
        return UINT32_MAX;
    }
}
4726 
4727 void* VmaAllocation_T::GetMappedData() const
4728 {
4729  switch(m_Type)
4730  {
4731  case ALLOCATION_TYPE_BLOCK:
4732  if(m_MapCount != 0)
4733  {
4734  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4735  VMA_ASSERT(pBlockData != VMA_NULL);
4736  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4737  }
4738  else
4739  {
4740  return VMA_NULL;
4741  }
4742  break;
4743  case ALLOCATION_TYPE_DEDICATED:
4744  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4745  return m_DedicatedAllocation.m_pMappedData;
4746  default:
4747  VMA_ASSERT(0);
4748  return VMA_NULL;
4749  }
4750 }
4751 
// Only block allocations created with the "can become lost" flag may be
// reclaimed; dedicated allocations never become lost.
bool VmaAllocation_T::CanBecomeLost() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_CanBecomeLost;
    case ALLOCATION_TYPE_DEDICATED:
        return false;
    default:
        VMA_ASSERT(0);
        return false;
    }
}

// Pool this block allocation came from (valid for block allocations only).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
4771 
// Attempts to mark this allocation as lost so its memory can be reused.
// Succeeds only if the allocation was last used more than frameInUseCount
// frames before currentFrameIndex. Thread-safe via a lock-free CAS loop on
// the atomic last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - callers are not expected to ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still inside the in-use window - may be referenced by the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread touched the index concurrently.
            // NOTE(review): the loop assumes CompareExchangeLastUseFrameIndex
            // refreshes localLastUseFrameIndex on failure - confirm in its definition.
        }
    }
}
4803 
4804 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4805 {
4806  VMA_ASSERT(IsUserDataString());
4807  if(m_pUserData != VMA_NULL)
4808  {
4809  char* const oldStr = (char*)m_pUserData;
4810  const size_t oldStrLen = strlen(oldStr);
4811  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4812  m_pUserData = VMA_NULL;
4813  }
4814 }
4815 
// Increments the map reference count of a block allocation. The low 7 bits of
// m_MapCount hold the count; MAP_COUNT_FLAG_PERSISTENT_MAP is kept separate.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    // Cap the reference count at 0x7F to avoid overflowing into the flag bit.
    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}

// Decrements the map reference count of a block allocation; asserts on
// unbalanced unmap.
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
4843 
// Maps a dedicated allocation. The first map calls vkMapMemory over the whole
// range and caches the pointer; subsequent maps just bump the reference count
// and return the cached pointer. Returns VK_ERROR_MEMORY_MAP_FAILED when the
// reference count would overflow its 7-bit field.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer if the count has headroom.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the entire dedicated memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
4880 
// Unmaps a dedicated allocation: decrements the map reference count and calls
// vkUnmapMemory only when the count drops to zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone - release the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
4901 
4902 #if VMA_STATS_STRING_ENABLED
4903 
4904 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing statistics; keep the
// order in sync with the enum definition.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
4913 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range,
// since with 0 or 1 entries those statistics carry no extra information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
4961 
4962 #endif // #if VMA_STATS_STRING_ENABLED
4963 
// Comparator that orders suballocation-list iterators by ascending
// suballocation size. The heterogeneous overload (iterator vs raw
// VkDeviceSize) enables binary search over m_FreeSuballocationsBySize
// with a plain size as the key (see VmaBinaryFindFirstNotLess usage).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
4979 
4981 // class VmaBlockMetadata
4982 
// Constructs empty metadata; the block's actual size is set later by Init().
// Both containers route their memory through the allocator's CPU allocation
// callbacks via VmaStlAllocator.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
4991 
// Trivial destructor: containers release their storage through their own
// destructors (using the allocator callbacks captured at construction).
VmaBlockMetadata::~VmaBlockMetadata()
{
}
4995 
4996 void VmaBlockMetadata::Init(VkDeviceSize size)
4997 {
4998  m_Size = size;
4999  m_FreeCount = 1;
5000  m_SumFreeSize = size;
5001 
5002  VmaSuballocation suballoc = {};
5003  suballoc.offset = 0;
5004  suballoc.size = size;
5005  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5006  suballoc.hAllocation = VK_NULL_HANDLE;
5007 
5008  m_Suballocations.push_back(suballoc);
5009  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5010  --suballocItem;
5011  m_FreeSuballocationsBySize.push_back(suballocItem);
5012 }
5013 
// Performs a full consistency check of this block's metadata and returns
// false on the first violated invariant. Intended for use under
// VMA_HEAVY_ASSERT(Validate()). Checks: contiguous coverage of the block,
// no adjacent free ranges, free/used vs. hAllocation agreement, the
// size-sorted free-range registry, and the cached totals.
bool VmaBlockMetadata::Validate() const
{
    // An initialized block always holds at least one suballocation
    // (a single free one when empty — see Init()).
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculates from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visisted suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // A free range must have a null allocation handle and vice versa.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges of at least this size are kept in the sorted registry.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }
        else
        {
            // A used range must agree with its VmaAllocation_T record.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != m_Size) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
5119 
5120 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5121 {
5122  if(!m_FreeSuballocationsBySize.empty())
5123  {
5124  return m_FreeSuballocationsBySize.back()->size;
5125  }
5126  else
5127  {
5128  return 0;
5129  }
5130 }
5131 
5132 bool VmaBlockMetadata::IsEmpty() const
5133 {
5134  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5135 }
5136 
5137 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5138 {
5139  outInfo.blockCount = 1;
5140 
5141  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5142  outInfo.allocationCount = rangeCount - m_FreeCount;
5143  outInfo.unusedRangeCount = m_FreeCount;
5144 
5145  outInfo.unusedBytes = m_SumFreeSize;
5146  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5147 
5148  outInfo.allocationSizeMin = UINT64_MAX;
5149  outInfo.allocationSizeMax = 0;
5150  outInfo.unusedRangeSizeMin = UINT64_MAX;
5151  outInfo.unusedRangeSizeMax = 0;
5152 
5153  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5154  suballocItem != m_Suballocations.cend();
5155  ++suballocItem)
5156  {
5157  const VmaSuballocation& suballoc = *suballocItem;
5158  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5159  {
5160  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5161  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5162  }
5163  else
5164  {
5165  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5166  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5167  }
5168  }
5169 }
5170 
5171 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5172 {
5173  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5174 
5175  inoutStats.size += m_Size;
5176  inoutStats.unusedSize += m_SumFreeSize;
5177  inoutStats.allocationCount += rangeCount - m_FreeCount;
5178  inoutStats.unusedRangeCount += m_FreeCount;
5179  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5180 }
5181 
5182 #if VMA_STATS_STRING_ENABLED
5183 
5184 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5185 {
5186  json.BeginObject();
5187 
5188  json.WriteString("TotalBytes");
5189  json.WriteNumber(m_Size);
5190 
5191  json.WriteString("UnusedBytes");
5192  json.WriteNumber(m_SumFreeSize);
5193 
5194  json.WriteString("Allocations");
5195  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5196 
5197  json.WriteString("UnusedRanges");
5198  json.WriteNumber(m_FreeCount);
5199 
5200  json.WriteString("Suballocations");
5201  json.BeginArray();
5202  size_t i = 0;
5203  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5204  suballocItem != m_Suballocations.cend();
5205  ++suballocItem, ++i)
5206  {
5207  json.BeginObject(true);
5208 
5209  json.WriteString("Type");
5210  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5211 
5212  json.WriteString("Size");
5213  json.WriteNumber(suballocItem->size);
5214 
5215  json.WriteString("Offset");
5216  json.WriteNumber(suballocItem->offset);
5217 
5218  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5219  {
5220  const void* pUserData = suballocItem->hAllocation->GetUserData();
5221  if(pUserData != VMA_NULL)
5222  {
5223  json.WriteString("UserData");
5224  if(suballocItem->hAllocation->IsUserDataString())
5225  {
5226  json.WriteString((const char*)pUserData);
5227  }
5228  else
5229  {
5230  json.BeginString();
5231  json.ContinueString_Pointer(pUserData);
5232  json.EndString();
5233  }
5234  }
5235  }
5236 
5237  json.EndObject();
5238  }
5239  json.EndArray();
5240 
5241  json.EndObject();
5242 }
5243 
5244 #endif // #if VMA_STATS_STRING_ENABLED
5245 
5246 /*
5247 How many suitable free suballocations to analyze before choosing best one.
5248 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
5249  be chosen.
5250 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5251  suballocations will be analized and best one will be chosen.
5252 - Any other value is also acceptable.
5253 */
5254 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5255 
5256 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5257 {
5258  VMA_ASSERT(IsEmpty());
5259  pAllocationRequest->offset = 0;
5260  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5261  pAllocationRequest->sumItemSize = 0;
5262  pAllocationRequest->item = m_Suballocations.begin();
5263  pAllocationRequest->itemsToMakeLostCount = 0;
5264 }
5265 
/*
Tries to find a place for an allocation of given size, alignment and type in
this block. Two phases:
1. Searches m_FreeSuballocationsBySize: with VMA_BEST_FIT, a binary search
   finds the smallest free range >= allocSize and candidates are tried in
   ascending size order; otherwise candidates are tried from the largest
   range down (worst-fit).
2. If that fails and canMakeOtherLost is true, a brute-force scan over all
   suballocations considers also ranges whose allocations can become lost,
   keeping the candidate with the lowest CalcCost().
Returns true and fills *pAllocationRequest on success.
*/
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fullfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Later candidates may still fail (alignment/granularity), so keep
            // trying progressively larger free ranges.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE sentinels mark "no candidate found yet"; any real
        // candidate's CalcCost() will compare lower.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Consider free ranges and used ranges whose allocation may be lost.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest viable candidate.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still VK_WHOLE_SIZE means no candidate was ever accepted.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
5394 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
starting at pAllocationRequest->item and walking forward. On success leaves
pAllocationRequest->item pointing at the resulting free suballocation and
returns true; returns false if any allocation refused MakeLost().
Note: FreeSuballocation() may merge the freed range with neighbors, so the
request's iterator is re-assigned from its return value each step.
*/
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over the current free range to reach the next used one.
        // (Adjacent free ranges cannot exist — they are always merged.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
5426 
// Makes lost every allocation in this block that both can become lost and
// agrees to be lost for the given frame parameters. Returns how many were
// made lost. FreeSuballocation() may return an iterator to a range merged
// with its predecessor; assigning it back before ++it keeps the loop valid.
uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
5444 
/*
Commits a previously computed allocation request: converts (part of) the free
suballocation request.item into a used suballocation for hAllocation. Any
leftover free space before or after the allocated range becomes a new free
suballocation, and the totals (m_FreeCount, m_SumFreeSize) are updated.
*/
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before suballoc.size is modified below,
    // because the registry is searched by the suballocation's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals. The consumed free range is gone (-1), each inserted
    // padding range adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
5508 
5509 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5510 {
5511  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5512  suballocItem != m_Suballocations.end();
5513  ++suballocItem)
5514  {
5515  VmaSuballocation& suballoc = *suballocItem;
5516  if(suballoc.hAllocation == allocation)
5517  {
5518  FreeSuballocation(suballocItem);
5519  VMA_HEAVY_ASSERT(Validate());
5520  return;
5521  }
5522  }
5523  VMA_ASSERT(0 && "Not found!");
5524 }
5525 
5526 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5527 {
5528  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5529  suballocItem != m_Suballocations.end();
5530  ++suballocItem)
5531  {
5532  VmaSuballocation& suballoc = *suballocItem;
5533  if(suballoc.offset == offset)
5534  {
5535  FreeSuballocation(suballocItem);
5536  return;
5537  }
5538  }
5539  VMA_ASSERT(0 && "Not found!");
5540 }
5541 
5542 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5543 {
5544  VkDeviceSize lastSize = 0;
5545  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5546  {
5547  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5548 
5549  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5550  {
5551  VMA_ASSERT(0);
5552  return false;
5553  }
5554  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5555  {
5556  VMA_ASSERT(0);
5557  return false;
5558  }
5559  if(it->size < lastSize)
5560  {
5561  VMA_ASSERT(0);
5562  return false;
5563  }
5564 
5565  lastSize = it->size;
5566  }
5567  return true;
5568 }
5569 
/*
Checks whether an allocation of allocSize / allocAlignment / allocType can be
placed starting at suballocItem. On success returns true and fills:
- *pOffset - final, aligned offset for the proposed allocation.
- *itemsToMakeLostCount - number of existing allocations that would have to be
  made lost to make room (always 0 when canMakeOtherLost is false).
- *pSumFreeSize - total size of free suballocations the request would consume.
- *pSumItemSize - total size of allocations that would be made lost.

With canMakeOtherLost == true, suballocItem may be used or free; used ranges
(including following ones the request spills into) qualify only if their
allocation can become lost and is older than currentFrameIndex - frameInUseCount.
With canMakeOtherLost == false, suballocItem must be a single free range large
enough by itself. In both modes, bufferImageGranularity conflicts with
neighboring suballocations (per the Vulkan spec's buffer/image granularity
rule) can force extra alignment or outright failure.
*/
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting range is used: it must itself be losable and stale.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // A used range in the way: it must be losable and stale,
                    // otherwise the request cannot be satisfied here.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the starting range must be a free one.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
5851 
5852 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5853 {
5854  VMA_ASSERT(item != m_Suballocations.end());
5855  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5856 
5857  VmaSuballocationList::iterator nextItem = item;
5858  ++nextItem;
5859  VMA_ASSERT(nextItem != m_Suballocations.end());
5860  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5861 
5862  item->size += nextItem->size;
5863  --m_FreeCount;
5864  m_Suballocations.erase(nextItem);
5865 }
5866 
/*
Marks the given suballocation as free, updates totals, merges it with any
adjacent free suballocation(s), and (re-)registers the resulting range in
m_FreeSuballocationsBySize. Returns an iterator to the resulting free
suballocation, which may be the previous element if a backward merge occurred.
Neighbors are unregistered before merging because the registry is keyed by
the range's current size.
*/
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next free range disappears into this one.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // This range disappears into the previous free one, which then grows
        // and must be re-registered under its new size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
5918 
5919 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5920 {
5921  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5922  VMA_ASSERT(item->size > 0);
5923 
5924  // You may want to enable this validation at the beginning or at the end of
5925  // this function, depending on what do you want to check.
5926  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5927 
5928  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5929  {
5930  if(m_FreeSuballocationsBySize.empty())
5931  {
5932  m_FreeSuballocationsBySize.push_back(item);
5933  }
5934  else
5935  {
5936  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5937  }
5938  }
5939 
5940  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5941 }
5942 
5943 
/*
Removes the given free suballocation from m_FreeSuballocationsBySize, if it
was registered (i.e. its size meets the registration threshold). A binary
search locates the first entry with the same size; because multiple entries
may share that size, a short linear scan over the equal-size run finds the
exact iterator. Must be called while item->size still has the value it was
registered with.
*/
void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Walking past the equal-size run without a match means the item
            // was never registered — a logic error.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
5976 
5978 // class VmaDeviceMemoryMapping
5979 
// Starts unmapped: no outstanding Map() references, no host pointer.
VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
5985 
// Destroying while m_MapCount != 0 means some Map() call was never balanced
// by Unmap() - flagged as a client bug via the assert.
VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
{
    VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
}
5990 
5991 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5992 {
5993  if(count == 0)
5994  {
5995  return VK_SUCCESS;
5996  }
5997 
5998  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5999  if(m_MapCount != 0)
6000  {
6001  m_MapCount += count;
6002  VMA_ASSERT(m_pMappedData != VMA_NULL);
6003  if(ppData != VMA_NULL)
6004  {
6005  *ppData = m_pMappedData;
6006  }
6007  return VK_SUCCESS;
6008  }
6009  else
6010  {
6011  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6012  hAllocator->m_hDevice,
6013  hMemory,
6014  0, // offset
6015  VK_WHOLE_SIZE,
6016  0, // flags
6017  &m_pMappedData);
6018  if(result == VK_SUCCESS)
6019  {
6020  if(ppData != VMA_NULL)
6021  {
6022  *ppData = m_pMappedData;
6023  }
6024  m_MapCount = count;
6025  }
6026  return result;
6027  }
6028 }
6029 
6030 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6031 {
6032  if(count == 0)
6033  {
6034  return;
6035  }
6036 
6037  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6038  if(m_MapCount >= count)
6039  {
6040  m_MapCount -= count;
6041  if(m_MapCount == 0)
6042  {
6043  m_pMappedData = VMA_NULL;
6044  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6045  }
6046  }
6047  else
6048  {
6049  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6050  }
6051 }
6052 
6054 // class VmaDeviceMemoryBlock
6055 
// Constructs an uninitialized block; Init() must be called before use.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_hMemory(VK_NULL_HANDLE),
    m_Metadata(hAllocator)
{
}
6062 
6063 void VmaDeviceMemoryBlock::Init(
6064  uint32_t newMemoryTypeIndex,
6065  VkDeviceMemory newMemory,
6066  VkDeviceSize newSize)
6067 {
6068  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6069 
6070  m_MemoryTypeIndex = newMemoryTypeIndex;
6071  m_hMemory = newMemory;
6072 
6073  m_Metadata.Init(newSize);
6074 }
6075 
// Releases the underlying VkDeviceMemory back to the allocator. The block
// must be empty - all suballocations freed - before this is called.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    // Null the handle so Validate()/a second Destroy() can detect misuse.
    m_hMemory = VK_NULL_HANDLE;
}
6086 
6087 bool VmaDeviceMemoryBlock::Validate() const
6088 {
6089  if((m_hMemory == VK_NULL_HANDLE) ||
6090  (m_Metadata.GetSize() == 0))
6091  {
6092  return false;
6093  }
6094 
6095  return m_Metadata.Validate();
6096 }
6097 
// Adds `count` mapping references to this block; forwards to the shared
// reference-counted mapping object.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
}
6102 
// Releases `count` mapping references previously acquired with Map().
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    m_Mapping.Unmap(hAllocator, m_hMemory, count);
}
6107 
// Resets a VmaStatInfo to the neutral element for VmaAddStatInfo():
// all counters zero, minimums at UINT64_MAX so any real value replaces them.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
6114 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; min/max fields are merged so that the
// result covers both inputs. inoutInfo should have been prepared with
// InitStatInfo() (mins at UINT64_MAX) before the first call.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
6128 
6129 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6130 {
6131  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6132  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6133  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6134  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6135 }
6136 
// Constructs a custom pool: a thin wrapper around one VmaBlockVector
// configured from the user-supplied VmaPoolCreateInfo. With
// VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT the granularity is
// forced to 1 (no buffer/image conflict spacing inside blocks).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
6151 
// Nothing to do explicitly; m_BlockVector's destructor releases the blocks.
VmaPool_T::~VmaPool_T()
{
}
6155 
6156 #if VMA_STATS_STRING_ENABLED
6157 
6158 #endif // #if VMA_STATS_STRING_ENABLED
6159 
// A VmaBlockVector manages all VkDeviceMemory blocks of one memory type
// (either for a default pool or a custom pool). It starts with no blocks;
// CreateMinBlocks() / Allocate() create them on demand.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
6182 
6183 VmaBlockVector::~VmaBlockVector()
6184 {
6185  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6186 
6187  for(size_t i = m_Blocks.size(); i--; )
6188  {
6189  m_Blocks[i]->Destroy(m_hAllocator);
6190  vma_delete(m_hAllocator, m_Blocks[i]);
6191  }
6192 }
6193 
6194 VkResult VmaBlockVector::CreateMinBlocks()
6195 {
6196  for(size_t i = 0; i < m_MinBlockCount; ++i)
6197  {
6198  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6199  if(res != VK_SUCCESS)
6200  {
6201  return res;
6202  }
6203  }
6204  return VK_SUCCESS;
6205 }
6206 
6207 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6208 {
6209  pStats->size = 0;
6210  pStats->unusedSize = 0;
6211  pStats->allocationCount = 0;
6212  pStats->unusedRangeCount = 0;
6213  pStats->unusedRangeSizeMax = 0;
6214 
6215  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6216 
6217  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6218  {
6219  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6220  VMA_ASSERT(pBlock);
6221  VMA_HEAVY_ASSERT(pBlock->Validate());
6222  pBlock->m_Metadata.AddPoolStats(*pStats);
6223  }
6224 }
6225 
// Upper bound on retries in VmaBlockVector::Allocate when allocating with
// "can make other lost": each retry races with other threads touching
// allocations, so the loop must eventually give up.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6227 
6228 VkResult VmaBlockVector::Allocate(
6229  VmaPool hCurrentPool,
6230  uint32_t currentFrameIndex,
6231  const VkMemoryRequirements& vkMemReq,
6232  const VmaAllocationCreateInfo& createInfo,
6233  VmaSuballocationType suballocType,
6234  VmaAllocation* pAllocation)
6235 {
6236  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6237  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6238 
6239  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6240 
6241  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6242  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6243  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6244  {
6245  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6246  VMA_ASSERT(pCurrBlock);
6247  VmaAllocationRequest currRequest = {};
6248  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6249  currentFrameIndex,
6250  m_FrameInUseCount,
6251  m_BufferImageGranularity,
6252  vkMemReq.size,
6253  vkMemReq.alignment,
6254  suballocType,
6255  false, // canMakeOtherLost
6256  &currRequest))
6257  {
6258  // Allocate from pCurrBlock.
6259  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6260 
6261  if(mapped)
6262  {
6263  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6264  if(res != VK_SUCCESS)
6265  {
6266  return res;
6267  }
6268  }
6269 
6270  // We no longer have an empty Allocation.
6271  if(pCurrBlock->m_Metadata.IsEmpty())
6272  {
6273  m_HasEmptyBlock = false;
6274  }
6275 
6276  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6277  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6278  (*pAllocation)->InitBlockAllocation(
6279  hCurrentPool,
6280  pCurrBlock,
6281  currRequest.offset,
6282  vkMemReq.alignment,
6283  vkMemReq.size,
6284  suballocType,
6285  mapped,
6286  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6287  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6288  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6289  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6290  return VK_SUCCESS;
6291  }
6292  }
6293 
6294  const bool canCreateNewBlock =
6295  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6296  (m_Blocks.size() < m_MaxBlockCount);
6297 
6298  // 2. Try to create new block.
6299  if(canCreateNewBlock)
6300  {
6301  // Calculate optimal size for new block.
6302  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6303  uint32_t newBlockSizeShift = 0;
6304  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6305 
6306  // Allocating blocks of other sizes is allowed only in default pools.
6307  // In custom pools block size is fixed.
6308  if(m_IsCustomPool == false)
6309  {
6310  // Allocate 1/8, 1/4, 1/2 as first blocks.
6311  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6312  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6313  {
6314  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6315  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6316  {
6317  newBlockSize = smallerNewBlockSize;
6318  ++newBlockSizeShift;
6319  }
6320  else
6321  {
6322  break;
6323  }
6324  }
6325  }
6326 
6327  size_t newBlockIndex = 0;
6328  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6329  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6330  if(m_IsCustomPool == false)
6331  {
6332  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6333  {
6334  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6335  if(smallerNewBlockSize >= vkMemReq.size)
6336  {
6337  newBlockSize = smallerNewBlockSize;
6338  ++newBlockSizeShift;
6339  res = CreateBlock(newBlockSize, &newBlockIndex);
6340  }
6341  else
6342  {
6343  break;
6344  }
6345  }
6346  }
6347 
6348  if(res == VK_SUCCESS)
6349  {
6350  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6351  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6352 
6353  if(mapped)
6354  {
6355  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6356  if(res != VK_SUCCESS)
6357  {
6358  return res;
6359  }
6360  }
6361 
6362  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
6363  VmaAllocationRequest allocRequest;
6364  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6365  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6366  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6367  (*pAllocation)->InitBlockAllocation(
6368  hCurrentPool,
6369  pBlock,
6370  allocRequest.offset,
6371  vkMemReq.alignment,
6372  vkMemReq.size,
6373  suballocType,
6374  mapped,
6375  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6376  VMA_HEAVY_ASSERT(pBlock->Validate());
6377  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6378  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6379  return VK_SUCCESS;
6380  }
6381  }
6382 
6383  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6384 
6385  // 3. Try to allocate from existing blocks with making other allocations lost.
6386  if(canMakeOtherLost)
6387  {
6388  uint32_t tryIndex = 0;
6389  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6390  {
6391  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6392  VmaAllocationRequest bestRequest = {};
6393  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6394 
6395  // 1. Search existing allocations.
6396  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6397  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6398  {
6399  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6400  VMA_ASSERT(pCurrBlock);
6401  VmaAllocationRequest currRequest = {};
6402  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6403  currentFrameIndex,
6404  m_FrameInUseCount,
6405  m_BufferImageGranularity,
6406  vkMemReq.size,
6407  vkMemReq.alignment,
6408  suballocType,
6409  canMakeOtherLost,
6410  &currRequest))
6411  {
6412  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6413  if(pBestRequestBlock == VMA_NULL ||
6414  currRequestCost < bestRequestCost)
6415  {
6416  pBestRequestBlock = pCurrBlock;
6417  bestRequest = currRequest;
6418  bestRequestCost = currRequestCost;
6419 
6420  if(bestRequestCost == 0)
6421  {
6422  break;
6423  }
6424  }
6425  }
6426  }
6427 
6428  if(pBestRequestBlock != VMA_NULL)
6429  {
6430  if(mapped)
6431  {
6432  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6433  if(res != VK_SUCCESS)
6434  {
6435  return res;
6436  }
6437  }
6438 
6439  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6440  currentFrameIndex,
6441  m_FrameInUseCount,
6442  &bestRequest))
6443  {
6444  // We no longer have an empty Allocation.
6445  if(pBestRequestBlock->m_Metadata.IsEmpty())
6446  {
6447  m_HasEmptyBlock = false;
6448  }
6449  // Allocate from this pBlock.
6450  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6451  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6452  (*pAllocation)->InitBlockAllocation(
6453  hCurrentPool,
6454  pBestRequestBlock,
6455  bestRequest.offset,
6456  vkMemReq.alignment,
6457  vkMemReq.size,
6458  suballocType,
6459  mapped,
6460  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6461  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6462  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6463  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6464  return VK_SUCCESS;
6465  }
6466  // else: Some allocations must have been touched while we are here. Next try.
6467  }
6468  else
6469  {
6470  // Could not find place in any of the blocks - break outer loop.
6471  break;
6472  }
6473  }
6474  /* Maximum number of tries exceeded - a very unlike event when many other
6475  threads are simultaneously touching allocations making it impossible to make
6476  lost at the same time as we try to allocate. */
6477  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6478  {
6479  return VK_ERROR_TOO_MANY_OBJECTS;
6480  }
6481  }
6482 
6483  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6484 }
6485 
6486 void VmaBlockVector::Free(
6487  VmaAllocation hAllocation)
6488 {
6489  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6490 
6491  // Scope for lock.
6492  {
6493  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6494 
6495  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6496 
6497  if(hAllocation->IsPersistentMap())
6498  {
6499  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6500  }
6501 
6502  pBlock->m_Metadata.Free(hAllocation);
6503  VMA_HEAVY_ASSERT(pBlock->Validate());
6504 
6505  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6506 
6507  // pBlock became empty after this deallocation.
6508  if(pBlock->m_Metadata.IsEmpty())
6509  {
6510  // Already has empty Allocation. We don't want to have two, so delete this one.
6511  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6512  {
6513  pBlockToDelete = pBlock;
6514  Remove(pBlock);
6515  }
6516  // We now have first empty Allocation.
6517  else
6518  {
6519  m_HasEmptyBlock = true;
6520  }
6521  }
6522  // pBlock didn't become empty, but we have another empty block - find and free that one.
6523  // (This is optional, heuristics.)
6524  else if(m_HasEmptyBlock)
6525  {
6526  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6527  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6528  {
6529  pBlockToDelete = pLastBlock;
6530  m_Blocks.pop_back();
6531  m_HasEmptyBlock = false;
6532  }
6533  }
6534 
6535  IncrementallySortBlocks();
6536  }
6537 
6538  // Destruction of a free Allocation. Deferred until this point, outside of mutex
6539  // lock, for performance reason.
6540  if(pBlockToDelete != VMA_NULL)
6541  {
6542  VMA_DEBUG_LOG(" Deleted empty allocation");
6543  pBlockToDelete->Destroy(m_hAllocator);
6544  vma_delete(m_hAllocator, pBlockToDelete);
6545  }
6546 }
6547 
// Returns the size of the largest existing block; the reverse scan stops
// early once a block at least as large as m_PreferredBlockSize is seen.
// NOTE(review): the return type is size_t while block sizes are VkDeviceSize
// (64-bit), so on a 32-bit host a block larger than 4 GiB would be truncated
// - confirm whether the return type should be VkDeviceSize.
size_t VmaBlockVector::CalcMaxBlockSize() const
{
    size_t result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}
6561 
6562 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6563 {
6564  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6565  {
6566  if(m_Blocks[blockIndex] == pBlock)
6567  {
6568  VmaVectorRemove(m_Blocks, blockIndex);
6569  return;
6570  }
6571  }
6572  VMA_ASSERT(0);
6573 }
6574 
6575 void VmaBlockVector::IncrementallySortBlocks()
6576 {
6577  // Bubble sort only until first swap.
6578  for(size_t i = 1; i < m_Blocks.size(); ++i)
6579  {
6580  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6581  {
6582  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6583  return;
6584  }
6585  }
6586 }
6587 
6588 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6589 {
6590  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6591  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6592  allocInfo.allocationSize = blockSize;
6593  VkDeviceMemory mem = VK_NULL_HANDLE;
6594  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6595  if(res < 0)
6596  {
6597  return res;
6598  }
6599 
6600  // New VkDeviceMemory successfully created.
6601 
6602  // Create new Allocation for it.
6603  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6604  pBlock->Init(
6605  m_MemoryTypeIndex,
6606  mem,
6607  allocInfo.allocationSize);
6608 
6609  m_Blocks.push_back(pBlock);
6610  if(pNewBlockIndex != VMA_NULL)
6611  {
6612  *pNewBlockIndex = m_Blocks.size() - 1;
6613  }
6614 
6615  return VK_SUCCESS;
6616 }
6617 
6618 #if VMA_STATS_STRING_ENABLED
6619 
// Writes this block vector's state as one JSON object: pool configuration
// (custom pools print their fixed parameters, default pools only the
// preferred block size) followed by a per-block detailed map.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Block count limits are only printed when they actually constrain.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One array element per block, in m_Blocks order.
    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
6672 
6673 #endif // #if VMA_STATS_STRING_ENABLED
6674 
6675 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6676  VmaAllocator hAllocator,
6677  uint32_t currentFrameIndex)
6678 {
6679  if(m_pDefragmentator == VMA_NULL)
6680  {
6681  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6682  hAllocator,
6683  this,
6684  currentFrameIndex);
6685  }
6686 
6687  return m_pDefragmentator;
6688 }
6689 
// Runs the (previously populated) defragmentator under this vector's lock,
// accumulates moved-bytes/moved-allocations statistics into
// *pDefragmentationStats, shrinks the remaining budgets (maxBytesToMove /
// maxAllocationsToMove are in-out), and frees blocks that became empty.
// No-op returning VK_SUCCESS if no defragmentator exists.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Reduce the caller's remaining budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // indices we have yet to visit; keep m_MinBlockCount blocks alive.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block; remember it is empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
6746 
6747 void VmaBlockVector::DestroyDefragmentator()
6748 {
6749  if(m_pDefragmentator != VMA_NULL)
6750  {
6751  vma_delete(m_hAllocator, m_pDefragmentator);
6752  m_pDefragmentator = VMA_NULL;
6753  }
6754 }
6755 
6756 void VmaBlockVector::MakePoolAllocationsLost(
6757  uint32_t currentFrameIndex,
6758  size_t* pLostAllocationCount)
6759 {
6760  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6761  size_t lostAllocationCount = 0;
6762  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6763  {
6764  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6765  VMA_ASSERT(pBlock);
6766  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6767  }
6768  if(pLostAllocationCount != VMA_NULL)
6769  {
6770  *pLostAllocationCount = lostAllocationCount;
6771  }
6772 }
6773 
// Accumulates per-block statistics into pStats: into the grand total, into
// this vector's memory-type bucket, and into the owning heap's bucket.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
        // Each block contributes to three aggregation levels.
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
6793 
6795 // VmaDefragmentator members definition
6796 
// Creates a defragmentator bound to one block vector. Allocations to consider
// are added later via AddAllocation(); per-block info is built in Defragment().
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
6810 
// Deletes the per-block BlockInfo objects created during Defragment().
VmaDefragmentator::~VmaDefragmentator()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
6818 
6819 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6820 {
6821  AllocationInfo allocInfo;
6822  allocInfo.m_hAllocation = hAlloc;
6823  allocInfo.m_pChanged = pChanged;
6824  m_Allocations.push_back(allocInfo);
6825 }
6826 
6827 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6828 {
6829  // It has already been mapped for defragmentation.
6830  if(m_pMappedDataForDefragmentation)
6831  {
6832  *ppMappedData = m_pMappedDataForDefragmentation;
6833  return VK_SUCCESS;
6834  }
6835 
6836  // It is originally mapped.
6837  if(m_pBlock->m_Mapping.GetMappedData())
6838  {
6839  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6840  return VK_SUCCESS;
6841  }
6842 
6843  // Map on first usage.
6844  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6845  *ppMappedData = m_pMappedDataForDefragmentation;
6846  return res;
6847 }
6848 
6849 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6850 {
6851  if(m_pMappedDataForDefragmentation != VMA_NULL)
6852  {
6853  m_pBlock->Unmap(hAllocator, 1);
6854  }
6855 }
6856 
// One pass of defragmentation: walks candidate allocations from the most
// "source-like" block (back of m_Blocks) to the most "destination-like"
// (front), moving each into the earliest block that can hold it. Stops with
// VK_INCOMPLETE when maxBytesToMove/maxAllocationsToMove would be exceeded,
// VK_SUCCESS when all candidates were processed, or a mapping error.
// Assumes m_Blocks is already built and sorted (done by Defragment()).
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Cursor over (block, allocation) pairs; SIZE_MAX means "start at the last
    // allocation of the current block".
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be host-visible through a mapping before memcpy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Update metadata: reserve the new range, release the old one,
                // and repoint the allocation at its new block/offset.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance the cursor to the previous allocation / previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
6987 
// Compacts allocations previously registered with this defragmentator by
// moving them between the blocks of m_pBlockVector, within the given limits.
// Returns VK_SUCCESS when finished, VK_INCOMPLETE when maxBytesToMove or
// maxAllocationsToMove was reached, or an error code propagated from memory
// mapping inside DefragmentRound().
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so each allocation's owning block
    // can be located below with a binary search.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    // Must run even when a round returned VK_INCOMPLETE or an error.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
7055 
7056 bool VmaDefragmentator::MoveMakesSense(
7057  size_t dstBlockIndex, VkDeviceSize dstOffset,
7058  size_t srcBlockIndex, VkDeviceSize srcOffset)
7059 {
7060  if(dstBlockIndex < srcBlockIndex)
7061  {
7062  return true;
7063  }
7064  if(dstBlockIndex > srcBlockIndex)
7065  {
7066  return false;
7067  }
7068  if(dstOffset < srcOffset)
7069  {
7070  return true;
7071  }
7072  return false;
7073 }
7074 
7076 // VmaAllocator_T
7077 
// Constructs the allocator: caches device/physical-device handles, imports
// Vulkan function pointers, queries memory properties, applies optional
// per-heap size limits, and creates one default block vector plus one
// dedicated-allocation list per memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // By default no heap is size-limited.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Clamp reported heap sizes to the user-provided limits so the
    // block-size heuristic (CalcPreferredBlockSize) sees the limited size.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0, // minBlockCount
            SIZE_MAX, // maxBlockCount
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false); // isCustomPool
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}
7152 
7153 VmaAllocator_T::~VmaAllocator_T()
7154 {
7155  VMA_ASSERT(m_Pools.empty());
7156 
7157  for(size_t i = GetMemoryTypeCount(); i--; )
7158  {
7159  vma_delete(this, m_pDedicatedAllocations[i]);
7160  vma_delete(this, m_pBlockVectors[i]);
7161  }
7162 }
7163 
// Fills m_VulkanFunctions: first from statically linked entry points (when
// VMA_STATIC_VULKAN_FUNCTIONS == 1), then overrides any entry for which the
// user supplied a non-null pointer in pVulkanFunctions. Finally asserts that
// every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    // Extension entry points cannot be statically linked; fetch them from the
    // device when VK_KHR_dedicated_allocation is enabled.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// User-supplied pointers take precedence over the static defaults.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    // The KHR entry points are only required when the extension is in use.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
7237 
7238 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7239 {
7240  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7241  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7242  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7243  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7244 }
7245 
7246 VkResult VmaAllocator_T::AllocateMemoryOfType(
7247  const VkMemoryRequirements& vkMemReq,
7248  bool dedicatedAllocation,
7249  VkBuffer dedicatedBuffer,
7250  VkImage dedicatedImage,
7251  const VmaAllocationCreateInfo& createInfo,
7252  uint32_t memTypeIndex,
7253  VmaSuballocationType suballocType,
7254  VmaAllocation* pAllocation)
7255 {
7256  VMA_ASSERT(pAllocation != VMA_NULL);
7257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7258 
7259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7260 
7261  // If memory type is not HOST_VISIBLE, disable MAPPED.
7262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7264  {
7265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7266  }
7267 
7268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7269  VMA_ASSERT(blockVector);
7270 
7271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7272  bool preferDedicatedMemory =
7273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7274  dedicatedAllocation ||
7275  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
7276  vkMemReq.size > preferredBlockSize / 2;
7277 
7278  if(preferDedicatedMemory &&
7279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7280  finalCreateInfo.pool == VK_NULL_HANDLE)
7281  {
7283  }
7284 
7285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7286  {
7287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7288  {
7289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7290  }
7291  else
7292  {
7293  return AllocateDedicatedMemory(
7294  vkMemReq.size,
7295  suballocType,
7296  memTypeIndex,
7297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7299  finalCreateInfo.pUserData,
7300  dedicatedBuffer,
7301  dedicatedImage,
7302  pAllocation);
7303  }
7304  }
7305  else
7306  {
7307  VkResult res = blockVector->Allocate(
7308  VK_NULL_HANDLE, // hCurrentPool
7309  m_CurrentFrameIndex.load(),
7310  vkMemReq,
7311  finalCreateInfo,
7312  suballocType,
7313  pAllocation);
7314  if(res == VK_SUCCESS)
7315  {
7316  return res;
7317  }
7318 
7319  // 5. Try dedicated memory.
7320  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7321  {
7322  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7323  }
7324  else
7325  {
7326  res = AllocateDedicatedMemory(
7327  vkMemReq.size,
7328  suballocType,
7329  memTypeIndex,
7330  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7332  finalCreateInfo.pUserData,
7333  dedicatedBuffer,
7334  dedicatedImage,
7335  pAllocation);
7336  if(res == VK_SUCCESS)
7337  {
7338  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
7339  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7340  return VK_SUCCESS;
7341  }
7342  else
7343  {
7344  // Everything failed: Return error code.
7345  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7346  return res;
7347  }
7348  }
7349  }
7350 }
7351 
// Allocates a dedicated VkDeviceMemory block (one allocation = one whole
// VkDeviceMemory), optionally mapping it persistently, and chaining
// VkMemoryDedicatedAllocateInfoKHR when VK_KHR_dedicated_allocation is in use.
// On success stores the new allocation in *pAllocation and registers it in
// m_pDedicatedAllocations[memTypeIndex].
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // When the extension is enabled, tell the driver which single buffer or
    // image this memory is dedicated to. At most one of them may be non-null.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the entire range persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory just allocated before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
7428 
7429 void VmaAllocator_T::GetBufferMemoryRequirements(
7430  VkBuffer hBuffer,
7431  VkMemoryRequirements& memReq,
7432  bool& requiresDedicatedAllocation,
7433  bool& prefersDedicatedAllocation) const
7434 {
7435  if(m_UseKhrDedicatedAllocation)
7436  {
7437  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7438  memReqInfo.buffer = hBuffer;
7439 
7440  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7441 
7442  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7443  memReq2.pNext = &memDedicatedReq;
7444 
7445  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7446 
7447  memReq = memReq2.memoryRequirements;
7448  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7449  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7450  }
7451  else
7452  {
7453  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7454  requiresDedicatedAllocation = false;
7455  prefersDedicatedAllocation = false;
7456  }
7457 }
7458 
7459 void VmaAllocator_T::GetImageMemoryRequirements(
7460  VkImage hImage,
7461  VkMemoryRequirements& memReq,
7462  bool& requiresDedicatedAllocation,
7463  bool& prefersDedicatedAllocation) const
7464 {
7465  if(m_UseKhrDedicatedAllocation)
7466  {
7467  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7468  memReqInfo.image = hImage;
7469 
7470  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7471 
7472  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7473  memReq2.pNext = &memDedicatedReq;
7474 
7475  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7476 
7477  memReq = memReq2.memoryRequirements;
7478  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7479  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7480  }
7481  else
7482  {
7483  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7484  requiresDedicatedAllocation = false;
7485  prefersDedicatedAllocation = false;
7486  }
7487 }
7488 
7489 VkResult VmaAllocator_T::AllocateMemory(
7490  const VkMemoryRequirements& vkMemReq,
7491  bool requiresDedicatedAllocation,
7492  bool prefersDedicatedAllocation,
7493  VkBuffer dedicatedBuffer,
7494  VkImage dedicatedImage,
7495  const VmaAllocationCreateInfo& createInfo,
7496  VmaSuballocationType suballocType,
7497  VmaAllocation* pAllocation)
7498 {
7499  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7500  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7501  {
7502  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7503  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7504  }
7505  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7507  {
7508  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7509  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7510  }
7511  if(requiresDedicatedAllocation)
7512  {
7513  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7514  {
7515  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7516  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7517  }
7518  if(createInfo.pool != VK_NULL_HANDLE)
7519  {
7520  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7521  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7522  }
7523  }
7524  if((createInfo.pool != VK_NULL_HANDLE) &&
7525  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7526  {
7527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7529  }
7530 
7531  if(createInfo.pool != VK_NULL_HANDLE)
7532  {
7533  return createInfo.pool->m_BlockVector.Allocate(
7534  createInfo.pool,
7535  m_CurrentFrameIndex.load(),
7536  vkMemReq,
7537  createInfo,
7538  suballocType,
7539  pAllocation);
7540  }
7541  else
7542  {
7543  // Bit mask of memory Vulkan types acceptable for this allocation.
7544  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7545  uint32_t memTypeIndex = UINT32_MAX;
7546  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7547  if(res == VK_SUCCESS)
7548  {
7549  res = AllocateMemoryOfType(
7550  vkMemReq,
7551  requiresDedicatedAllocation || prefersDedicatedAllocation,
7552  dedicatedBuffer,
7553  dedicatedImage,
7554  createInfo,
7555  memTypeIndex,
7556  suballocType,
7557  pAllocation);
7558  // Succeeded on first try.
7559  if(res == VK_SUCCESS)
7560  {
7561  return res;
7562  }
7563  // Allocation from this memory type failed. Try other compatible memory types.
7564  else
7565  {
7566  for(;;)
7567  {
7568  // Remove old memTypeIndex from list of possibilities.
7569  memoryTypeBits &= ~(1u << memTypeIndex);
7570  // Find alternative memTypeIndex.
7571  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7572  if(res == VK_SUCCESS)
7573  {
7574  res = AllocateMemoryOfType(
7575  vkMemReq,
7576  requiresDedicatedAllocation || prefersDedicatedAllocation,
7577  dedicatedBuffer,
7578  dedicatedImage,
7579  createInfo,
7580  memTypeIndex,
7581  suballocType,
7582  pAllocation);
7583  // Allocation from this alternative memory type succeeded.
7584  if(res == VK_SUCCESS)
7585  {
7586  return res;
7587  }
7588  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7589  }
7590  // No other matching memory type index could be found.
7591  else
7592  {
7593  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7594  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7595  }
7596  }
7597  }
7598  }
7599  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7600  else
7601  return res;
7602  }
7603 }
7604 
7605 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7606 {
7607  VMA_ASSERT(allocation);
7608 
7609  if(allocation->CanBecomeLost() == false ||
7610  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7611  {
7612  switch(allocation->GetType())
7613  {
7614  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7615  {
7616  VmaBlockVector* pBlockVector = VMA_NULL;
7617  VmaPool hPool = allocation->GetPool();
7618  if(hPool != VK_NULL_HANDLE)
7619  {
7620  pBlockVector = &hPool->m_BlockVector;
7621  }
7622  else
7623  {
7624  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7625  pBlockVector = m_pBlockVectors[memTypeIndex];
7626  }
7627  pBlockVector->Free(allocation);
7628  }
7629  break;
7630  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7631  FreeDedicatedMemory(allocation);
7632  break;
7633  default:
7634  VMA_ASSERT(0);
7635  }
7636  }
7637 
7638  allocation->SetUserData(this, VMA_NULL);
7639  vma_delete(this, allocation);
7640 }
7641 
7642 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7643 {
7644  // Initialize.
7645  InitStatInfo(pStats->total);
7646  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7647  InitStatInfo(pStats->memoryType[i]);
7648  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7649  InitStatInfo(pStats->memoryHeap[i]);
7650 
7651  // Process default pools.
7652  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7653  {
7654  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7655  VMA_ASSERT(pBlockVector);
7656  pBlockVector->AddStats(pStats);
7657  }
7658 
7659  // Process custom pools.
7660  {
7661  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7662  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7663  {
7664  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7665  }
7666  }
7667 
7668  // Process dedicated allocations.
7669  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7670  {
7671  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7672  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7673  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7674  VMA_ASSERT(pDedicatedAllocVector);
7675  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7676  {
7677  VmaStatInfo allocationStatInfo;
7678  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7679  VmaAddStatInfo(pStats->total, allocationStatInfo);
7680  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7681  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7682  }
7683  }
7684 
7685  // Postprocess.
7686  VmaPostprocessCalcStatInfo(pStats->total);
7687  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7688  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7689  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7690  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7691 }
7692 
7693 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7694 
7695 VkResult VmaAllocator_T::Defragment(
7696  VmaAllocation* pAllocations,
7697  size_t allocationCount,
7698  VkBool32* pAllocationsChanged,
7699  const VmaDefragmentationInfo* pDefragmentationInfo,
7700  VmaDefragmentationStats* pDefragmentationStats)
7701 {
7702  if(pAllocationsChanged != VMA_NULL)
7703  {
7704  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7705  }
7706  if(pDefragmentationStats != VMA_NULL)
7707  {
7708  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7709  }
7710 
7711  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7712 
7713  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7714 
7715  const size_t poolCount = m_Pools.size();
7716 
7717  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7718  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7719  {
7720  VmaAllocation hAlloc = pAllocations[allocIndex];
7721  VMA_ASSERT(hAlloc);
7722  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7723  // DedicatedAlloc cannot be defragmented.
7724  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7725  // Only HOST_VISIBLE memory types can be defragmented.
7726  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7727  // Lost allocation cannot be defragmented.
7728  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7729  {
7730  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7731 
7732  const VmaPool hAllocPool = hAlloc->GetPool();
7733  // This allocation belongs to custom pool.
7734  if(hAllocPool != VK_NULL_HANDLE)
7735  {
7736  pAllocBlockVector = &hAllocPool->GetBlockVector();
7737  }
7738  // This allocation belongs to general pool.
7739  else
7740  {
7741  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7742  }
7743 
7744  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7745 
7746  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7747  &pAllocationsChanged[allocIndex] : VMA_NULL;
7748  pDefragmentator->AddAllocation(hAlloc, pChanged);
7749  }
7750  }
7751 
7752  VkResult result = VK_SUCCESS;
7753 
7754  // ======== Main processing.
7755 
7756  VkDeviceSize maxBytesToMove = SIZE_MAX;
7757  uint32_t maxAllocationsToMove = UINT32_MAX;
7758  if(pDefragmentationInfo != VMA_NULL)
7759  {
7760  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7761  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7762  }
7763 
7764  // Process standard memory.
7765  for(uint32_t memTypeIndex = 0;
7766  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7767  ++memTypeIndex)
7768  {
7769  // Only HOST_VISIBLE memory types can be defragmented.
7770  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7771  {
7772  result = m_pBlockVectors[memTypeIndex]->Defragment(
7773  pDefragmentationStats,
7774  maxBytesToMove,
7775  maxAllocationsToMove);
7776  }
7777  }
7778 
7779  // Process custom pools.
7780  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7781  {
7782  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7783  pDefragmentationStats,
7784  maxBytesToMove,
7785  maxAllocationsToMove);
7786  }
7787 
7788  // ======== Destroy defragmentators.
7789 
7790  // Process custom pools.
7791  for(size_t poolIndex = poolCount; poolIndex--; )
7792  {
7793  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7794  }
7795 
7796  // Process standard memory.
7797  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7798  {
7799  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7800  {
7801  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7802  }
7803  }
7804 
7805  return result;
7806 }
7807 
// Fills *pAllocationInfo for the given allocation. For allocations that can
// become lost, this also "touches" the allocation: a CAS loop bumps its
// last-use frame index up to the current frame, racing safely against
// concurrent callers and against the allocation becoming lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report empty info except size/user data.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report current state.
                // pMappedData is always null for lost-capable allocations
                // (they cannot be persistently mapped).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame index; on CAS failure
                // localLastUseFrameIndex was reloaded - loop and re-examine.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Non-lost-capable allocation: state is stable, read it directly.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
7859 
// Returns true if the allocation is still usable, false if it is lost.
// For lost-capable allocations it also marks them as used in the current
// frame, using the same lock-free CAS loop as GetAllocationInfo.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was already lost.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully touched in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Attempt to advance the last-use index; retry on contention.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Allocations that cannot become lost are always usable.
        return true;
    }
}
7891 
7892 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7893 {
7894  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7895 
7896  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7897 
7898  if(newCreateInfo.maxBlockCount == 0)
7899  {
7900  newCreateInfo.maxBlockCount = SIZE_MAX;
7901  }
7902  if(newCreateInfo.blockSize == 0)
7903  {
7904  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7905  }
7906 
7907  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7908 
7909  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7910  if(res != VK_SUCCESS)
7911  {
7912  vma_delete(this, *pPool);
7913  *pPool = VMA_NULL;
7914  return res;
7915  }
7916 
7917  // Add to m_Pools.
7918  {
7919  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7920  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7921  }
7922 
7923  return VK_SUCCESS;
7924 }
7925 
7926 void VmaAllocator_T::DestroyPool(VmaPool pool)
7927 {
7928  // Remove from m_Pools.
7929  {
7930  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7931  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7932  VMA_ASSERT(success && "Pool not found in Allocator.");
7933  }
7934 
7935  vma_delete(this, pool);
7936 }
7937 
// Retrieves statistics of a custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
7942 
// Atomically publishes the new frame index used by the lost-allocation logic.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
7947 
// Marks eligible allocations in the given pool as lost, passing the current
// frame index so the pool can decide which allocations are stale.
// *pLostAllocationCount (optional) receives the number of allocations lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
7956 
// Creates a dummy allocation that is born already lost (no backing memory).
// The `false` argument: presumably disables the user-data-is-string mode —
// TODO confirm against VmaAllocation_T's constructor.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
7962 
7963 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7964 {
7965  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7966 
7967  VkResult res;
7968  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7969  {
7970  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7971  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7972  {
7973  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7974  if(res == VK_SUCCESS)
7975  {
7976  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7977  }
7978  }
7979  else
7980  {
7981  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7982  }
7983  }
7984  else
7985  {
7986  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7987  }
7988 
7989  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7990  {
7991  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7992  }
7993 
7994  return res;
7995 }
7996 
7997 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7998 {
7999  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8000  {
8001  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8002  }
8003 
8004  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8005 
8006  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8007  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8008  {
8009  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8010  m_HeapSizeLimit[heapIndex] += size;
8011  }
8012 }
8013 
8014 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8015 {
8016  if(hAllocation->CanBecomeLost())
8017  {
8018  return VK_ERROR_MEMORY_MAP_FAILED;
8019  }
8020 
8021  switch(hAllocation->GetType())
8022  {
8023  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8024  {
8025  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8026  char *pBytes = VMA_NULL;
8027  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8028  if(res == VK_SUCCESS)
8029  {
8030  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8031  hAllocation->BlockAllocMap();
8032  }
8033  return res;
8034  }
8035  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8036  return hAllocation->DedicatedAllocMap(this, ppData);
8037  default:
8038  VMA_ASSERT(0);
8039  return VK_ERROR_MEMORY_MAP_FAILED;
8040  }
8041 }
8042 
8043 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8044 {
8045  switch(hAllocation->GetType())
8046  {
8047  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8048  {
8049  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8050  hAllocation->BlockAllocUnmap();
8051  pBlock->Unmap(this, 1);
8052  }
8053  break;
8054  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8055  hAllocation->DedicatedAllocUnmap(this);
8056  break;
8057  default:
8058  VMA_ASSERT(0);
8059  }
8060 }
8061 
8062 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8063 {
8064  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8065 
8066  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8067  {
8068  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8069  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8070  VMA_ASSERT(pDedicatedAllocations);
8071  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8072  VMA_ASSERT(success);
8073  }
8074 
8075  VkDeviceMemory hMemory = allocation->GetMemory();
8076 
8077  if(allocation->GetMappedData() != VMA_NULL)
8078  {
8079  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8080  }
8081 
8082  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8083 
8084  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8085 }
8086 
8087 #if VMA_STATS_STRING_ENABLED
8088 
// Writes a detailed JSON dump of all memory owned by this allocator:
// dedicated allocations, the default per-memory-type block vectors, and
// custom pools. The caller has already begun the enclosing JSON object.
// Only compiled when VMA_STATS_STRING_ENABLED.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, only when there
            // is at least one dedicated allocation to report.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                json.BeginObject(true);

                json.WriteString("Type");
                json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                json.WriteString("Size");
                json.WriteNumber(hAlloc->GetSize());

                const void* pUserData = hAlloc->GetUserData();
                if(pUserData != VMA_NULL)
                {
                    json.WriteString("UserData");
                    if(hAlloc->IsUserDataString())
                    {
                        json.WriteString((const char*)pUserData);
                    }
                    else
                    {
                        // Opaque user data: emit the pointer value as a string.
                        json.BeginString();
                        json.ContinueString_Pointer(pUserData);
                        json.EndString();
                    }
                }

                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default per-memory-type block vectors, again opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools, guarded by the pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
8191 
8192 #endif // #if VMA_STATS_STRING_ENABLED
8193 
8194 static VkResult AllocateMemoryForImage(
8195  VmaAllocator allocator,
8196  VkImage image,
8197  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8198  VmaSuballocationType suballocType,
8199  VmaAllocation* pAllocation)
8200 {
8201  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8202 
8203  VkMemoryRequirements vkMemReq = {};
8204  bool requiresDedicatedAllocation = false;
8205  bool prefersDedicatedAllocation = false;
8206  allocator->GetImageMemoryRequirements(image, vkMemReq,
8207  requiresDedicatedAllocation, prefersDedicatedAllocation);
8208 
8209  return allocator->AllocateMemory(
8210  vkMemReq,
8211  requiresDedicatedAllocation,
8212  prefersDedicatedAllocation,
8213  VK_NULL_HANDLE, // dedicatedBuffer
8214  image, // dedicatedImage
8215  *pAllocationCreateInfo,
8216  suballocType,
8217  pAllocation);
8218 }
8219 
8221 // Public interface
8222 
// Creates the allocator object using the caller-supplied CPU allocation
// callbacks (if any) for the object itself. Always returns VK_SUCCESS.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return VK_SUCCESS;
}
8232 
// Destroys an allocator created with vmaCreateAllocator. Passing
// VK_NULL_HANDLE is a safe no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks to a local first: vma_delete destroys the
        // allocator object that owns them, so the copy must be used for the
        // final deallocation.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
8243 
8245  VmaAllocator allocator,
8246  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8247 {
8248  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8249  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8250 }
8251 
8253  VmaAllocator allocator,
8254  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8255 {
8256  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8257  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8258 }
8259 
8261  VmaAllocator allocator,
8262  uint32_t memoryTypeIndex,
8263  VkMemoryPropertyFlags* pFlags)
8264 {
8265  VMA_ASSERT(allocator && pFlags);
8266  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8267  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8268 }
8269 
8271  VmaAllocator allocator,
8272  uint32_t frameIndex)
8273 {
8274  VMA_ASSERT(allocator);
8275  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8276 
8277  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8278 
8279  allocator->SetCurrentFrameIndex(frameIndex);
8280 }
8281 
// Computes aggregate statistics over all memory owned by the allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
8290 
8291 #if VMA_STATS_STRING_ENABLED
8292 
// Builds a NUL-terminated JSON statistics string into *ppStatsString.
// The string is allocated with the allocator's CPU callbacks and must be
// released with vmaFreeStatsString. When detailedMap is VK_TRUE the dump also
// includes the full memory map from PrintDetailedMap.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer flushes into sb before it is read below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, each listing its size, flags, stats and
        // the memory types that live in it.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the property flags of this memory type.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
8400 
8401 void vmaFreeStatsString(
8402  VmaAllocator allocator,
8403  char* pStatsString)
8404 {
8405  if(pStatsString != VMA_NULL)
8406  {
8407  VMA_ASSERT(allocator);
8408  size_t len = strlen(pStatsString);
8409  vma_delete_array(allocator, pStatsString, len + 1);
8410  }
8411 }
8412 
8413 #endif // #if VMA_STATS_STRING_ENABLED
8414 
8415 /*
8416 This function is not protected by any mutex because it just reads immutable data.
8417 */
8418 VkResult vmaFindMemoryTypeIndex(
8419  VmaAllocator allocator,
8420  uint32_t memoryTypeBits,
8421  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8422  uint32_t* pMemoryTypeIndex)
8423 {
8424  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8425  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8426  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8427 
8428  if(pAllocationCreateInfo->memoryTypeBits != 0)
8429  {
8430  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8431  }
8432 
8433  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8434  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8435 
8436  // Convert usage to requiredFlags and preferredFlags.
8437  switch(pAllocationCreateInfo->usage)
8438  {
8440  break;
8442  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8443  break;
8445  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8446  break;
8448  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8449  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8450  break;
8452  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8453  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8454  break;
8455  default:
8456  break;
8457  }
8458 
8459  *pMemoryTypeIndex = UINT32_MAX;
8460  uint32_t minCost = UINT32_MAX;
8461  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8462  memTypeIndex < allocator->GetMemoryTypeCount();
8463  ++memTypeIndex, memTypeBit <<= 1)
8464  {
8465  // This memory type is acceptable according to memoryTypeBits bitmask.
8466  if((memTypeBit & memoryTypeBits) != 0)
8467  {
8468  const VkMemoryPropertyFlags currFlags =
8469  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8470  // This memory type contains requiredFlags.
8471  if((requiredFlags & ~currFlags) == 0)
8472  {
8473  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8474  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8475  // Remember memory type with lowest cost.
8476  if(currCost < minCost)
8477  {
8478  *pMemoryTypeIndex = memTypeIndex;
8479  if(currCost == 0)
8480  {
8481  return VK_SUCCESS;
8482  }
8483  minCost = currCost;
8484  }
8485  }
8486  }
8487  }
8488  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8489 }
8490 
8492  VmaAllocator allocator,
8493  const VkBufferCreateInfo* pBufferCreateInfo,
8494  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8495  uint32_t* pMemoryTypeIndex)
8496 {
8497  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8498  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8499  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8500  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8501 
8502  const VkDevice hDev = allocator->m_hDevice;
8503  VkBuffer hBuffer = VK_NULL_HANDLE;
8504  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8505  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8506  if(res == VK_SUCCESS)
8507  {
8508  VkMemoryRequirements memReq = {};
8509  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8510  hDev, hBuffer, &memReq);
8511 
8512  res = vmaFindMemoryTypeIndex(
8513  allocator,
8514  memReq.memoryTypeBits,
8515  pAllocationCreateInfo,
8516  pMemoryTypeIndex);
8517 
8518  allocator->GetVulkanFunctions().vkDestroyBuffer(
8519  hDev, hBuffer, allocator->GetAllocationCallbacks());
8520  }
8521  return res;
8522 }
8523 
8525  VmaAllocator allocator,
8526  const VkImageCreateInfo* pImageCreateInfo,
8527  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8528  uint32_t* pMemoryTypeIndex)
8529 {
8530  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8531  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8532  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8533  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8534 
8535  const VkDevice hDev = allocator->m_hDevice;
8536  VkImage hImage = VK_NULL_HANDLE;
8537  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8538  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8539  if(res == VK_SUCCESS)
8540  {
8541  VkMemoryRequirements memReq = {};
8542  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8543  hDev, hImage, &memReq);
8544 
8545  res = vmaFindMemoryTypeIndex(
8546  allocator,
8547  memReq.memoryTypeBits,
8548  pAllocationCreateInfo,
8549  pMemoryTypeIndex);
8550 
8551  allocator->GetVulkanFunctions().vkDestroyImage(
8552  hDev, hImage, allocator->GetAllocationCallbacks());
8553  }
8554  return res;
8555 }
8556 
// Public wrapper: creates a custom memory pool. See VmaAllocator_T::CreatePool.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}
8570 
// Public wrapper: destroys a custom memory pool. Null pool is a safe no-op.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->DestroyPool(pool);
}
8588 
// Public wrapper: retrieves statistics of an existing custom pool.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
8600 
8602  VmaAllocator allocator,
8603  VmaPool pool,
8604  size_t* pLostAllocationCount)
8605 {
8606  VMA_ASSERT(allocator && pool);
8607 
8608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8609 
8610  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8611 }
8612 
// Allocates memory for caller-supplied VkMemoryRequirements (general-purpose
// entry point, no dedicated-allocation hints). On success optionally fills
// *pAllocationInfo with the resulting allocation parameters.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
8643 
8645  VmaAllocator allocator,
8646  VkBuffer buffer,
8647  const VmaAllocationCreateInfo* pCreateInfo,
8648  VmaAllocation* pAllocation,
8649  VmaAllocationInfo* pAllocationInfo)
8650 {
8651  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8652 
8653  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8654 
8655  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8656 
8657  VkMemoryRequirements vkMemReq = {};
8658  bool requiresDedicatedAllocation = false;
8659  bool prefersDedicatedAllocation = false;
8660  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8661  requiresDedicatedAllocation,
8662  prefersDedicatedAllocation);
8663 
8664  VkResult result = allocator->AllocateMemory(
8665  vkMemReq,
8666  requiresDedicatedAllocation,
8667  prefersDedicatedAllocation,
8668  buffer, // dedicatedBuffer
8669  VK_NULL_HANDLE, // dedicatedImage
8670  *pCreateInfo,
8671  VMA_SUBALLOCATION_TYPE_BUFFER,
8672  pAllocation);
8673 
8674  if(pAllocationInfo && result == VK_SUCCESS)
8675  {
8676  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8677  }
8678 
8679  return result;
8680 }
8681 
8682 VkResult vmaAllocateMemoryForImage(
8683  VmaAllocator allocator,
8684  VkImage image,
8685  const VmaAllocationCreateInfo* pCreateInfo,
8686  VmaAllocation* pAllocation,
8687  VmaAllocationInfo* pAllocationInfo)
8688 {
8689  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8690 
8691  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8692 
8693  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8694 
8695  VkResult result = AllocateMemoryForImage(
8696  allocator,
8697  image,
8698  pCreateInfo,
8699  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8700  pAllocation);
8701 
8702  if(pAllocationInfo && result == VK_SUCCESS)
8703  {
8704  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8705  }
8706 
8707  return result;
8708 }
8709 
// Frees memory previously allocated with any of the vmaAllocateMemory*
// functions.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(allocation);
}
8722 
8724  VmaAllocator allocator,
8725  VmaAllocation allocation,
8726  VmaAllocationInfo* pAllocationInfo)
8727 {
8728  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8729 
8730  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8731 
8732  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8733 }
8734 
// Returns VK_TRUE if the allocation is still usable, VK_FALSE if lost; also
// marks lost-capable allocations as used in the current frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->TouchAllocation(allocation);
}
8745 
8747  VmaAllocator allocator,
8748  VmaAllocation allocation,
8749  void* pUserData)
8750 {
8751  VMA_ASSERT(allocator && allocation);
8752 
8753  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8754 
8755  allocation->SetUserData(allocator, pUserData);
8756 }
8757 
8759  VmaAllocator allocator,
8760  VmaAllocation* pAllocation)
8761 {
8762  VMA_ASSERT(allocator && pAllocation);
8763 
8764  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8765 
8766  allocator->CreateLostAllocation(pAllocation);
8767 }
8768 
// Maps the allocation's memory; *ppData receives a CPU pointer to its region.
// Must be balanced with vmaUnmapMemory.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Map(allocation, ppData);
}
8780 
// Unmaps memory previously mapped with vmaMapMemory.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->Unmap(allocation);
}
8791 
// Compacts memory by moving the given allocations to reduce fragmentation.
// pAllocationsChanged (optional) receives per-allocation "was moved" flags;
// pDefragmentationInfo and pDefragmentationStats are optional as well.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
8808 
// Creates a VkBuffer, allocates memory for it, and binds them together.
// On any failure, everything created so far is destroyed and the out
// parameters are reset to null handles. pAllocationInfo is optional.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation  = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);
        if(res >= 0)
        {
            // 3. Bind buffer with memory.
            res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
                allocator->m_hDevice,
                *pBuffer,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: unwind the allocation and the buffer, in that order.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: unwind the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
8898 
8899 void vmaDestroyBuffer(
8900  VmaAllocator allocator,
8901  VkBuffer buffer,
8902  VmaAllocation allocation)
8903 {
8904  if(buffer != VK_NULL_HANDLE)
8905  {
8906  VMA_ASSERT(allocator);
8907 
8908  VMA_DEBUG_LOG("vmaDestroyBuffer");
8909 
8910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8911 
8912  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8913 
8914  allocator->FreeMemory(allocation);
8915  }
8916 }
8917 
// Creates a VkImage, allocates memory for it, and binds them together.
// On any failure, everything created so far is destroyed and the out
// parameters are reset to null handles. pAllocationInfo is optional.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Linear and optimal images must not share memory pages, so the
        // suballocation type records the image's tiling.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
                allocator->m_hDevice,
                *pImage,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: unwind the allocation and the image, in that order.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: unwind the image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
8978 
8979 void vmaDestroyImage(
8980  VmaAllocator allocator,
8981  VkImage image,
8982  VmaAllocation allocation)
8983 {
8984  if(image != VK_NULL_HANDLE)
8985  {
8986  VMA_ASSERT(allocator);
8987 
8988  VMA_DEBUG_LOG("vmaDestroyImage");
8989 
8990  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8991 
8992  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8993 
8994  allocator->FreeMemory(allocation);
8995  }
8996 }
8997 
8998 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:939
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1193
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:964
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:949
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1150
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:943
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1499
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:961
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1698
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1369
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1423
Definition: vk_mem_alloc.h:1230
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:932
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1268
Definition: vk_mem_alloc.h:1177
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:973
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1026
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:958
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1181
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1091
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:946
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1090
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:954
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1702
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:990
VmaStatInfo total
Definition: vk_mem_alloc.h:1100
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1710
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1252
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1693
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:947
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:874
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:967
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1377
Definition: vk_mem_alloc.h:1371
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1509
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:944
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1289
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1393
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1429
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:930
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1380
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1128
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1688
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1706
Definition: vk_mem_alloc.h:1167
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1276
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:945
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1096
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:880
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:901
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:906
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1708
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1263
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1439
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:940
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1079
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1388
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:893
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1237
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1092
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:897
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1383
Definition: vk_mem_alloc.h:1176
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1258
Definition: vk_mem_alloc.h:1249
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1082
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:942
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1401
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:976
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1432
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1247
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1282
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1014
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1098
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1217
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1091
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:951
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:895
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:950
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1415
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1523
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:970
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1091
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1088
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1420
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1504
Definition: vk_mem_alloc.h:1245
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1704
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:938
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:953
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1086
Definition: vk_mem_alloc.h:1133
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1373
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1084
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:948
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:952
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1204
Definition: vk_mem_alloc.h:1160
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1518
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:928
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:941
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1485
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1351
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1092
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1099
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1426
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1092
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1490