Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
868 #include <vulkan/vulkan.h>
869 
870 VK_DEFINE_HANDLE(VmaAllocator)
871 
/// Callback invoked when the library allocates VkDeviceMemory
/// (registered via VmaDeviceMemoryCallbacks).
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback invoked when the library frees VkDeviceMemory
/// (registered via VmaDeviceMemoryCallbacks).
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
884 
892 typedef struct VmaDeviceMemoryCallbacks {
898 
928 
931 typedef VkFlags VmaAllocatorCreateFlags;
932 
937 typedef struct VmaVulkanFunctions {
938  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
939  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
940  PFN_vkAllocateMemory vkAllocateMemory;
941  PFN_vkFreeMemory vkFreeMemory;
942  PFN_vkMapMemory vkMapMemory;
943  PFN_vkUnmapMemory vkUnmapMemory;
944  PFN_vkBindBufferMemory vkBindBufferMemory;
945  PFN_vkBindImageMemory vkBindImageMemory;
946  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
947  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
948  PFN_vkCreateBuffer vkCreateBuffer;
949  PFN_vkDestroyBuffer vkDestroyBuffer;
950  PFN_vkCreateImage vkCreateImage;
951  PFN_vkDestroyImage vkDestroyImage;
952  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
953  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
955 
958 {
960  VmaAllocatorCreateFlags flags;
962 
963  VkPhysicalDevice physicalDevice;
965 
966  VkDevice device;
968 
971 
972  const VkAllocationCallbacks* pAllocationCallbacks;
974 
989  uint32_t frameInUseCount;
1013  const VkDeviceSize* pHeapSizeLimit;
1027 
/// Creates the allocator object; the resulting handle is written to *pAllocator.
/// Destroy it with vmaDestroyAllocator().
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);
1032 
/// Destroys an allocator object previously created with vmaCreateAllocator().
void vmaDestroyAllocator(
    VmaAllocator allocator);
1036 
1042  VmaAllocator allocator,
1043  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1044 
1050  VmaAllocator allocator,
1051  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1052 
1060  VmaAllocator allocator,
1061  uint32_t memoryTypeIndex,
1062  VkMemoryPropertyFlags* pFlags);
1063 
1073  VmaAllocator allocator,
1074  uint32_t frameIndex);
1075 
/// Calculated memory-usage statistics (filled by vmaCalculateStats).
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory blocks.
    uint32_t blockCount;
    /// Total bytes occupied by allocations.
    VkDeviceSize usedBytes;
    /// Total bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;
1093 
/// Statistics grouped per memory type and per memory heap.
/* NOTE(review): the extraction skips original line 1099 between memoryHeap and
   the closing brace — an aggregate member may have been lost; verify against
   the original header. */
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
} VmaStats;
1101 
/// Retrieves statistics of the current state of the allocator into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);
1106 
1107 #define VMA_STATS_STRING_ENABLED 1
1108 
1109 #if VMA_STATS_STRING_ENABLED
1110 
1112 
/// Builds a statistics string; *ppStatsString receives an allocated buffer that
/// must be released with vmaFreeStatsString(). detailedMap presumably controls
/// the verbosity of the output — confirm against full documentation.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);
1118 
/// Frees a string returned by vmaBuildStatsString().
void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);
1122 
1123 #endif // #if VMA_STATS_STRING_ENABLED
1124 
1125 VK_DEFINE_HANDLE(VmaPool)
1126 
1127 typedef enum VmaMemoryUsage
1128 {
1177 } VmaMemoryUsage;
1178 
1193 
1243 
1247 
1249 {
1251  VmaAllocationCreateFlags flags;
1262  VkMemoryPropertyFlags requiredFlags;
1267  VkMemoryPropertyFlags preferredFlags;
1275  uint32_t memoryTypeBits;
1281  VmaPool pool;
1288  void* pUserData;
1290 
/// Finds a memory type index matching memoryTypeBits and the given allocation
/// parameters; on success the index is written to *pMemoryTypeIndex.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);
1312 
1326  VmaAllocator allocator,
1327  const VkBufferCreateInfo* pBufferCreateInfo,
1328  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1329  uint32_t* pMemoryTypeIndex);
1330 
1344  VmaAllocator allocator,
1345  const VkImageCreateInfo* pImageCreateInfo,
1346  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1347  uint32_t* pMemoryTypeIndex);
1348 
1369 
1372 typedef VkFlags VmaPoolCreateFlags;
1373 
1376 typedef struct VmaPoolCreateInfo {
1382  VmaPoolCreateFlags flags;
1387  VkDeviceSize blockSize;
1416 
/// Statistics of an existing VmaPool (filled by vmaGetPoolStats).
typedef struct VmaPoolStats {
    /// Total size of the pool, in bytes.
    VkDeviceSize size;
    /// Bytes in the pool not used by any allocation.
    VkDeviceSize unusedSize;
    /// Size of the largest unused range, in bytes.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;
1440 
/// Creates a custom memory pool; the handle is written to *pPool.
/// Destroy it with vmaDestroyPool().
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);
1451 
/// Destroys a pool created with vmaCreatePool().
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);
1457 
/// Retrieves statistics of an existing pool into *pPoolStats.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);
1468 
1476  VmaAllocator allocator,
1477  VmaPool pool,
1478  size_t* pLostAllocationCount);
1479 
1480 VK_DEFINE_HANDLE(VmaAllocation)
1481 
1482 
1484 typedef struct VmaAllocationInfo {
1489  uint32_t memoryType;
1498  VkDeviceMemory deviceMemory;
1503  VkDeviceSize offset;
1508  VkDeviceSize size;
1522  void* pUserData;
1524 
/// General-purpose memory allocation.
/// @param[out] pAllocation Handle to the allocated memory; free with vmaFreeMemory().
/// @param[out] pAllocationInfo Information about the allocation — presumably
///             optional (may be null); verify against full documentation.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1541 
1549  VmaAllocator allocator,
1550  VkBuffer buffer,
1551  const VmaAllocationCreateInfo* pCreateInfo,
1552  VmaAllocation* pAllocation,
1553  VmaAllocationInfo* pAllocationInfo);
1554 
/// Allocates memory suitable for the given existing VkImage.
/// @param[out] pAllocation Handle to the allocated memory; free with vmaFreeMemory().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1562 
/// Frees memory previously allocated with vmaAllocateMemory* functions.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
1567 
1585  VmaAllocator allocator,
1586  VmaAllocation allocation,
1587  VmaAllocationInfo* pAllocationInfo);
1588 
/// Returns VK_TRUE if the allocation is still usable.
/// NOTE(review): presumably VK_FALSE indicates the allocation became "lost"
/// (cf. VMA_FRAME_INDEX_LOST in the implementation) — confirm against docs.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);
1606 
1621  VmaAllocator allocator,
1622  VmaAllocation allocation,
1623  void* pUserData);
1624 
1636  VmaAllocator allocator,
1637  VmaAllocation* pAllocation);
1638 
/// Maps the memory represented by the allocation and writes the pointer to *ppData.
/// Pair each successful call with vmaUnmapMemory().
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);
1677 
/// Unmaps memory previously mapped with vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);
1685 
1687 typedef struct VmaDefragmentationInfo {
1692  VkDeviceSize maxBytesToMove;
1699 
1701 typedef struct VmaDefragmentationStats {
1703  VkDeviceSize bytesMoved;
1705  VkDeviceSize bytesFreed;
1711 
/// Compacts memory by moving allocations.
/// @param pAllocations Array of allocationCount allocations considered for moving.
/// @param[out] pAllocationsChanged Per-allocation flags set for moved allocations —
///             presumably optional (may be null); verify against implementation.
/// @param pDefragmentationInfo Optional limits (see VmaDefragmentationInfo).
/// @param[out] pDefragmentationStats Optional statistics output.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);
1801 
/// Creates a VkBuffer, allocates memory for it and binds them together.
/// @param[out] pBuffer Created buffer; destroy with vmaDestroyBuffer().
/// @param[out] pAllocation Allocation backing the buffer.
/// @param[out] pAllocationInfo Presumably optional allocation info — verify.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1835 
/// Destroys a buffer created with vmaCreateBuffer() and frees its allocation.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);
1851 
/// Creates a VkImage, allocates memory for it and binds them together.
/// @param[out] pImage Created image; destroy with vmaDestroyImage().
/// @param[out] pAllocation Allocation backing the image.
/// @param[out] pAllocationInfo Presumably optional allocation info — verify.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);
1860 
/// Destroys an image created with vmaCreateImage() and frees its allocation.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);
1876 
1877 #ifdef __cplusplus
1878 }
1879 #endif
1880 
1881 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1882 
1883 // For Visual Studio IntelliSense.
1884 #ifdef __INTELLISENSE__
1885 #define VMA_IMPLEMENTATION
1886 #endif
1887 
1888 #ifdef VMA_IMPLEMENTATION
1889 #undef VMA_IMPLEMENTATION
1890 
1891 #include <cstdint>
1892 #include <cstdlib>
1893 #include <cstring>
1894 
1895 /*******************************************************************************
1896 CONFIGURATION SECTION
1897 
1898 Define some of these macros before each #include of this header or change them
here if you need other than default behavior depending on your environment.
1900 */
1901 
1902 /*
1903 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1904 internally, like:
1905 
1906  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1907 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
1909 VmaAllocatorCreateInfo::pVulkanFunctions.
1910 */
1911 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1912 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1913 #endif
1914 
1915 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1916 //#define VMA_USE_STL_CONTAINERS 1
1917 
1918 /* Set this macro to 1 to make the library including and using STL containers:
1919 std::pair, std::vector, std::list, std::unordered_map.
1920 
1921 Set it to 0 or undefined to make the library using its own implementation of
1922 the containers.
1923 */
1924 #if VMA_USE_STL_CONTAINERS
1925  #define VMA_USE_STL_VECTOR 1
1926  #define VMA_USE_STL_UNORDERED_MAP 1
1927  #define VMA_USE_STL_LIST 1
1928 #endif
1929 
1930 #if VMA_USE_STL_VECTOR
1931  #include <vector>
1932 #endif
1933 
1934 #if VMA_USE_STL_UNORDERED_MAP
1935  #include <unordered_map>
1936 #endif
1937 
1938 #if VMA_USE_STL_LIST
1939  #include <list>
1940 #endif
1941 
1942 /*
1943 Following headers are used in this CONFIGURATION section only, so feel free to
1944 remove them if not needed.
1945 */
1946 #include <cassert> // for assert
1947 #include <algorithm> // for min, max
1948 #include <mutex> // for std::mutex
1949 #include <atomic> // for std::atomic
1950 
1951 #if !defined(_WIN32) && !defined(__APPLE__)
1952  #include <malloc.h> // for aligned_alloc()
1953 #endif
1954 
1955 #ifndef VMA_NULL
1956  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1957  #define VMA_NULL nullptr
1958 #endif
1959 
1960 #if defined(__APPLE__) || defined(__ANDROID__)
1961 #include <cstdlib>
1962 void *aligned_alloc(size_t alignment, size_t size)
1963 {
1964  // alignment must be >= sizeof(void*)
1965  if(alignment < sizeof(void*))
1966  {
1967  alignment = sizeof(void*);
1968  }
1969 
1970  void *pointer;
1971  if(posix_memalign(&pointer, alignment, size) == 0)
1972  return pointer;
1973  return VMA_NULL;
1974 }
1975 #endif
1976 
1977 // Normal assert to check for programmer's errors, especially in Debug configuration.
1978 #ifndef VMA_ASSERT
1979  #ifdef _DEBUG
1980  #define VMA_ASSERT(expr) assert(expr)
1981  #else
1982  #define VMA_ASSERT(expr)
1983  #endif
1984 #endif
1985 
1986 // Assert that will be called very often, like inside data structures e.g. operator[].
1987 // Making it non-empty can make program slow.
1988 #ifndef VMA_HEAVY_ASSERT
1989  #ifdef _DEBUG
1990  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1991  #else
1992  #define VMA_HEAVY_ASSERT(expr)
1993  #endif
1994 #endif
1995 
1996 #ifndef VMA_ALIGN_OF
1997  #define VMA_ALIGN_OF(type) (__alignof(type))
1998 #endif
1999 
2000 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2001  #if defined(_WIN32)
2002  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2003  #else
2004  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2005  #endif
2006 #endif
2007 
2008 #ifndef VMA_SYSTEM_FREE
2009  #if defined(_WIN32)
2010  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2011  #else
2012  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2013  #endif
2014 #endif
2015 
2016 #ifndef VMA_MIN
2017  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2018 #endif
2019 
2020 #ifndef VMA_MAX
2021  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2022 #endif
2023 
2024 #ifndef VMA_SWAP
2025  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2026 #endif
2027 
2028 #ifndef VMA_SORT
2029  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2030 #endif
2031 
2032 #ifndef VMA_DEBUG_LOG
2033  #define VMA_DEBUG_LOG(format, ...)
2034  /*
2035  #define VMA_DEBUG_LOG(format, ...) do { \
2036  printf(format, __VA_ARGS__); \
2037  printf("\n"); \
2038  } while(false)
2039  */
2040 #endif
2041 
2042 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2043 #if VMA_STATS_STRING_ENABLED
2044  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2045  {
2046  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2047  }
2048  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2049  {
2050  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2051  }
2052  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2053  {
2054  snprintf(outStr, strLen, "%p", ptr);
2055  }
2056 #endif
2057 
2058 #ifndef VMA_MUTEX
2059  class VmaMutex
2060  {
2061  public:
2062  VmaMutex() { }
2063  ~VmaMutex() { }
2064  void Lock() { m_Mutex.lock(); }
2065  void Unlock() { m_Mutex.unlock(); }
2066  private:
2067  std::mutex m_Mutex;
2068  };
2069  #define VMA_MUTEX VmaMutex
2070 #endif
2071 
2072 /*
2073 If providing your own implementation, you need to implement a subset of std::atomic:
2074 
2075 - Constructor(uint32_t desired)
2076 - uint32_t load() const
2077 - void store(uint32_t desired)
2078 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2079 */
2080 #ifndef VMA_ATOMIC_UINT32
2081  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2082 #endif
2083 
2084 #ifndef VMA_BEST_FIT
2085 
2097  #define VMA_BEST_FIT (1)
2098 #endif
2099 
2100 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2101 
2105  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2106 #endif
2107 
2108 #ifndef VMA_DEBUG_ALIGNMENT
2109 
2113  #define VMA_DEBUG_ALIGNMENT (1)
2114 #endif
2115 
2116 #ifndef VMA_DEBUG_MARGIN
2117 
2121  #define VMA_DEBUG_MARGIN (0)
2122 #endif
2123 
2124 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2125 
2129  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2130 #endif
2131 
2132 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2133 
2137  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2138 #endif
2139 
2140 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2141  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2143 #endif
2144 
2145 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2146  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2148 #endif
2149 
2150 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2151 
2152 /*******************************************************************************
2153 END OF CONFIGURATION
2154 */
2155 
2156 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2157  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2158 
2159 // Returns number of bits set to 1 in (v).
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Brian Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2169 
2170 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
2171 // Use types like uint32_t, uint64_t as T.
// Rounds val up to the nearest multiple of align, e.g. VmaAlignUp(11, 8) == 16.
// Use unsigned integer types like uint32_t, uint64_t as T; align must be > 0
// (it does not have to be a power of two, since plain division is used).
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T units = (val + align - 1) / align; // ceil(val / align)
    return units * align;
}
2177 
2178 // Division with mathematical rounding to nearest number.
// Integer division with mathematical rounding to the nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
2184 
2185 #ifndef VMA_SORT
2186 
2187 template<typename Iterator, typename Compare>
2188 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2189 {
2190  Iterator centerValue = end; --centerValue;
2191  Iterator insertIndex = beg;
2192  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2193  {
2194  if(cmp(*memTypeIndex, *centerValue))
2195  {
2196  if(insertIndex != memTypeIndex)
2197  {
2198  VMA_SWAP(*memTypeIndex, *insertIndex);
2199  }
2200  ++insertIndex;
2201  }
2202  }
2203  if(insertIndex != centerValue)
2204  {
2205  VMA_SWAP(*insertIndex, *centerValue);
2206  }
2207  return insertIndex;
2208 }
2209 
// Recursive quicksort over [beg, end); default implementation behind VMA_SORT
// when the user has not provided one.
template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        // Partition around a pivot, then sort both halves (pivot is already placed).
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}
2220 
2221 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2222 
2223 #endif // #ifndef VMA_SORT
2224 
2225 /*
2226 Returns true if two memory blocks occupy overlapping pages.
2227 ResourceA must be in less memory offset than ResourceB.
2228 
2229 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2230 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2231 */
2232 static inline bool VmaBlocksOnSamePage(
2233  VkDeviceSize resourceAOffset,
2234  VkDeviceSize resourceASize,
2235  VkDeviceSize resourceBOffset,
2236  VkDeviceSize pageSize)
2237 {
2238  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2239  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2240  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2241  VkDeviceSize resourceBStart = resourceBOffset;
2242  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2243  return resourceAEndPage == resourceBStartPage;
2244 }
2245 
// Kind of resource occupying a suballocation — used to decide whether two
// neighboring suballocations must respect bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict below).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // unused range
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // resource kind unknown — treated conservatively
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // image with unknown tiling
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
2256 
2257 /*
2258 Returns true if given suballocation types could conflict and must respect
2259 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2260 or linear image and another one is optimal image. If type is unknown, behave
2261 conservatively.
2262 */
2263 static inline bool VmaIsBufferImageGranularityConflict(
2264  VmaSuballocationType suballocType1,
2265  VmaSuballocationType suballocType2)
2266 {
2267  if(suballocType1 > suballocType2)
2268  {
2269  VMA_SWAP(suballocType1, suballocType2);
2270  }
2271 
2272  switch(suballocType1)
2273  {
2274  case VMA_SUBALLOCATION_TYPE_FREE:
2275  return false;
2276  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2277  return true;
2278  case VMA_SUBALLOCATION_TYPE_BUFFER:
2279  return
2280  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2281  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2282  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2283  return
2284  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2285  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2286  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2287  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2288  return
2289  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2290  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2291  return false;
2292  default:
2293  VMA_ASSERT(0);
2294  return true;
2295  }
2296 }
2297 
2298 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2299 struct VmaMutexLock
2300 {
2301 public:
2302  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2303  m_pMutex(useMutex ? &mutex : VMA_NULL)
2304  {
2305  if(m_pMutex)
2306  {
2307  m_pMutex->Lock();
2308  }
2309  }
2310 
2311  ~VmaMutexLock()
2312  {
2313  if(m_pMutex)
2314  {
2315  m_pMutex->Unlock();
2316  }
2317  }
2318 
2319 private:
2320  VMA_MUTEX* m_pMutex;
2321 };
2322 
2323 #if VMA_DEBUG_GLOBAL_MUTEX
2324  static VMA_MUTEX gDebugGlobalMutex;
2325  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2326 #else
2327  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2328 #endif
2329 
2330 // Minimum size of a free suballocation to register it in the free suballocation collection.
2331 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2332 
2333 /*
2334 Performs binary search and returns iterator to first element that is greater or
2335 equal to (key), according to comparison (cmp).
2336 
2337 Cmp should return true if first argument is less than second argument.
2338 
2339 Returned value is the found element, if present in the collection or place where
2340 new element with value (key) should be inserted.
2341 */
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    // Classic lower-bound binary search over indices [down, up).
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            // Element at mid is less than key — answer lies strictly after mid.
            down = mid + 1;
        }
        else
        {
            // Element at mid is not less — it may be the answer.
            up = mid;
        }
    }
    return beg + down;
}
2360 
2362 // Memory allocation
2363 
2364 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2365 {
2366  if((pAllocationCallbacks != VMA_NULL) &&
2367  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2368  {
2369  return (*pAllocationCallbacks->pfnAllocation)(
2370  pAllocationCallbacks->pUserData,
2371  size,
2372  alignment,
2373  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2374  }
2375  else
2376  {
2377  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2378  }
2379 }
2380 
2381 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2382 {
2383  if((pAllocationCallbacks != VMA_NULL) &&
2384  (pAllocationCallbacks->pfnFree != VMA_NULL))
2385  {
2386  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2387  }
2388  else
2389  {
2390  VMA_SYSTEM_FREE(ptr);
2391  }
2392 }
2393 
2394 template<typename T>
2395 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2396 {
2397  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2398 }
2399 
2400 template<typename T>
2401 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2402 {
2403  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2404 }
2405 
// Allocates through VkAllocationCallbacks and constructs in place (placement new).
// Pair with vma_delete / vma_delete_array.
// NOTE(review): vma_new_array value-constructs only the first element — fine
// for the POD types it is used with; verify before using with non-POD T.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2409 
2410 template<typename T>
2411 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2412 {
2413  ptr->~T();
2414  VmaFree(pAllocationCallbacks, ptr);
2415 }
2416 
2417 template<typename T>
2418 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2419 {
2420  if(ptr != VMA_NULL)
2421  {
2422  for(size_t i = count; i--; )
2423  {
2424  ptr[i].~T();
2425  }
2426  VmaFree(pAllocationCallbacks, ptr);
2427  }
2428 }
2429 
2430 // STL-compatible allocator.
// Forwards all allocations to VmaAllocateArray/VmaFree through the stored
// VkAllocationCallbacks, so STL containers honor user-provided callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the STL Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks, so memory
    // allocated by one can be freed by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
2457 
2458 #if VMA_USE_STL_VECTOR
2459 
2460 #define VmaVector std::vector
2461 
// Inserts item at the given index — thin adapter over std::vector::insert so
// call sites stay identical regardless of the VMA_USE_STL_VECTOR setting.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    const typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.insert(pos, item);
}
2467 
// Removes the element at the given index — thin adapter over std::vector::erase.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    const typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.erase(pos);
}
2473 
2474 #else // #if VMA_USE_STL_VECTOR
2475 
2476 /* Class with interface compatible with subset of std::vector.
2477 T must be POD because constructors and destructors are not called and memcpy is
2478 used for these objects. */
2479 template<typename T, typename AllocatorT>
2480 class VmaVector
2481 {
2482 public:
2483  typedef T value_type;
2484 
2485  VmaVector(const AllocatorT& allocator) :
2486  m_Allocator(allocator),
2487  m_pArray(VMA_NULL),
2488  m_Count(0),
2489  m_Capacity(0)
2490  {
2491  }
2492 
2493  VmaVector(size_t count, const AllocatorT& allocator) :
2494  m_Allocator(allocator),
2495  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2496  m_Count(count),
2497  m_Capacity(count)
2498  {
2499  }
2500 
2501  VmaVector(const VmaVector<T, AllocatorT>& src) :
2502  m_Allocator(src.m_Allocator),
2503  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2504  m_Count(src.m_Count),
2505  m_Capacity(src.m_Count)
2506  {
2507  if(m_Count != 0)
2508  {
2509  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2510  }
2511  }
2512 
2513  ~VmaVector()
2514  {
2515  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2516  }
2517 
2518  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2519  {
2520  if(&rhs != this)
2521  {
2522  resize(rhs.m_Count);
2523  if(m_Count != 0)
2524  {
2525  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2526  }
2527  }
2528  return *this;
2529  }
2530 
2531  bool empty() const { return m_Count == 0; }
2532  size_t size() const { return m_Count; }
2533  T* data() { return m_pArray; }
2534  const T* data() const { return m_pArray; }
2535 
2536  T& operator[](size_t index)
2537  {
2538  VMA_HEAVY_ASSERT(index < m_Count);
2539  return m_pArray[index];
2540  }
2541  const T& operator[](size_t index) const
2542  {
2543  VMA_HEAVY_ASSERT(index < m_Count);
2544  return m_pArray[index];
2545  }
2546 
2547  T& front()
2548  {
2549  VMA_HEAVY_ASSERT(m_Count > 0);
2550  return m_pArray[0];
2551  }
2552  const T& front() const
2553  {
2554  VMA_HEAVY_ASSERT(m_Count > 0);
2555  return m_pArray[0];
2556  }
2557  T& back()
2558  {
2559  VMA_HEAVY_ASSERT(m_Count > 0);
2560  return m_pArray[m_Count - 1];
2561  }
2562  const T& back() const
2563  {
2564  VMA_HEAVY_ASSERT(m_Count > 0);
2565  return m_pArray[m_Count - 1];
2566  }
2567 
2568  void reserve(size_t newCapacity, bool freeMemory = false)
2569  {
2570  newCapacity = VMA_MAX(newCapacity, m_Count);
2571 
2572  if((newCapacity < m_Capacity) && !freeMemory)
2573  {
2574  newCapacity = m_Capacity;
2575  }
2576 
2577  if(newCapacity != m_Capacity)
2578  {
2579  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
2580  if(m_Count != 0)
2581  {
2582  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2583  }
2584  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2585  m_Capacity = newCapacity;
2586  m_pArray = newArray;
2587  }
2588  }
2589 
2590  void resize(size_t newCount, bool freeMemory = false)
2591  {
2592  size_t newCapacity = m_Capacity;
2593  if(newCount > m_Capacity)
2594  {
2595  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2596  }
2597  else if(freeMemory)
2598  {
2599  newCapacity = newCount;
2600  }
2601 
2602  if(newCapacity != m_Capacity)
2603  {
2604  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2605  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2606  if(elementsToCopy != 0)
2607  {
2608  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2609  }
2610  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2611  m_Capacity = newCapacity;
2612  m_pArray = newArray;
2613  }
2614 
2615  m_Count = newCount;
2616  }
2617 
2618  void clear(bool freeMemory = false)
2619  {
2620  resize(0, freeMemory);
2621  }
2622 
2623  void insert(size_t index, const T& src)
2624  {
2625  VMA_HEAVY_ASSERT(index <= m_Count);
2626  const size_t oldCount = size();
2627  resize(oldCount + 1);
2628  if(index < oldCount)
2629  {
2630  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2631  }
2632  m_pArray[index] = src;
2633  }
2634 
2635  void remove(size_t index)
2636  {
2637  VMA_HEAVY_ASSERT(index < m_Count);
2638  const size_t oldCount = size();
2639  if(index < oldCount - 1)
2640  {
2641  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2642  }
2643  resize(oldCount - 1);
2644  }
2645 
2646  void push_back(const T& src)
2647  {
2648  const size_t newIndex = size();
2649  resize(newIndex + 1);
2650  m_pArray[newIndex] = src;
2651  }
2652 
2653  void pop_back()
2654  {
2655  VMA_HEAVY_ASSERT(m_Count > 0);
2656  resize(size() - 1);
2657  }
2658 
2659  void push_front(const T& src)
2660  {
2661  insert(0, src);
2662  }
2663 
2664  void pop_front()
2665  {
2666  VMA_HEAVY_ASSERT(m_Count > 0);
2667  remove(0);
2668  }
2669 
2670  typedef T* iterator;
2671 
2672  iterator begin() { return m_pArray; }
2673  iterator end() { return m_pArray + m_Count; }
2674 
2675 private:
2676  AllocatorT m_Allocator;
2677  T* m_pArray;
2678  size_t m_Count;
2679  size_t m_Capacity;
2680 };
2681 
// Inserts item at given index — counterpart of the std::vector-based overload,
// so call sites are independent of the VMA_USE_STL_VECTOR setting.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
2687 
// Removes the element at given index — counterpart of the std::vector-based overload.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
2693 
2694 #endif // #if VMA_USE_STL_VECTOR
2695 
// Inserts value into a vector kept sorted w.r.t. CmpLess and returns the
// insertion index.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    // Lower bound is the position that keeps the vector sorted.
    const typename VectorT::value_type* const dataBeg = vector.data();
    const typename VectorT::value_type* const dataEnd = dataBeg + vector.size();
    const size_t indexToInsert = (size_t)(VmaBinaryFindFirstNotLess(
        dataBeg,
        dataEnd,
        value,
        CmpLess()) - dataBeg);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
2707 
// Removes one element equivalent to value (per CmpLess) from a sorted
// vector. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // The found element is the first not less than value; it is equivalent
    // exactly when neither compares less than the other.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, static_cast<size_t>(it - vector.begin()));
    return true;
}
2725 
// Binary-searches a sorted vector for an element equivalent to value (per
// CmpLess). Returns its index, or vector.size() if not found.
//
// Fixed: the previous version compared the result iterator against
// vector.size() (pointer vs. size_t) and obtained a mutable iterator from a
// const vector's data() — both ill-formed on instantiation; the template was
// simply never instantiated, so the errors went undiagnosed.
template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const pBegin = vector.data();
    const typename VectorT::value_type* const pEnd = pBegin + vector.size();
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        pBegin,
        pEnd,
        value,
        comparator);
    if(it != pEnd && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - pBegin;
    }
    else
    {
        return vector.size();
    }
}
2744 
2746 // class VmaPoolAllocator
2747 
2748 /*
2749 Allocator for objects of type T using a list of arrays (pools) to speed up
2750 allocation. Number of elements that can be allocated is not bounded because
2751 allocator can create multiple blocks.
2752 */
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any items still handed out become invalid.
    void Clear();
    // Returns raw storage for one T. No constructor is run here.
    T* Alloc();
    // Returns an item to its owning block's free list. No destructor is run here.
    void Free(T* ptr);

private:
    // Each slot is either a live T or, while free, the index of the next
    // free slot in its block (an intrusive singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of Items plus the head of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock; // Capacity of every block, in items.
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates and appends a new, fully-free block.
    ItemBlock& CreateNewBlock();
};
2782 
// Stores the callback pointer (not a deep copy) for all internal block
// allocations. itemsPerBlock must be greater than zero.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
2791 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all blocks; any items still handed out become dangling.
    Clear();
}
2797 
2798 template<typename T>
2799 void VmaPoolAllocator<T>::Clear()
2800 {
2801  for(size_t i = m_ItemBlocks.size(); i--; )
2802  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2803  m_ItemBlocks.clear();
2804 }
2805 
2806 template<typename T>
2807 T* VmaPoolAllocator<T>::Alloc()
2808 {
2809  for(size_t i = m_ItemBlocks.size(); i--; )
2810  {
2811  ItemBlock& block = m_ItemBlocks[i];
2812  // This block has some free items: Use first one.
2813  if(block.FirstFreeIndex != UINT32_MAX)
2814  {
2815  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2816  block.FirstFreeIndex = pItem->NextFreeIndex;
2817  return &pItem->Value;
2818  }
2819  }
2820 
2821  // No block has free item: Create new one and use it.
2822  ItemBlock& newBlock = CreateNewBlock();
2823  Item* const pItem = &newBlock.pItems[0];
2824  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2825  return &pItem->Value;
2826 }
2827 
2828 template<typename T>
2829 void VmaPoolAllocator<T>::Free(T* ptr)
2830 {
2831  // Search all memory blocks to find ptr.
2832  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2833  {
2834  ItemBlock& block = m_ItemBlocks[i];
2835 
2836  // Casting to union.
2837  Item* pItemPtr;
2838  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2839 
2840  // Check if pItemPtr is in address range of this block.
2841  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2842  {
2843  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2844  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2845  block.FirstFreeIndex = index;
2846  return;
2847  }
2848  }
2849  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2850 }
2851 
2852 template<typename T>
2853 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2854 {
2855  ItemBlock newBlock = {
2856  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2857 
2858  m_ItemBlocks.push_back(newBlock);
2859 
2860  // Setup singly-linked list of all free items in this block.
2861  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2862  newBlock.pItems[i].NextFreeIndex = i + 1;
2863  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2864  return m_ItemBlocks.back();
2865 }
2866 
2868 // class VmaRawList, VmaList
2869 
2870 #if VMA_USE_STL_LIST
2871 
2872 #define VmaList std::list
2873 
2874 #else // #if VMA_USE_STL_LIST
2875 
// Node of the doubly linked list VmaRawList. pPrev/pNext are null at the
// respective ends of the list.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
2883 
2884 // Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items back to the node pool and resets to the empty state.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The overloads without a value return the new node with its Value
    // member left unassigned; the caller is expected to fill it in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node pool, 128 items per block.
    ItemType* m_pFront; // Null when the list is empty.
    ItemType* m_pBack;  // Null when the list is empty.
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
2931 
// Constructs an empty list. The callbacks are forwarded to the internal
// node pool allocator.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
2941 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node memory at once.
}
2948 
2949 template<typename T>
2950 void VmaRawList<T>::Clear()
2951 {
2952  if(IsEmpty() == false)
2953  {
2954  ItemType* pItem = m_pBack;
2955  while(pItem != VMA_NULL)
2956  {
2957  ItemType* const pPrevItem = pItem->pPrev;
2958  m_ItemAllocator.Free(pItem);
2959  pItem = pPrevItem;
2960  }
2961  m_pFront = VMA_NULL;
2962  m_pBack = VMA_NULL;
2963  m_Count = 0;
2964  }
2965 }
2966 
2967 template<typename T>
2968 VmaListItem<T>* VmaRawList<T>::PushBack()
2969 {
2970  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2971  pNewItem->pNext = VMA_NULL;
2972  if(IsEmpty())
2973  {
2974  pNewItem->pPrev = VMA_NULL;
2975  m_pFront = pNewItem;
2976  m_pBack = pNewItem;
2977  m_Count = 1;
2978  }
2979  else
2980  {
2981  pNewItem->pPrev = m_pBack;
2982  m_pBack->pNext = pNewItem;
2983  m_pBack = pNewItem;
2984  ++m_Count;
2985  }
2986  return pNewItem;
2987 }
2988 
2989 template<typename T>
2990 VmaListItem<T>* VmaRawList<T>::PushFront()
2991 {
2992  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2993  pNewItem->pPrev = VMA_NULL;
2994  if(IsEmpty())
2995  {
2996  pNewItem->pNext = VMA_NULL;
2997  m_pFront = pNewItem;
2998  m_pBack = pNewItem;
2999  m_Count = 1;
3000  }
3001  else
3002  {
3003  pNewItem->pNext = m_pFront;
3004  m_pFront->pPrev = pNewItem;
3005  m_pFront = pNewItem;
3006  ++m_Count;
3007  }
3008  return pNewItem;
3009 }
3010 
3011 template<typename T>
3012 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3013 {
3014  ItemType* const pNewItem = PushBack();
3015  pNewItem->Value = value;
3016  return pNewItem;
3017 }
3018 
3019 template<typename T>
3020 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3021 {
3022  ItemType* const pNewItem = PushFront();
3023  pNewItem->Value = value;
3024  return pNewItem;
3025 }
3026 
3027 template<typename T>
3028 void VmaRawList<T>::PopBack()
3029 {
3030  VMA_HEAVY_ASSERT(m_Count > 0);
3031  ItemType* const pBackItem = m_pBack;
3032  ItemType* const pPrevItem = pBackItem->pPrev;
3033  if(pPrevItem != VMA_NULL)
3034  {
3035  pPrevItem->pNext = VMA_NULL;
3036  }
3037  m_pBack = pPrevItem;
3038  m_ItemAllocator.Free(pBackItem);
3039  --m_Count;
3040 }
3041 
3042 template<typename T>
3043 void VmaRawList<T>::PopFront()
3044 {
3045  VMA_HEAVY_ASSERT(m_Count > 0);
3046  ItemType* const pFrontItem = m_pFront;
3047  ItemType* const pNextItem = pFrontItem->pNext;
3048  if(pNextItem != VMA_NULL)
3049  {
3050  pNextItem->pPrev = VMA_NULL;
3051  }
3052  m_pFront = pNextItem;
3053  m_ItemAllocator.Free(pFrontItem);
3054  --m_Count;
3055 }
3056 
3057 template<typename T>
3058 void VmaRawList<T>::Remove(ItemType* pItem)
3059 {
3060  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3061  VMA_HEAVY_ASSERT(m_Count > 0);
3062 
3063  if(pItem->pPrev != VMA_NULL)
3064  {
3065  pItem->pPrev->pNext = pItem->pNext;
3066  }
3067  else
3068  {
3069  VMA_HEAVY_ASSERT(m_pFront == pItem);
3070  m_pFront = pItem->pNext;
3071  }
3072 
3073  if(pItem->pNext != VMA_NULL)
3074  {
3075  pItem->pNext->pPrev = pItem->pPrev;
3076  }
3077  else
3078  {
3079  VMA_HEAVY_ASSERT(m_pBack == pItem);
3080  m_pBack = pItem->pPrev;
3081  }
3082 
3083  m_ItemAllocator.Free(pItem);
3084  --m_Count;
3085 }
3086 
3087 template<typename T>
3088 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3089 {
3090  if(pItem != VMA_NULL)
3091  {
3092  ItemType* const prevItem = pItem->pPrev;
3093  ItemType* const newItem = m_ItemAllocator.Alloc();
3094  newItem->pPrev = prevItem;
3095  newItem->pNext = pItem;
3096  pItem->pPrev = newItem;
3097  if(prevItem != VMA_NULL)
3098  {
3099  prevItem->pNext = newItem;
3100  }
3101  else
3102  {
3103  VMA_HEAVY_ASSERT(m_pFront == pItem);
3104  m_pFront = newItem;
3105  }
3106  ++m_Count;
3107  return newItem;
3108  }
3109  else
3110  return PushBack();
3111 }
3112 
3113 template<typename T>
3114 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3115 {
3116  if(pItem != VMA_NULL)
3117  {
3118  ItemType* const nextItem = pItem->pNext;
3119  ItemType* const newItem = m_ItemAllocator.Alloc();
3120  newItem->pNext = nextItem;
3121  newItem->pPrev = pItem;
3122  pItem->pNext = newItem;
3123  if(nextItem != VMA_NULL)
3124  {
3125  nextItem->pPrev = newItem;
3126  }
3127  else
3128  {
3129  VMA_HEAVY_ASSERT(m_pBack == pItem);
3130  m_pBack = newItem;
3131  }
3132  ++m_Count;
3133  return newItem;
3134  }
3135  else
3136  return PushFront();
3137 }
3138 
3139 template<typename T>
3140 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3141 {
3142  ItemType* const newItem = InsertBefore(pItem);
3143  newItem->Value = value;
3144  return newItem;
3145 }
3146 
3147 template<typename T>
3148 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3149 {
3150  ItemType* const newItem = InsertAfter(pItem);
3151  newItem->Value = value;
3152  return newItem;
3153 }
3154 
/*
Wrapper over VmaRawList that provides the subset of the std::list interface
used by this library: empty/size, begin/end, cbegin/cend, clear, push_back,
insert, erase. AllocatorT must expose an m_pCallbacks member (see the
constructor). Used when VMA_USE_STL_LIST is not defined.
*/
template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        // Default-constructed iterator is singular (no list, no item).
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // A null m_pItem represents end(); decrementing end() yields the
            // last element, which requires a non-empty list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is invalid.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents the end() position.

        // Only VmaList may create non-singular iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        // Default-constructed iterator is singular (no list, no item).
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from a mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // A null m_pItem represents cend(); decrementing it yields the
            // last element, which requires a non-empty list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is invalid.
        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        // Only VmaList may create non-singular const_iterators.
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents the cend() position.

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
3338 
3339 #endif // #if VMA_USE_STL_LIST
3340 
3342 // class VmaMap
3343 
3344 // Unused in this version.
3345 #if 0
3346 
3347 #if VMA_USE_STL_UNORDERED_MAP
3348 
3349 #define VmaPair std::pair
3350 
3351 #define VMA_MAP_TYPE(KeyT, ValueT) \
3352  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3353 
3354 #else // #if VMA_USE_STL_UNORDERED_MAP
3355 
// Minimal substitute for std::pair, used by VmaMap below.
// (This whole section is disabled with #if 0 — unused in this version.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
3365 
3366 /* Class compatible with subset of interface of std::unordered_map.
3367 KeyT, ValueT must be POD because they will be stored in VmaVector.
3368 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying sorted vector.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key so that lookup can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
3388 
3389 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3390 
// Orders pairs by their first member. The heterogeneous overload lets a
// binary search compare a stored pair directly against a bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
3403 
// Inserts pair at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
3414 
// Binary-searches for key. Returns an iterator to the matching pair, or
// end() when the key is not present.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
3432 
// Removes the element it points to; it must be a valid iterator of this map.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
3438 
3439 #endif // #if VMA_USE_STL_UNORDERED_MAP
3440 
3441 #endif // #if 0
3442 
3444 
3445 class VmaDeviceMemoryBlock;
3446 
/*
Internal representation of a single allocation (the object behind the
VmaAllocation handle). It is either a suballocation of a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or owns its own VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the per-type data lives in the union below.
*/
struct VmaAllocation_T
{
private:
    // Bit of m_MapCount set when the allocation was created persistently
    // mapped; the low 7 bits are the vmaMapMemory reference count.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData points to a string owned by this allocation.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs an uninitialized allocation; one of the Init* methods must
    // be called before it is usable.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation of `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is already lost
    // (block/offset are null; m_LastUseFrameIndex must already be
    // VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Only valid for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with the statistics of this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};
3640 
3641 /*
3642 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3643 allocated memory block or free.
3644 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region within the owning block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // Owning allocation for used regions — presumably unset/null for free ones; confirm against VmaBlockMetadata usage.
    VmaSuballocationType type; // Distinguishes free regions from the various used types.
};
3652 
3653 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3654 
3655 // Cost of one additional allocation lost, as equivalent in bytes.
3656 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3657 
3658 /*
3659 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3660 
3661 If canMakeOtherLost was false:
3662 - item points to a FREE suballocation.
3663 - itemsToMakeLostCount is 0.
3664 
3665 If canMakeOtherLost was true:
3666 - item points to first of sequence of suballocations, which are either FREE,
3667  or point to VmaAllocations that can become lost.
3668 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3669  the requested allocation to succeed.
3670 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of fulfilling this request: bytes of live allocations
    // that would be sacrificed, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
3684 
3685 /*
3686 Data structure used for bookkeeping of allocations and unused ranges of memory
3687 in a single VkDeviceMemory block.
3688 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    // Must be called once before any other method; size is the total size of
    // the managed VkDeviceMemory block.
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    // Used suballocations = all suballocations minus the free ones.
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);
    void FreeAtOffset(VkDeviceSize offset);

private:
    VkDeviceSize m_Size;      // Total size of the managed block.
    uint32_t m_FreeCount;     // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes in free suballocations.
    VmaSuballocationList m_Suballocations; // All regions of the block, ordered by offset.
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
3785 
// Helper class that represents mapped memory. Synchronized internally.
// Reference-counts Map() calls so a single VkDeviceMemory is host-mapped at
// most once at a time and unmapped only when the last user releases it.
class VmaDeviceMemoryMapping
{
public:
    VmaDeviceMemoryMapping();
    ~VmaDeviceMemoryMapping();

    // Returns the base host pointer of the current mapping, or null when not mapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Adds `count` references to the mapping of hMemory.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
    // Releases `count` references; presumably unmaps when the count reaches
    // zero — confirm in the out-of-view definition.
    void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);

private:
    VMA_MUTEX m_Mutex;    // Guards m_MapCount and m_pMappedData.
    uint32_t m_MapCount;  // Number of outstanding Map() references.
    void* m_pMappedData;  // Host pointer of the active mapping, if any.
};
3804 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, `VmaAllocation`), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
public:
    uint32_t m_MemoryTypeIndex;        // Vulkan memory type this block belongs to.
    VkDeviceMemory m_hMemory;          // The owned device memory handle.
    VmaDeviceMemoryMapping m_Mapping;  // Ref-counted host mapping of m_hMemory.
    VmaBlockMetadata m_Metadata;       // Bookkeeping of suballocations inside this block.

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must have been called first, releasing m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Maps the block's memory via m_Mapping, adding `count` references.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);
};
3841 
// Strict-weak ordering of raw pointers by address; used to keep
// pointer-valued vectors sorted for binary search.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        // NOTE(review): relational comparison of pointers into different
        // objects is unspecified in standard C++; all supported compilers
        // order by address in practice (std::less would guarantee it).
        return lhs < rhs;
    }
};
3849 
3850 class VmaDefragmentator;
3851 
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-creates blocks up to m_MinBlockCount — confirm in definition.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Returns m_pDefragmentator, creating it on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VmaDefragmentator* m_pDefragmentator;

    size_t CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
3946 
// Internal representation behind the public VmaPool handle: a custom memory
// pool is an owned VmaBlockVector configured from VmaPoolCreateInfo.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
3964 
// Compacts the blocks of a single VmaBlockVector: allocations registered via
// AddAllocation() may be moved between blocks during Defragment().
// Created and owned by VmaBlockVector (see EnsureDefragmentator).
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;  // The vector whose blocks are compacted.
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;             // Running totals exposed by the getters below.
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation. m_pChanged is an
    // optional caller-owned out-flag — presumably set when the allocation is
    // actually relocated; confirm in DefragmentRound().
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations largest-first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block holds non-movable allocations when it contains more
        // allocations than were registered for defragmentation here.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo for "Descending"; renaming
        // would break out-of-view callers, so the name is kept.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: allows searching BlockInfo* collections by a
    // raw VmaDeviceMemoryBlock pointer as well.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Runs defragmentation within the given move budgets.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
4092 
// Main allocator object.
// Implementation behind the public VmaAllocator handle. Owns the default
// block vectors (one per memory type), the list of custom pools, and the
// per-type registries of dedicated allocations.
struct VmaAllocator_T
{
    bool m_UseMutex;                     // Presumably gates the internal mutexes — confirm at use sites.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified; // True when the user supplied CPU allocation callbacks.
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user's CPU allocation callbacks, or null to use defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, raised to the configured debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }

    // Queries memory requirements and whether a dedicated allocation is
    // required/preferred for the given buffer or image.
    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Calls vkAllocateMemory/vkFreeMemory, honoring heap size limits and
    // device-memory callbacks — confirm details in definitions.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;

    VmaVulkanFunctions m_VulkanFunctions;

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
4246 
4248 // Memory allocation #2 after VmaAllocator_T definition
4249 
4250 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4251 {
4252  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4253 }
4254 
4255 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4256 {
4257  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4258 }
4259 
4260 template<typename T>
4261 static T* VmaAllocate(VmaAllocator hAllocator)
4262 {
4263  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4264 }
4265 
4266 template<typename T>
4267 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4268 {
4269  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4270 }
4271 
4272 template<typename T>
4273 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4274 {
4275  if(ptr != VMA_NULL)
4276  {
4277  ptr->~T();
4278  VmaFree(hAllocator, ptr);
4279  }
4280 }
4281 
4282 template<typename T>
4283 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4284 {
4285  if(ptr != VMA_NULL)
4286  {
4287  for(size_t i = count; i--; )
4288  ptr[i].~T();
4289  VmaFree(hAllocator, ptr);
4290  }
4291 }
4292 
4294 // VmaStringBuilder
4295 
4296 #if VMA_STATS_STRING_ENABLED
4297 
// Minimal append-only string buffer used to build statistics output.
// The stored text is NOT NUL-terminated; use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;  // Raw characters, no terminator.
};
4315 
4316 void VmaStringBuilder::Add(const char* pStr)
4317 {
4318  const size_t strLen = strlen(pStr);
4319  if(strLen > 0)
4320  {
4321  const size_t oldCount = m_Data.size();
4322  m_Data.resize(oldCount + strLen);
4323  memcpy(m_Data.data() + oldCount, pStr, strLen);
4324  }
4325 }
4326 
4327 void VmaStringBuilder::AddNumber(uint32_t num)
4328 {
4329  char buf[11];
4330  VmaUint32ToStr(buf, sizeof(buf), num);
4331  Add(buf);
4332 }
4333 
4334 void VmaStringBuilder::AddNumber(uint64_t num)
4335 {
4336  char buf[21];
4337  VmaUint64ToStr(buf, sizeof(buf), num);
4338  Add(buf);
4339 }
4340 
4341 void VmaStringBuilder::AddPointer(const void* ptr)
4342 {
4343  char buf[21];
4344  VmaPtrToStr(buf, sizeof(buf), ptr);
4345  Add(buf);
4346 }
4347 
4348 #endif // #if VMA_STATS_STRING_ENABLED
4349 
4351 // VmaJsonWriter
4352 
4353 #if VMA_STATS_STRING_ENABLED
4354 
// Streaming JSON writer used to produce the detailed-statistics string.
// Maintains a stack of currently open collections so it can emit commas,
// colons and indentation automatically; usage errors trip VMA_ASSERT.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Opens "{...}"; singleLine suppresses newlines/indentation inside it.
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Opens "[...]".
    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value. Inside an object, strings at even
    // valueCount positions act as keys (enforced in BeginValue).
    void WriteString(const char* pStr);
    // Begin/Continue/End allow composing one string value from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;  // One indentation unit, see WriteIndent().

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // Items written so far; in objects, keys and values both count.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;    // Output sink (not owned).
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;  // Open collections, innermost last.
    bool m_InsideString;       // True between BeginString and EndString.

    // Emits separator/indentation before a new value; asserts object keys are strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
4402 
// Single indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
4404 
// Starts outside any string, with an empty collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
4411 
VmaJsonWriter::~VmaJsonWriter()
{
    // A well-formed document must have every string and collection closed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
4417 
4418 void VmaJsonWriter::BeginObject(bool singleLine)
4419 {
4420  VMA_ASSERT(!m_InsideString);
4421 
4422  BeginValue(false);
4423  m_SB.Add('{');
4424 
4425  StackItem item;
4426  item.type = COLLECTION_TYPE_OBJECT;
4427  item.valueCount = 0;
4428  item.singleLineMode = singleLine;
4429  m_Stack.push_back(item);
4430 }
4431 
4432 void VmaJsonWriter::EndObject()
4433 {
4434  VMA_ASSERT(!m_InsideString);
4435 
4436  WriteIndent(true);
4437  m_SB.Add('}');
4438 
4439  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4440  m_Stack.pop_back();
4441 }
4442 
4443 void VmaJsonWriter::BeginArray(bool singleLine)
4444 {
4445  VMA_ASSERT(!m_InsideString);
4446 
4447  BeginValue(false);
4448  m_SB.Add('[');
4449 
4450  StackItem item;
4451  item.type = COLLECTION_TYPE_ARRAY;
4452  item.valueCount = 0;
4453  item.singleLineMode = singleLine;
4454  m_Stack.push_back(item);
4455 }
4456 
4457 void VmaJsonWriter::EndArray()
4458 {
4459  VMA_ASSERT(!m_InsideString);
4460 
4461  WriteIndent(true);
4462  m_SB.Add(']');
4463 
4464  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4465  m_Stack.pop_back();
4466 }
4467 
// Writes a complete JSON string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
4473 
4474 void VmaJsonWriter::BeginString(const char* pStr)
4475 {
4476  VMA_ASSERT(!m_InsideString);
4477 
4478  BeginValue(true);
4479  m_SB.Add('"');
4480  m_InsideString = true;
4481  if(pStr != VMA_NULL && pStr[0] != '\0')
4482  {
4483  ContinueString(pStr);
4484  }
4485 }
4486 
4487 void VmaJsonWriter::ContinueString(const char* pStr)
4488 {
4489  VMA_ASSERT(m_InsideString);
4490 
4491  const size_t strLen = strlen(pStr);
4492  for(size_t i = 0; i < strLen; ++i)
4493  {
4494  char ch = pStr[i];
4495  if(ch == '\'')
4496  {
4497  m_SB.Add("\\\\");
4498  }
4499  else if(ch == '"')
4500  {
4501  m_SB.Add("\\\"");
4502  }
4503  else if(ch >= 32)
4504  {
4505  m_SB.Add(ch);
4506  }
4507  else switch(ch)
4508  {
4509  case '\b':
4510  m_SB.Add("\\b");
4511  break;
4512  case '\f':
4513  m_SB.Add("\\f");
4514  break;
4515  case '\n':
4516  m_SB.Add("\\n");
4517  break;
4518  case '\r':
4519  m_SB.Add("\\r");
4520  break;
4521  case '\t':
4522  m_SB.Add("\\t");
4523  break;
4524  default:
4525  VMA_ASSERT(0 && "Character not currently supported.");
4526  break;
4527  }
4528  }
4529 }
4530 
// Appends the decimal digits of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4536 
// Appends the decimal digits of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4542 
// Appends a textual pointer value to the string currently being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
4548 
4549 void VmaJsonWriter::EndString(const char* pStr)
4550 {
4551  VMA_ASSERT(m_InsideString);
4552  if(pStr != VMA_NULL && pStr[0] != '\0')
4553  {
4554  ContinueString(pStr);
4555  }
4556  m_SB.Add('"');
4557  m_InsideString = false;
4558 }
4559 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4566 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4573 
4574 void VmaJsonWriter::WriteBool(bool b)
4575 {
4576  VMA_ASSERT(!m_InsideString);
4577  BeginValue(false);
4578  m_SB.Add(b ? "true" : "false");
4579 }
4580 
// Writes a complete JSON null value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
4587 
4588 void VmaJsonWriter::BeginValue(bool isString)
4589 {
4590  if(!m_Stack.empty())
4591  {
4592  StackItem& currItem = m_Stack.back();
4593  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4594  currItem.valueCount % 2 == 0)
4595  {
4596  VMA_ASSERT(isString);
4597  }
4598 
4599  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4600  currItem.valueCount % 2 != 0)
4601  {
4602  m_SB.Add(": ");
4603  }
4604  else if(currItem.valueCount > 0)
4605  {
4606  m_SB.Add(", ");
4607  WriteIndent();
4608  }
4609  else
4610  {
4611  WriteIndent();
4612  }
4613  ++currItem.valueCount;
4614  }
4615 }
4616 
4617 void VmaJsonWriter::WriteIndent(bool oneLess)
4618 {
4619  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4620  {
4621  m_SB.AddNewLine();
4622 
4623  size_t count = m_Stack.size();
4624  if(count > 0 && oneLess)
4625  {
4626  --count;
4627  }
4628  for(size_t i = 0; i < count; ++i)
4629  {
4630  m_SB.Add(INDENT);
4631  }
4632  }
4633 }
4634 
4635 #endif // #if VMA_STATS_STRING_ENABLED
4636 
4638 
4639 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4640 {
4641  if(IsUserDataString())
4642  {
4643  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4644 
4645  FreeUserDataString(hAllocator);
4646 
4647  if(pUserData != VMA_NULL)
4648  {
4649  const char* const newStrSrc = (char*)pUserData;
4650  const size_t newStrLen = strlen(newStrSrc);
4651  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4652  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4653  m_pUserData = newStrDst;
4654  }
4655  }
4656  else
4657  {
4658  m_pUserData = pUserData;
4659  }
4660 }
4661 
// Re-binds this block allocation to a new block/offset (used when the
// defragmentator has moved the allocation's data).
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // Persistent mapping holds one extra reference beyond user Map() calls.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        // NOTE(review): Map() returns a VkResult that is ignored here; if
        // mapping the new block can fail on this path the reference is lost —
        // confirm against VmaDeviceMemoryBlock::Map.
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
4683 
4684 VkDeviceSize VmaAllocation_T::GetOffset() const
4685 {
4686  switch(m_Type)
4687  {
4688  case ALLOCATION_TYPE_BLOCK:
4689  return m_BlockAllocation.m_Offset;
4690  case ALLOCATION_TYPE_DEDICATED:
4691  return 0;
4692  default:
4693  VMA_ASSERT(0);
4694  return 0;
4695  }
4696 }
4697 
4698 VkDeviceMemory VmaAllocation_T::GetMemory() const
4699 {
4700  switch(m_Type)
4701  {
4702  case ALLOCATION_TYPE_BLOCK:
4703  return m_BlockAllocation.m_Block->m_hMemory;
4704  case ALLOCATION_TYPE_DEDICATED:
4705  return m_DedicatedAllocation.m_hMemory;
4706  default:
4707  VMA_ASSERT(0);
4708  return VK_NULL_HANDLE;
4709  }
4710 }
4711 
4712 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4713 {
4714  switch(m_Type)
4715  {
4716  case ALLOCATION_TYPE_BLOCK:
4717  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4718  case ALLOCATION_TYPE_DEDICATED:
4719  return m_DedicatedAllocation.m_MemoryTypeIndex;
4720  default:
4721  VMA_ASSERT(0);
4722  return UINT32_MAX;
4723  }
4724 }
4725 
4726 void* VmaAllocation_T::GetMappedData() const
4727 {
4728  switch(m_Type)
4729  {
4730  case ALLOCATION_TYPE_BLOCK:
4731  if(m_MapCount != 0)
4732  {
4733  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4734  VMA_ASSERT(pBlockData != VMA_NULL);
4735  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4736  }
4737  else
4738  {
4739  return VMA_NULL;
4740  }
4741  break;
4742  case ALLOCATION_TYPE_DEDICATED:
4743  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4744  return m_DedicatedAllocation.m_pMappedData;
4745  default:
4746  VMA_ASSERT(0);
4747  return VMA_NULL;
4748  }
4749 }
4750 
4751 bool VmaAllocation_T::CanBecomeLost() const
4752 {
4753  switch(m_Type)
4754  {
4755  case ALLOCATION_TYPE_BLOCK:
4756  return m_BlockAllocation.m_CanBecomeLost;
4757  case ALLOCATION_TYPE_DEDICATED:
4758  return false;
4759  default:
4760  VMA_ASSERT(0);
4761  return false;
4762  }
4763 }
4764 
// Returns the pool this allocation came from. Valid only for block
// allocations; dedicated allocations never belong to a pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
4770 
// Tries to atomically mark this allocation as lost. Returns true on success;
// false when the allocation was used too recently (still protected by
// frameInUseCount frames relative to currentFrameIndex).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-exchange retry loop on the atomic last-use frame index;
    // CompareExchangeLastUseFrameIndex presumably refreshes
    // localLastUseFrameIndex on failure — confirm in its definition.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — the caller should not have asked again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
4802 
4803 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4804 {
4805  VMA_ASSERT(IsUserDataString());
4806  if(m_pUserData != VMA_NULL)
4807  {
4808  char* const oldStr = (char*)m_pUserData;
4809  const size_t oldStrLen = strlen(oldStr);
4810  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4811  m_pUserData = VMA_NULL;
4812  }
4813 }
4814 
4815 void VmaAllocation_T::BlockAllocMap()
4816 {
4817  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4818 
4819  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4820  {
4821  ++m_MapCount;
4822  }
4823  else
4824  {
4825  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4826  }
4827 }
4828 
4829 void VmaAllocation_T::BlockAllocUnmap()
4830 {
4831  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4832 
4833  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4834  {
4835  --m_MapCount;
4836  }
4837  else
4838  {
4839  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4840  }
4841 }
4842 
4843 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4844 {
4845  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4846 
4847  if(m_MapCount != 0)
4848  {
4849  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4850  {
4851  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4852  *ppData = m_DedicatedAllocation.m_pMappedData;
4853  ++m_MapCount;
4854  return VK_SUCCESS;
4855  }
4856  else
4857  {
4858  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4859  return VK_ERROR_MEMORY_MAP_FAILED;
4860  }
4861  }
4862  else
4863  {
4864  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4865  hAllocator->m_hDevice,
4866  m_DedicatedAllocation.m_hMemory,
4867  0, // offset
4868  VK_WHOLE_SIZE,
4869  0, // flags
4870  ppData);
4871  if(result == VK_SUCCESS)
4872  {
4873  m_DedicatedAllocation.m_pMappedData = *ppData;
4874  m_MapCount = 1;
4875  }
4876  return result;
4877  }
4878 }
4879 
4880 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4881 {
4882  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4883 
4884  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4885  {
4886  --m_MapCount;
4887  if(m_MapCount == 0)
4888  {
4889  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4890  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4891  hAllocator->m_hDevice,
4892  m_DedicatedAllocation.m_hMemory);
4893  }
4894  }
4895  else
4896  {
4897  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4898  }
4899 }
4900 
4901 #if VMA_STATS_STRING_ENABLED
4902 
// Correspond to values of enum VmaSuballocationType.
// Indexed by the enum value when printing the detailed statistics map.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
4912 
// Serializes one VmaStatInfo as a JSON object. The Min/Avg/Max sub-objects
// are emitted only when more than one allocation / unused range exists.
// NOTE: key order is part of the emitted format — do not reorder.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
4960 
4961 #endif // #if VMA_STATS_STRING_ENABLED
4962 
// Comparator that orders free-suballocation list iterators by ascending
// suballocation size. The second overload compares an iterator against a
// plain VkDeviceSize key, enabling binary search (VmaBinaryFindFirstNotLess)
// over m_FreeSuballocationsBySize without constructing a dummy element.
struct VmaSuballocationItemSizeLess
{
    // Iterator-vs-iterator comparison: order by pointed-to size.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Iterator-vs-size comparison, used as the binary-search key predicate.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
4978 
4980 // class VmaBlockMetadata
4981 
// Constructs empty metadata; Init() must be called before the object is
// usable. Both containers route their host allocations through the
// allocator's VkAllocationCallbacks via VmaStlAllocator.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata::~VmaBlockMetadata()
{
}
4994 
4995 void VmaBlockMetadata::Init(VkDeviceSize size)
4996 {
4997  m_Size = size;
4998  m_FreeCount = 1;
4999  m_SumFreeSize = size;
5000 
5001  VmaSuballocation suballoc = {};
5002  suballoc.offset = 0;
5003  suballoc.size = size;
5004  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5005  suballoc.hAllocation = VK_NULL_HANDLE;
5006 
5007  m_Suballocations.push_back(suballoc);
5008  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5009  --suballocItem;
5010  m_FreeSuballocationsBySize.push_back(suballocItem);
5011 }
5012 
// Full consistency check of this block's metadata: offsets are contiguous,
// no two adjacent free ranges exist, used ranges agree with their
// VmaAllocation objects, and the by-size index matches the list.
// Returns false on the first violated invariant. Intended for debug builds
// (called via VMA_HEAVY_ASSERT).
bool VmaBlockMetadata::Validate() const
{
    // The list always contains at least one suballocation (a single free one
    // when the block is empty).
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // A free range must have a null allocation handle; a used range must not.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free ranges at least this large are indexed by size.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }
        else
        {
            // A used range must agree with the allocation object it holds.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != m_Size) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
5118 
5119 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5120 {
5121  if(!m_FreeSuballocationsBySize.empty())
5122  {
5123  return m_FreeSuballocationsBySize.back()->size;
5124  }
5125  else
5126  {
5127  return 0;
5128  }
5129 }
5130 
5131 bool VmaBlockMetadata::IsEmpty() const
5132 {
5133  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5134 }
5135 
5136 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5137 {
5138  outInfo.blockCount = 1;
5139 
5140  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5141  outInfo.allocationCount = rangeCount - m_FreeCount;
5142  outInfo.unusedRangeCount = m_FreeCount;
5143 
5144  outInfo.unusedBytes = m_SumFreeSize;
5145  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5146 
5147  outInfo.allocationSizeMin = UINT64_MAX;
5148  outInfo.allocationSizeMax = 0;
5149  outInfo.unusedRangeSizeMin = UINT64_MAX;
5150  outInfo.unusedRangeSizeMax = 0;
5151 
5152  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5153  suballocItem != m_Suballocations.cend();
5154  ++suballocItem)
5155  {
5156  const VmaSuballocation& suballoc = *suballocItem;
5157  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5158  {
5159  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5160  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5161  }
5162  else
5163  {
5164  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5165  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5166  }
5167  }
5168 }
5169 
5170 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5171 {
5172  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5173 
5174  inoutStats.size += m_Size;
5175  inoutStats.unusedSize += m_SumFreeSize;
5176  inoutStats.allocationCount += rangeCount - m_FreeCount;
5177  inoutStats.unusedRangeCount += m_FreeCount;
5178  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5179 }
5180 
5181 #if VMA_STATS_STRING_ENABLED
5182 
// Serializes this block's complete suballocation map to JSON for debugging
// and statistics dumps. The key ordering of the writer calls below is the
// output format, so their sequence is intentional.
void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(m_Size);

    json.WriteString("UnusedBytes");
    json.WriteNumber(m_SumFreeSize);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(m_FreeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        json.BeginObject(true);

        json.WriteString("Type");
        json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);

        json.WriteString("Size");
        json.WriteNumber(suballocItem->size);

        json.WriteString("Offset");
        json.WriteNumber(suballocItem->offset);

        // Used ranges may carry user data: either a user-provided string
        // (written verbatim) or an opaque pointer value.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            const void* pUserData = suballocItem->hAllocation->GetUserData();
            if(pUserData != VMA_NULL)
            {
                json.WriteString("UserData");
                if(suballocItem->hAllocation->IsUserDataString())
                {
                    json.WriteString((const char*)pUserData);
                }
                else
                {
                    json.BeginString();
                    json.ContinueString_Pointer(pUserData);
                    json.EndString();
                }
            }
        }

        json.EndObject();
    }
    json.EndArray();

    json.EndObject();
}
5242 
5243 #endif // #if VMA_STATS_STRING_ENABLED
5244 
5245 /*
5246 How many suitable free suballocations to analyze before choosing best one.
5247 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
5248  be chosen.
5249 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5250  suballocations will be analyzed and the best one will be chosen.
5251 - Any other value is also acceptable.
5252 */
5253 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5254 
// Fills a request for allocating inside an entirely empty block: offset 0,
// targeting the single (free) suballocation, with nothing to make lost.
void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(IsEmpty());
    pAllocationRequest->offset = 0;
    pAllocationRequest->sumFreeSize = m_SumFreeSize;
    pAllocationRequest->sumItemSize = 0;
    pAllocationRequest->item = m_Suballocations.begin();
    pAllocationRequest->itemsToMakeLostCount = 0;
}
5264 
// Tries to find a place for an allocation of the given size, alignment and
// type inside this block. On success fills *pAllocationRequest and returns
// true. First searches the sorted free-range index (best-fit or worst-fit
// depending on VMA_BEST_FIT). If that fails and canMakeOtherLost is true,
// falls back to a brute-force scan that may propose making other
// lost-enabled allocations lost, choosing the candidate with lowest
// CalcCost().
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Walk upward through larger ranges until one passes all checks
            // (alignment/margins/granularity may disqualify a large-enough range).
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE serves as an "infinite cost" sentinel so the first
        // valid candidate always wins the CalcCost comparison below.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Consider every range that is free or could be made lost.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest viable request found so far.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize still at the sentinel means no candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
5393 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// walking forward from pAllocationRequest->item. Returns false if any of
// them refuses to become lost (MakeLost fails). On success the request's
// item points at a free suballocation ready to receive the new allocation.
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free range. One step suffices: adjacent free ranges
        // are always merged (see FreeSuballocation), so two in a row cannot occur.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; continue from the
            // iterator it returns.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
5425 
5426 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5427 {
5428  uint32_t lostAllocationCount = 0;
5429  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5430  it != m_Suballocations.end();
5431  ++it)
5432  {
5433  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5434  it->hAllocation->CanBecomeLost() &&
5435  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5436  {
5437  it = FreeSuballocation(it);
5438  ++lostAllocationCount;
5439  }
5440  }
5441  return lostAllocationCount;
5442 }
5443 
// Commits an allocation into the free suballocation selected in `request`:
// the free range is converted to a used range of exactly allocSize at
// request.offset, and any leftover bytes before/after it become new free
// suballocations. Totals (m_FreeCount, m_SumFreeSize) are updated to match.
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must precede the size mutation below, since the
    // by-size index is searched by the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, up to two new ones created.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
5507 
5508 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5509 {
5510  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5511  suballocItem != m_Suballocations.end();
5512  ++suballocItem)
5513  {
5514  VmaSuballocation& suballoc = *suballocItem;
5515  if(suballoc.hAllocation == allocation)
5516  {
5517  FreeSuballocation(suballocItem);
5518  VMA_HEAVY_ASSERT(Validate());
5519  return;
5520  }
5521  }
5522  VMA_ASSERT(0 && "Not found!");
5523 }
5524 
5525 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5526 {
5527  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5528  suballocItem != m_Suballocations.end();
5529  ++suballocItem)
5530  {
5531  VmaSuballocation& suballoc = *suballocItem;
5532  if(suballoc.offset == offset)
5533  {
5534  FreeSuballocation(suballocItem);
5535  return;
5536  }
5537  }
5538  VMA_ASSERT(0 && "Not found!");
5539 }
5540 
5541 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5542 {
5543  VkDeviceSize lastSize = 0;
5544  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5545  {
5546  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5547 
5548  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5549  {
5550  VMA_ASSERT(0);
5551  return false;
5552  }
5553  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5554  {
5555  VMA_ASSERT(0);
5556  return false;
5557  }
5558  if(it->size < lastSize)
5559  {
5560  VMA_ASSERT(0);
5561  return false;
5562  }
5563 
5564  lastSize = it->size;
5565  }
5566  return true;
5567 }
5568 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
// bufferImageGranularity. On success fills *pOffset with the final aligned
// offset and returns true.
// With canMakeOtherLost == true, suballocItem may be a used range: the
// function then also counts how many lost-enabled allocations would have to
// be made lost (*itemsToMakeLostCount) and accumulates the free/used bytes
// the request would consume (*pSumFreeSize / *pSumItemSize).
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting range is used: it must itself be losable, and old
            // enough (outside the frame-in-use window) to be made lost now.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Every used range we pass through must also be losable
                    // and outside the frame-in-use window.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the starting range must be free and large enough.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
5850 
5851 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5852 {
5853  VMA_ASSERT(item != m_Suballocations.end());
5854  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5855 
5856  VmaSuballocationList::iterator nextItem = item;
5857  ++nextItem;
5858  VMA_ASSERT(nextItem != m_Suballocations.end());
5859  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5860 
5861  item->size += nextItem->size;
5862  --m_FreeCount;
5863  m_Suballocations.erase(nextItem);
5864 }
5865 
// Marks the given suballocation as free, merges it with adjacent free
// neighbors, and keeps the by-size index consistent. Returns the iterator
// of the resulting (possibly merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item must be removed from the by-size index before
        // MergeFreeWithNext erases it from the list.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous item's size is about to change, so it is unregistered
        // first and re-registered (with its new size) after the merge.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
5917 
5918 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5919 {
5920  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5921  VMA_ASSERT(item->size > 0);
5922 
5923  // You may want to enable this validation at the beginning or at the end of
5924  // this function, depending on what do you want to check.
5925  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5926 
5927  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5928  {
5929  if(m_FreeSuballocationsBySize.empty())
5930  {
5931  m_FreeSuballocationsBySize.push_back(item);
5932  }
5933  else
5934  {
5935  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5936  }
5937  }
5938 
5939  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5940 }
5941 
5942 
// Removes `item` from m_FreeSuballocationsBySize. Binary-searches by the
// item's size to find the first candidate, then scans forward through
// equal-sized entries until the exact iterator is matched.
void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    // Ranges below the threshold were never registered (see
    // RegisterFreeSuballocation), so there is nothing to remove for them.
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Still inside the run of equal-sized entries, otherwise the item
            // is missing from the index.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
5975 
5977 // class VmaDeviceMemoryMapping
5978 
// Fresh mapping state: zero outstanding Map() references, no cached host pointer.
VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
5984 
// Verifies that every Map() was balanced by an Unmap() before destruction.
VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
{
    VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
}
5989 
5990 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5991 {
5992  if(count == 0)
5993  {
5994  return VK_SUCCESS;
5995  }
5996 
5997  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5998  if(m_MapCount != 0)
5999  {
6000  m_MapCount += count;
6001  VMA_ASSERT(m_pMappedData != VMA_NULL);
6002  if(ppData != VMA_NULL)
6003  {
6004  *ppData = m_pMappedData;
6005  }
6006  return VK_SUCCESS;
6007  }
6008  else
6009  {
6010  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6011  hAllocator->m_hDevice,
6012  hMemory,
6013  0, // offset
6014  VK_WHOLE_SIZE,
6015  0, // flags
6016  &m_pMappedData);
6017  if(result == VK_SUCCESS)
6018  {
6019  if(ppData != VMA_NULL)
6020  {
6021  *ppData = m_pMappedData;
6022  }
6023  m_MapCount = count;
6024  }
6025  return result;
6026  }
6027 }
6028 
6029 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
6030 {
6031  if(count == 0)
6032  {
6033  return;
6034  }
6035 
6036  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6037  if(m_MapCount >= count)
6038  {
6039  m_MapCount -= count;
6040  if(m_MapCount == 0)
6041  {
6042  m_pMappedData = VMA_NULL;
6043  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
6044  }
6045  }
6046  else
6047  {
6048  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6049  }
6050 }
6051 
6053 // class VmaDeviceMemoryBlock
6054 
// Creates an uninitialized block; real setup (memory handle, size) happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_MemoryTypeIndex(UINT32_MAX),
    m_hMemory(VK_NULL_HANDLE),
    m_Metadata(hAllocator)
{
}
6061 
// Attaches a freshly allocated VkDeviceMemory to this block and initializes
// its suballocation metadata. Must be called exactly once per block (asserted
// by checking the handle is still null).
void VmaDeviceMemoryBlock::Init(
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_hMemory = newMemory;

    m_Metadata.Init(newSize);
}
6074 
// Releases the block's VkDeviceMemory back to the allocator. The block must
// be empty (all suballocations freed) - this is the library's main leak check.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
6085 
6086 bool VmaDeviceMemoryBlock::Validate() const
6087 {
6088  if((m_hMemory == VK_NULL_HANDLE) ||
6089  (m_Metadata.GetSize() == 0))
6090  {
6091  return false;
6092  }
6093 
6094  return m_Metadata.Validate();
6095 }
6096 
// Reference-counted map of this block's memory; see VmaDeviceMemoryMapping::Map.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
}
6101 
// Releases count map references; see VmaDeviceMemoryMapping::Unmap.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    m_Mapping.Unmap(hAllocator, m_hMemory, count);
}
6106 
6107 static void InitStatInfo(VmaStatInfo& outInfo)
6108 {
6109  memset(&outInfo, 0, sizeof(outInfo));
6110  outInfo.allocationSizeMin = UINT64_MAX;
6111  outInfo.unusedRangeSizeMin = UINT64_MAX;
6112 }
6113 
6114 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6115 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6116 {
6117  inoutInfo.blockCount += srcInfo.blockCount;
6118  inoutInfo.allocationCount += srcInfo.allocationCount;
6119  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6120  inoutInfo.usedBytes += srcInfo.usedBytes;
6121  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6122  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6123  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6124  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6125  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6126 }
6127 
6128 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6129 {
6130  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6131  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6132  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6133  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6134 }
6135 
// A custom pool is a thin wrapper around a VmaBlockVector configured from the
// user-supplied VmaPoolCreateInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
6150 
// Nothing to do here: m_BlockVector releases its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
6154 
6155 #if VMA_STATS_STRING_ENABLED
6156 
6157 #endif // #if VMA_STATS_STRING_ENABLED
6158 
// Stores the vector's configuration; no blocks are created here (see
// CreateMinBlocks / Allocate for that). isCustomPool distinguishes
// user-created pools (fixed block size) from the allocator's default pools.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
6181 
6182 VmaBlockVector::~VmaBlockVector()
6183 {
6184  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6185 
6186  for(size_t i = m_Blocks.size(); i--; )
6187  {
6188  m_Blocks[i]->Destroy(m_hAllocator);
6189  vma_delete(m_hAllocator, m_Blocks[i]);
6190  }
6191 }
6192 
6193 VkResult VmaBlockVector::CreateMinBlocks()
6194 {
6195  for(size_t i = 0; i < m_MinBlockCount; ++i)
6196  {
6197  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6198  if(res != VK_SUCCESS)
6199  {
6200  return res;
6201  }
6202  }
6203  return VK_SUCCESS;
6204 }
6205 
// Fills pStats with aggregate statistics over all blocks in this vector.
// Thread-safe: holds m_Mutex while iterating.
void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
{
    // Start from zero; each block's metadata accumulates into *pStats.
    pStats->size = 0;
    pStats->unusedSize = 0;
    pStats->allocationCount = 0;
    pStats->unusedRangeCount = 0;
    pStats->unusedRangeSizeMax = 0;

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_Metadata.AddPoolStats(*pStats);
    }
}
6224 
// Maximum number of retries in VmaBlockVector::Allocate when allocating with
// "can make other lost" and other threads keep touching the candidate allocations.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6226 
6227 VkResult VmaBlockVector::Allocate(
6228  VmaPool hCurrentPool,
6229  uint32_t currentFrameIndex,
6230  const VkMemoryRequirements& vkMemReq,
6231  const VmaAllocationCreateInfo& createInfo,
6232  VmaSuballocationType suballocType,
6233  VmaAllocation* pAllocation)
6234 {
6235  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6236  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6237 
6238  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6239 
6240  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6241  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6242  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6243  {
6244  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6245  VMA_ASSERT(pCurrBlock);
6246  VmaAllocationRequest currRequest = {};
6247  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6248  currentFrameIndex,
6249  m_FrameInUseCount,
6250  m_BufferImageGranularity,
6251  vkMemReq.size,
6252  vkMemReq.alignment,
6253  suballocType,
6254  false, // canMakeOtherLost
6255  &currRequest))
6256  {
6257  // Allocate from pCurrBlock.
6258  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6259 
6260  if(mapped)
6261  {
6262  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6263  if(res != VK_SUCCESS)
6264  {
6265  return res;
6266  }
6267  }
6268 
6269  // We no longer have an empty Allocation.
6270  if(pCurrBlock->m_Metadata.IsEmpty())
6271  {
6272  m_HasEmptyBlock = false;
6273  }
6274 
6275  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6276  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6277  (*pAllocation)->InitBlockAllocation(
6278  hCurrentPool,
6279  pCurrBlock,
6280  currRequest.offset,
6281  vkMemReq.alignment,
6282  vkMemReq.size,
6283  suballocType,
6284  mapped,
6285  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6286  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6287  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6288  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6289  return VK_SUCCESS;
6290  }
6291  }
6292 
6293  const bool canCreateNewBlock =
6294  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6295  (m_Blocks.size() < m_MaxBlockCount);
6296 
6297  // 2. Try to create new block.
6298  if(canCreateNewBlock)
6299  {
6300  // Calculate optimal size for new block.
6301  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6302  uint32_t newBlockSizeShift = 0;
6303  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6304 
6305  // Allocating blocks of other sizes is allowed only in default pools.
6306  // In custom pools block size is fixed.
6307  if(m_IsCustomPool == false)
6308  {
6309  // Allocate 1/8, 1/4, 1/2 as first blocks.
6310  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6311  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6312  {
6313  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6314  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6315  {
6316  newBlockSize = smallerNewBlockSize;
6317  ++newBlockSizeShift;
6318  }
6319  else
6320  {
6321  break;
6322  }
6323  }
6324  }
6325 
6326  size_t newBlockIndex = 0;
6327  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6328  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6329  if(m_IsCustomPool == false)
6330  {
6331  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6332  {
6333  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6334  if(smallerNewBlockSize >= vkMemReq.size)
6335  {
6336  newBlockSize = smallerNewBlockSize;
6337  ++newBlockSizeShift;
6338  res = CreateBlock(newBlockSize, &newBlockIndex);
6339  }
6340  else
6341  {
6342  break;
6343  }
6344  }
6345  }
6346 
6347  if(res == VK_SUCCESS)
6348  {
6349  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6350  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6351 
6352  if(mapped)
6353  {
6354  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6355  if(res != VK_SUCCESS)
6356  {
6357  return res;
6358  }
6359  }
6360 
6361  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
6362  VmaAllocationRequest allocRequest;
6363  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6364  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6365  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6366  (*pAllocation)->InitBlockAllocation(
6367  hCurrentPool,
6368  pBlock,
6369  allocRequest.offset,
6370  vkMemReq.alignment,
6371  vkMemReq.size,
6372  suballocType,
6373  mapped,
6374  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6375  VMA_HEAVY_ASSERT(pBlock->Validate());
6376  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6377  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6378  return VK_SUCCESS;
6379  }
6380  }
6381 
6382  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6383 
6384  // 3. Try to allocate from existing blocks with making other allocations lost.
6385  if(canMakeOtherLost)
6386  {
6387  uint32_t tryIndex = 0;
6388  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6389  {
6390  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6391  VmaAllocationRequest bestRequest = {};
6392  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6393 
6394  // 1. Search existing allocations.
6395  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6396  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6397  {
6398  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6399  VMA_ASSERT(pCurrBlock);
6400  VmaAllocationRequest currRequest = {};
6401  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6402  currentFrameIndex,
6403  m_FrameInUseCount,
6404  m_BufferImageGranularity,
6405  vkMemReq.size,
6406  vkMemReq.alignment,
6407  suballocType,
6408  canMakeOtherLost,
6409  &currRequest))
6410  {
6411  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6412  if(pBestRequestBlock == VMA_NULL ||
6413  currRequestCost < bestRequestCost)
6414  {
6415  pBestRequestBlock = pCurrBlock;
6416  bestRequest = currRequest;
6417  bestRequestCost = currRequestCost;
6418 
6419  if(bestRequestCost == 0)
6420  {
6421  break;
6422  }
6423  }
6424  }
6425  }
6426 
6427  if(pBestRequestBlock != VMA_NULL)
6428  {
6429  if(mapped)
6430  {
6431  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6432  if(res != VK_SUCCESS)
6433  {
6434  return res;
6435  }
6436  }
6437 
6438  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6439  currentFrameIndex,
6440  m_FrameInUseCount,
6441  &bestRequest))
6442  {
6443  // We no longer have an empty Allocation.
6444  if(pBestRequestBlock->m_Metadata.IsEmpty())
6445  {
6446  m_HasEmptyBlock = false;
6447  }
6448  // Allocate from this pBlock.
6449  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6450  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6451  (*pAllocation)->InitBlockAllocation(
6452  hCurrentPool,
6453  pBestRequestBlock,
6454  bestRequest.offset,
6455  vkMemReq.alignment,
6456  vkMemReq.size,
6457  suballocType,
6458  mapped,
6459  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6460  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6461  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6462  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6463  return VK_SUCCESS;
6464  }
6465  // else: Some allocations must have been touched while we are here. Next try.
6466  }
6467  else
6468  {
6469  // Could not find place in any of the blocks - break outer loop.
6470  break;
6471  }
6472  }
6473  /* Maximum number of tries exceeded - a very unlike event when many other
6474  threads are simultaneously touching allocations making it impossible to make
6475  lost at the same time as we try to allocate. */
6476  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6477  {
6478  return VK_ERROR_TOO_MANY_OBJECTS;
6479  }
6480  }
6481 
6482  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6483 }
6484 
6485 void VmaBlockVector::Free(
6486  VmaAllocation hAllocation)
6487 {
6488  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6489 
6490  // Scope for lock.
6491  {
6492  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6493 
6494  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6495 
6496  if(hAllocation->IsPersistentMap())
6497  {
6498  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6499  }
6500 
6501  pBlock->m_Metadata.Free(hAllocation);
6502  VMA_HEAVY_ASSERT(pBlock->Validate());
6503 
6504  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6505 
6506  // pBlock became empty after this deallocation.
6507  if(pBlock->m_Metadata.IsEmpty())
6508  {
6509  // Already has empty Allocation. We don't want to have two, so delete this one.
6510  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6511  {
6512  pBlockToDelete = pBlock;
6513  Remove(pBlock);
6514  }
6515  // We now have first empty Allocation.
6516  else
6517  {
6518  m_HasEmptyBlock = true;
6519  }
6520  }
6521  // pBlock didn't become empty, but we have another empty block - find and free that one.
6522  // (This is optional, heuristics.)
6523  else if(m_HasEmptyBlock)
6524  {
6525  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6526  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6527  {
6528  pBlockToDelete = pLastBlock;
6529  m_Blocks.pop_back();
6530  m_HasEmptyBlock = false;
6531  }
6532  }
6533 
6534  IncrementallySortBlocks();
6535  }
6536 
6537  // Destruction of a free Allocation. Deferred until this point, outside of mutex
6538  // lock, for performance reason.
6539  if(pBlockToDelete != VMA_NULL)
6540  {
6541  VMA_DEBUG_LOG(" Deleted empty allocation");
6542  pBlockToDelete->Destroy(m_hAllocator);
6543  vma_delete(m_hAllocator, pBlockToDelete);
6544  }
6545 }
6546 
// Returns the size of the largest existing block; exits early once a block at
// least as large as m_PreferredBlockSize is found (larger sizes don't change
// the caller's decision in Allocate).
// NOTE(review): the result is size_t while block sizes are VkDeviceSize
// (64-bit) - on a platform with 32-bit size_t this could truncate; verify
// target platforms before relying on exact values here.
size_t VmaBlockVector::CalcMaxBlockSize() const
{
    size_t result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}
6560 
6561 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6562 {
6563  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6564  {
6565  if(m_Blocks[blockIndex] == pBlock)
6566  {
6567  VmaVectorRemove(m_Blocks, blockIndex);
6568  return;
6569  }
6570  }
6571  VMA_ASSERT(0);
6572 }
6573 
6574 void VmaBlockVector::IncrementallySortBlocks()
6575 {
6576  // Bubble sort only until first swap.
6577  for(size_t i = 1; i < m_Blocks.size(); ++i)
6578  {
6579  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6580  {
6581  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6582  return;
6583  }
6584  }
6585 }
6586 
6587 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6588 {
6589  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6590  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6591  allocInfo.allocationSize = blockSize;
6592  VkDeviceMemory mem = VK_NULL_HANDLE;
6593  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6594  if(res < 0)
6595  {
6596  return res;
6597  }
6598 
6599  // New VkDeviceMemory successfully created.
6600 
6601  // Create new Allocation for it.
6602  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6603  pBlock->Init(
6604  m_MemoryTypeIndex,
6605  mem,
6606  allocInfo.allocationSize);
6607 
6608  m_Blocks.push_back(pBlock);
6609  if(pNewBlockIndex != VMA_NULL)
6610  {
6611  *pNewBlockIndex = m_Blocks.size() - 1;
6612  }
6613 
6614  return VK_SUCCESS;
6615 }
6616 
6617 #if VMA_STATS_STRING_ENABLED
6618 
// Writes this vector's configuration and per-block detailed maps as one JSON
// object. The emission order is significant for the output format, so the
// call sequence must not be reordered. Thread-safe: holds m_Mutex.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    // Custom pools report their exact configuration; default pools only the
    // preferred block size.
    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Each block emits its own detailed suballocation map.
    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
6671 
6672 #endif // #if VMA_STATS_STRING_ENABLED
6673 
6674 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6675  VmaAllocator hAllocator,
6676  uint32_t currentFrameIndex)
6677 {
6678  if(m_pDefragmentator == VMA_NULL)
6679  {
6680  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6681  hAllocator,
6682  this,
6683  currentFrameIndex);
6684  }
6685 
6686  return m_pDefragmentator;
6687 }
6688 
// Runs the defragmentator (if one exists), accumulates its statistics into
// pDefragmentationStats, decrements the caller's remaining move budgets, and
// frees blocks that became empty (keeping at least m_MinBlockCount).
// Thread-safe: holds m_Mutex for the whole operation.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // No defragmentator was ever requested - nothing to do.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the caller's remaining budgets by what this vector consumed.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backwards so VmaVectorRemove does not invalidate pending indices.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
6745 
6746 void VmaBlockVector::DestroyDefragmentator()
6747 {
6748  if(m_pDefragmentator != VMA_NULL)
6749  {
6750  vma_delete(m_hAllocator, m_pDefragmentator);
6751  m_pDefragmentator = VMA_NULL;
6752  }
6753 }
6754 
6755 void VmaBlockVector::MakePoolAllocationsLost(
6756  uint32_t currentFrameIndex,
6757  size_t* pLostAllocationCount)
6758 {
6759  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6760  size_t lostAllocationCount = 0;
6761  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6762  {
6763  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6764  VMA_ASSERT(pBlock);
6765  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6766  }
6767  if(pLostAllocationCount != VMA_NULL)
6768  {
6769  *pLostAllocationCount = lostAllocationCount;
6770  }
6771 }
6772 
// Accumulates per-block statistics into pStats: the global total, this
// vector's memory type bucket, and the corresponding memory heap bucket.
// Thread-safe: holds m_Mutex while iterating.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
6792 
6794 // VmaDefragmentator members definition
6795 
// Binds the defragmentator to one block vector and records the frame index
// used for "in use" decisions; counters start at zero and the work lists empty.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
6809 
// Frees the per-block bookkeeping objects created during Defragment().
VmaDefragmentator::~VmaDefragmentator()
{
    for(size_t i = m_Blocks.size(); i--; )
    {
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
6817 
6818 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6819 {
6820  AllocationInfo allocInfo;
6821  allocInfo.m_hAllocation = hAlloc;
6822  allocInfo.m_pChanged = pChanged;
6823  m_Allocations.push_back(allocInfo);
6824 }
6825 
// Returns a host pointer to the block's memory, mapping it on first use.
// Priority: (1) a mapping this defragmentator already created, (2) a mapping
// the block already has for other reasons (no extra reference taken in that
// case - the block's own mapping is reused), (3) a fresh Map() whose pointer
// is cached in m_pMappedDataForDefragmentation and released later in Unmap().
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is originally mapped.
    if(m_pBlock->m_Mapping.GetMappedData())
    {
        *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
        return VK_SUCCESS;
    }

    // Map on first usage.
    // NOTE(review): on failure m_pMappedDataForDefragmentation is presumably
    // left unchanged by Map(); its initial value is set outside this view -
    // confirm it starts as VMA_NULL.
    VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
6847 
// Releases the map reference taken by EnsureMapping's "map on first usage"
// path. Mappings the block held for other reasons are untouched.
// NOTE(review): m_pMappedDataForDefragmentation is not reset to VMA_NULL
// here - looks intentional for this object's one-shot lifetime, but confirm
// no caller reuses the BlockInfo after Unmap.
void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
{
    if(m_pMappedDataForDefragmentation != VMA_NULL)
    {
        m_pBlock->Unmap(hAllocator, 1);
    }
}
6855 
// One round of defragmentation: repeatedly takes an allocation from the most
// "source-like" block (end of m_Blocks, largest-first within a block) and
// tries to move it to an earlier, more "destination-like" block, copying the
// bytes through mapped pointers and updating the metadata. Stops when the
// byte/allocation budgets would be exceeded (VK_INCOMPLETE), a mapping fails,
// or there is nothing left to move (VK_SUCCESS).
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Cursor over (block, allocation); SIZE_MAX means "start at the last
    // allocation of the current block".
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be host-visible through a mapping to copy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Commit the move in the metadata, then point the allocation
                // object at its new block/offset.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance the cursor to the previous allocation / previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
6986 
6987 VkResult VmaDefragmentator::Defragment(
6988  VkDeviceSize maxBytesToMove,
6989  uint32_t maxAllocationsToMove)
6990 {
6991  if(m_Allocations.empty())
6992  {
6993  return VK_SUCCESS;
6994  }
6995 
6996  // Create block info for each block.
6997  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6998  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6999  {
7000  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7001  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7002  m_Blocks.push_back(pBlockInfo);
7003  }
7004 
7005  // Sort them by m_pBlock pointer value.
7006  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7007 
7008  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
7009  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7010  {
7011  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7012  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
7013  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7014  {
7015  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7016  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7017  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7018  {
7019  (*it)->m_Allocations.push_back(allocInfo);
7020  }
7021  else
7022  {
7023  VMA_ASSERT(0);
7024  }
7025  }
7026  }
7027  m_Allocations.clear();
7028 
7029  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7030  {
7031  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7032  pBlockInfo->CalcHasNonMovableAllocations();
7033  pBlockInfo->SortAllocationsBySizeDescecnding();
7034  }
7035 
7036  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
7037  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7038 
7039  // Execute defragmentation rounds (the main part).
7040  VkResult result = VK_SUCCESS;
7041  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7042  {
7043  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7044  }
7045 
7046  // Unmap blocks that were mapped for defragmentation.
7047  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7048  {
7049  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7050  }
7051 
7052  return result;
7053 }
7054 
7055 bool VmaDefragmentator::MoveMakesSense(
7056  size_t dstBlockIndex, VkDeviceSize dstOffset,
7057  size_t srcBlockIndex, VkDeviceSize srcOffset)
7058 {
7059  if(dstBlockIndex < srcBlockIndex)
7060  {
7061  return true;
7062  }
7063  if(dstBlockIndex > srcBlockIndex)
7064  {
7065  return false;
7066  }
7067  if(dstOffset < srcOffset)
7068  {
7069  return true;
7070  }
7071  return false;
7072 }
7073 
7075 // VmaAllocator_T
7076 
// Constructs the allocator from user-provided creation parameters: caches
// device handles and callbacks, resolves Vulkan entry points, queries
// physical-device properties, applies optional per-heap size limits, and
// creates one default block vector plus one dedicated-allocation list per
// Vulkan memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    // Internal mutexes are used unless the user promises external synchronization.
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0), // final value is computed below
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // VK_WHOLE_SIZE means "no user-imposed limit" for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    // Resolve Vulkan entry points (statically linked and/or user-provided).
    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Apply optional per-heap size limits; also clamp the reported heap sizes
    // so subsequent block-size heuristics see the limited values.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default (non-custom-pool) block vector and one dedicated-allocation
    // list per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false); // isCustomPool
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}
7151 
7152 VmaAllocator_T::~VmaAllocator_T()
7153 {
7154  VMA_ASSERT(m_Pools.empty());
7155 
7156  for(size_t i = GetMemoryTypeCount(); i--; )
7157  {
7158  vma_delete(this, m_pDedicatedAllocations[i]);
7159  vma_delete(this, m_pBlockVectors[i]);
7160  }
7161 }
7162 
// Populates m_VulkanFunctions. When VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically-linked core functions serve as defaults (KHR extension entry
// points are fetched via vkGetDeviceProcAddr); any non-null pointer in
// pVulkanFunctions then overrides the corresponding default. Finally asserts
// that every function the allocator needs has been resolved.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not exported statically - fetch them by name.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single member from pVulkanFunctions when the user provided it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    // The KHR entry points are required only when dedicated allocation is enabled.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
7236 
7237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7238 {
7239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7243 }
7244 
7245 VkResult VmaAllocator_T::AllocateMemoryOfType(
7246  const VkMemoryRequirements& vkMemReq,
7247  bool dedicatedAllocation,
7248  VkBuffer dedicatedBuffer,
7249  VkImage dedicatedImage,
7250  const VmaAllocationCreateInfo& createInfo,
7251  uint32_t memTypeIndex,
7252  VmaSuballocationType suballocType,
7253  VmaAllocation* pAllocation)
7254 {
7255  VMA_ASSERT(pAllocation != VMA_NULL);
7256  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7257 
7258  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7259 
7260  // If memory type is not HOST_VISIBLE, disable MAPPED.
7261  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7262  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7263  {
7264  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7265  }
7266 
7267  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7268  VMA_ASSERT(blockVector);
7269 
7270  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7271  bool preferDedicatedMemory =
7272  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7273  dedicatedAllocation ||
7274  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
7275  vkMemReq.size > preferredBlockSize / 2;
7276 
7277  if(preferDedicatedMemory &&
7278  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7279  finalCreateInfo.pool == VK_NULL_HANDLE)
7280  {
7282  }
7283 
7284  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7285  {
7286  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7287  {
7288  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7289  }
7290  else
7291  {
7292  return AllocateDedicatedMemory(
7293  vkMemReq.size,
7294  suballocType,
7295  memTypeIndex,
7296  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7298  finalCreateInfo.pUserData,
7299  dedicatedBuffer,
7300  dedicatedImage,
7301  pAllocation);
7302  }
7303  }
7304  else
7305  {
7306  VkResult res = blockVector->Allocate(
7307  VK_NULL_HANDLE, // hCurrentPool
7308  m_CurrentFrameIndex.load(),
7309  vkMemReq,
7310  finalCreateInfo,
7311  suballocType,
7312  pAllocation);
7313  if(res == VK_SUCCESS)
7314  {
7315  return res;
7316  }
7317 
7318  // 5. Try dedicated memory.
7319  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7320  {
7321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7322  }
7323  else
7324  {
7325  res = AllocateDedicatedMemory(
7326  vkMemReq.size,
7327  suballocType,
7328  memTypeIndex,
7329  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7330  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7331  finalCreateInfo.pUserData,
7332  dedicatedBuffer,
7333  dedicatedImage,
7334  pAllocation);
7335  if(res == VK_SUCCESS)
7336  {
7337  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
7338  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7339  return VK_SUCCESS;
7340  }
7341  else
7342  {
7343  // Everything failed: Return error code.
7344  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7345  return res;
7346  }
7347  }
7348  }
7349 }
7350 
// Allocates a whole VkDeviceMemory object exclusively for one allocation
// (never suballocated). When KHR_dedicated_allocation is enabled, the memory
// is tied to the given buffer or image via the pNext chain. Optionally maps
// the memory persistently. On success *pAllocation receives the new handle
// and it is registered in m_pDedicatedAllocations; on failure all partially
// acquired resources are released before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // At most one of dedicatedBuffer / dedicatedImage may be non-null; the
    // chained struct dedicates the memory to that resource.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Optional persistent mapping; on failure the freshly allocated memory is
    // released so nothing leaks.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        // Kept sorted by pointer so FreeDedicatedMemory can binary-search.
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
7427 
7428 void VmaAllocator_T::GetBufferMemoryRequirements(
7429  VkBuffer hBuffer,
7430  VkMemoryRequirements& memReq,
7431  bool& requiresDedicatedAllocation,
7432  bool& prefersDedicatedAllocation) const
7433 {
7434  if(m_UseKhrDedicatedAllocation)
7435  {
7436  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7437  memReqInfo.buffer = hBuffer;
7438 
7439  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7440 
7441  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7442  memReq2.pNext = &memDedicatedReq;
7443 
7444  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7445 
7446  memReq = memReq2.memoryRequirements;
7447  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7448  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7449  }
7450  else
7451  {
7452  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7453  requiresDedicatedAllocation = false;
7454  prefersDedicatedAllocation = false;
7455  }
7456 }
7457 
7458 void VmaAllocator_T::GetImageMemoryRequirements(
7459  VkImage hImage,
7460  VkMemoryRequirements& memReq,
7461  bool& requiresDedicatedAllocation,
7462  bool& prefersDedicatedAllocation) const
7463 {
7464  if(m_UseKhrDedicatedAllocation)
7465  {
7466  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7467  memReqInfo.image = hImage;
7468 
7469  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7470 
7471  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7472  memReq2.pNext = &memDedicatedReq;
7473 
7474  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7475 
7476  memReq = memReq2.memoryRequirements;
7477  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7478  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7479  }
7480  else
7481  {
7482  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7483  requiresDedicatedAllocation = false;
7484  prefersDedicatedAllocation = false;
7485  }
7486 }
7487 
7488 VkResult VmaAllocator_T::AllocateMemory(
7489  const VkMemoryRequirements& vkMemReq,
7490  bool requiresDedicatedAllocation,
7491  bool prefersDedicatedAllocation,
7492  VkBuffer dedicatedBuffer,
7493  VkImage dedicatedImage,
7494  const VmaAllocationCreateInfo& createInfo,
7495  VmaSuballocationType suballocType,
7496  VmaAllocation* pAllocation)
7497 {
7498  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7499  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7500  {
7501  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7502  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7503  }
7504  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7506  {
7507  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7508  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7509  }
7510  if(requiresDedicatedAllocation)
7511  {
7512  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7513  {
7514  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7515  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7516  }
7517  if(createInfo.pool != VK_NULL_HANDLE)
7518  {
7519  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7521  }
7522  }
7523  if((createInfo.pool != VK_NULL_HANDLE) &&
7524  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7525  {
7526  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7527  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7528  }
7529 
7530  if(createInfo.pool != VK_NULL_HANDLE)
7531  {
7532  return createInfo.pool->m_BlockVector.Allocate(
7533  createInfo.pool,
7534  m_CurrentFrameIndex.load(),
7535  vkMemReq,
7536  createInfo,
7537  suballocType,
7538  pAllocation);
7539  }
7540  else
7541  {
7542  // Bit mask of memory Vulkan types acceptable for this allocation.
7543  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7544  uint32_t memTypeIndex = UINT32_MAX;
7545  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7546  if(res == VK_SUCCESS)
7547  {
7548  res = AllocateMemoryOfType(
7549  vkMemReq,
7550  requiresDedicatedAllocation || prefersDedicatedAllocation,
7551  dedicatedBuffer,
7552  dedicatedImage,
7553  createInfo,
7554  memTypeIndex,
7555  suballocType,
7556  pAllocation);
7557  // Succeeded on first try.
7558  if(res == VK_SUCCESS)
7559  {
7560  return res;
7561  }
7562  // Allocation from this memory type failed. Try other compatible memory types.
7563  else
7564  {
7565  for(;;)
7566  {
7567  // Remove old memTypeIndex from list of possibilities.
7568  memoryTypeBits &= ~(1u << memTypeIndex);
7569  // Find alternative memTypeIndex.
7570  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7571  if(res == VK_SUCCESS)
7572  {
7573  res = AllocateMemoryOfType(
7574  vkMemReq,
7575  requiresDedicatedAllocation || prefersDedicatedAllocation,
7576  dedicatedBuffer,
7577  dedicatedImage,
7578  createInfo,
7579  memTypeIndex,
7580  suballocType,
7581  pAllocation);
7582  // Allocation from this alternative memory type succeeded.
7583  if(res == VK_SUCCESS)
7584  {
7585  return res;
7586  }
7587  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7588  }
7589  // No other matching memory type index could be found.
7590  else
7591  {
7592  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7593  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7594  }
7595  }
7596  }
7597  }
7598  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7599  else
7600  return res;
7601  }
7602 }
7603 
7604 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7605 {
7606  VMA_ASSERT(allocation);
7607 
7608  if(allocation->CanBecomeLost() == false ||
7609  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7610  {
7611  switch(allocation->GetType())
7612  {
7613  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7614  {
7615  VmaBlockVector* pBlockVector = VMA_NULL;
7616  VmaPool hPool = allocation->GetPool();
7617  if(hPool != VK_NULL_HANDLE)
7618  {
7619  pBlockVector = &hPool->m_BlockVector;
7620  }
7621  else
7622  {
7623  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7624  pBlockVector = m_pBlockVectors[memTypeIndex];
7625  }
7626  pBlockVector->Free(allocation);
7627  }
7628  break;
7629  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7630  FreeDedicatedMemory(allocation);
7631  break;
7632  default:
7633  VMA_ASSERT(0);
7634  }
7635  }
7636 
7637  allocation->SetUserData(this, VMA_NULL);
7638  vma_delete(this, allocation);
7639 }
7640 
7641 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7642 {
7643  // Initialize.
7644  InitStatInfo(pStats->total);
7645  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7646  InitStatInfo(pStats->memoryType[i]);
7647  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7648  InitStatInfo(pStats->memoryHeap[i]);
7649 
7650  // Process default pools.
7651  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7652  {
7653  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7654  VMA_ASSERT(pBlockVector);
7655  pBlockVector->AddStats(pStats);
7656  }
7657 
7658  // Process custom pools.
7659  {
7660  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7661  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7662  {
7663  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7664  }
7665  }
7666 
7667  // Process dedicated allocations.
7668  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7669  {
7670  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7671  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7672  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7673  VMA_ASSERT(pDedicatedAllocVector);
7674  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7675  {
7676  VmaStatInfo allocationStatInfo;
7677  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7678  VmaAddStatInfo(pStats->total, allocationStatInfo);
7679  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7680  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7681  }
7682  }
7683 
7684  // Postprocess.
7685  VmaPostprocessCalcStatInfo(pStats->total);
7686  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7687  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7688  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7689  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7690 }
7691 
// PCI vendor ID of Advanced Micro Devices, Inc. (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7693 
7694 VkResult VmaAllocator_T::Defragment(
7695  VmaAllocation* pAllocations,
7696  size_t allocationCount,
7697  VkBool32* pAllocationsChanged,
7698  const VmaDefragmentationInfo* pDefragmentationInfo,
7699  VmaDefragmentationStats* pDefragmentationStats)
7700 {
7701  if(pAllocationsChanged != VMA_NULL)
7702  {
7703  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
7704  }
7705  if(pDefragmentationStats != VMA_NULL)
7706  {
7707  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7708  }
7709 
7710  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7711 
7712  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7713 
7714  const size_t poolCount = m_Pools.size();
7715 
7716  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7717  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7718  {
7719  VmaAllocation hAlloc = pAllocations[allocIndex];
7720  VMA_ASSERT(hAlloc);
7721  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7722  // DedicatedAlloc cannot be defragmented.
7723  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7724  // Only HOST_VISIBLE memory types can be defragmented.
7725  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7726  // Lost allocation cannot be defragmented.
7727  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7728  {
7729  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7730 
7731  const VmaPool hAllocPool = hAlloc->GetPool();
7732  // This allocation belongs to custom pool.
7733  if(hAllocPool != VK_NULL_HANDLE)
7734  {
7735  pAllocBlockVector = &hAllocPool->GetBlockVector();
7736  }
7737  // This allocation belongs to general pool.
7738  else
7739  {
7740  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7741  }
7742 
7743  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7744 
7745  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7746  &pAllocationsChanged[allocIndex] : VMA_NULL;
7747  pDefragmentator->AddAllocation(hAlloc, pChanged);
7748  }
7749  }
7750 
7751  VkResult result = VK_SUCCESS;
7752 
7753  // ======== Main processing.
7754 
7755  VkDeviceSize maxBytesToMove = SIZE_MAX;
7756  uint32_t maxAllocationsToMove = UINT32_MAX;
7757  if(pDefragmentationInfo != VMA_NULL)
7758  {
7759  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7760  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7761  }
7762 
7763  // Process standard memory.
7764  for(uint32_t memTypeIndex = 0;
7765  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7766  ++memTypeIndex)
7767  {
7768  // Only HOST_VISIBLE memory types can be defragmented.
7769  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7770  {
7771  result = m_pBlockVectors[memTypeIndex]->Defragment(
7772  pDefragmentationStats,
7773  maxBytesToMove,
7774  maxAllocationsToMove);
7775  }
7776  }
7777 
7778  // Process custom pools.
7779  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7780  {
7781  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7782  pDefragmentationStats,
7783  maxBytesToMove,
7784  maxAllocationsToMove);
7785  }
7786 
7787  // ======== Destroy defragmentators.
7788 
7789  // Process custom pools.
7790  for(size_t poolIndex = poolCount; poolIndex--; )
7791  {
7792  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7793  }
7794 
7795  // Process standard memory.
7796  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7797  {
7798  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7799  {
7800  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7801  }
7802  }
7803 
7804  return result;
7805 }
7806 
// Fills *pAllocationInfo with the allocation's current properties. For
// allocations that can become lost, this also "touches" the allocation:
// its last-use frame index is advanced to the current frame via a CAS loop,
// so that the lost-allocation algorithm knows it is still in use.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size/user data only; no memory is owned.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live properties.
                // pMappedData is intentionally null - lost-capable allocations
                // are not reported as persistently mapped here.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame; on CAS failure
                // localLastUseFrameIndex is reloaded and the loop re-evaluates
                // (the allocation may have become lost concurrently).
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Allocation can never be lost: report its properties directly.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
7858 
// Touches the allocation: marks it as used in the current frame so it is not
// lost this frame. Returns false if the allocation has already been lost,
// true otherwise. Same CAS algorithm as GetAllocationInfo, minus the
// info-filling.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost - cannot be revived.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Advance last-use to the current frame; on CAS failure the
                // local copy is refreshed and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Allocations that cannot become lost are always "alive".
        return true;
    }
}
7890 
7891 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7892 {
7893  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7894 
7895  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7896 
7897  if(newCreateInfo.maxBlockCount == 0)
7898  {
7899  newCreateInfo.maxBlockCount = SIZE_MAX;
7900  }
7901  if(newCreateInfo.blockSize == 0)
7902  {
7903  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7904  }
7905 
7906  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7907 
7908  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7909  if(res != VK_SUCCESS)
7910  {
7911  vma_delete(this, *pPool);
7912  *pPool = VMA_NULL;
7913  return res;
7914  }
7915 
7916  // Add to m_Pools.
7917  {
7918  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7919  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7920  }
7921 
7922  return VK_SUCCESS;
7923 }
7924 
7925 void VmaAllocator_T::DestroyPool(VmaPool pool)
7926 {
7927  // Remove from m_Pools.
7928  {
7929  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7930  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7931  VMA_ASSERT(success && "Pool not found in Allocator.");
7932  }
7933 
7934  vma_delete(this, pool);
7935 }
7936 
7937 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7938 {
7939  pool->m_BlockVector.GetPoolStats(pPoolStats);
7940 }
7941 
7942 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7943 {
7944  m_CurrentFrameIndex.store(frameIndex);
7945 }
7946 
7947 void VmaAllocator_T::MakePoolAllocationsLost(
7948  VmaPool hPool,
7949  size_t* pLostAllocationCount)
7950 {
7951  hPool->m_BlockVector.MakePoolAllocationsLost(
7952  m_CurrentFrameIndex.load(),
7953  pLostAllocationCount);
7954 }
7955 
7956 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7957 {
7958  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7959  (*pAllocation)->InitLost();
7960 }
7961 
// Calls vkAllocateMemory, honoring the optional per-heap size limit
// (m_HeapSizeLimit) and notifying the user's device-memory allocate callback
// on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
    // NOTE(review): this check reads the slot outside the mutex; it appears to
    // rely on a limited heap's remaining budget never equaling VK_WHOLE_SIZE -
    // confirm against the member's initialization.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Budget bookkeeping is done under the lock so check-and-subtract is atomic.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the heap's remaining budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Would exceed the user-imposed heap limit: fail without calling Vulkan.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // No limit for this heap: allocate directly.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user callback only about allocations that actually succeeded.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
7995 
7996 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7997 {
7998  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7999  {
8000  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8001  }
8002 
8003  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8004 
8005  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8006  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8007  {
8008  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8009  m_HeapSizeLimit[heapIndex] += size;
8010  }
8011 }
8012 
// Maps the allocation's memory and returns a pointer to its first byte in
// *ppData. Allocations that can become lost are not mappable. For block
// (sub)allocations the whole block is reference-count mapped and the returned
// pointer is offset to this allocation.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        // Lost-capable allocations cannot be mapped safely.
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            // Map the owning block once (refcounted inside the block).
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // Offset into the block, then record the map on the allocation
                // itself so Unmap can balance it.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // Dedicated allocations own their VkDeviceMemory and map it directly.
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
8041 
8042 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8043 {
8044  switch(hAllocation->GetType())
8045  {
8046  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8047  {
8048  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8049  hAllocation->BlockAllocUnmap();
8050  pBlock->Unmap(this, 1);
8051  }
8052  break;
8053  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8054  hAllocation->DedicatedAllocUnmap(this);
8055  break;
8056  default:
8057  VMA_ASSERT(0);
8058  }
8059 }
8060 
// Frees a dedicated allocation: removes it from the per-memory-type dedicated
// allocation registry, unmaps its memory if still mapped, and releases the
// underlying VkDeviceMemory. The VmaAllocation object itself is destroyed by
// the caller.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Unregister under the per-type mutex; the lock is released before the
        // (potentially slow) Vulkan calls below.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // Unmap before freeing - freeing mapped memory is invalid usage.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
8085 
8086 #if VMA_STATS_STRING_ENABLED
8087 
// Appends a detailed map of the allocator's state to an already-open JSON
// object. Emits up to three sections, each only when non-empty:
//   "DedicatedAllocations" - per-memory-type lists of dedicated allocations,
//   "DefaultPools"         - per-memory-type default block vectors,
//   "Pools"                - array of custom pools.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the section lazily, on the first non-empty memory type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                json.BeginObject(true);

                json.WriteString("Type");
                json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                json.WriteString("Size");
                json.WriteNumber(hAlloc->GetSize());

                const void* pUserData = hAlloc->GetUserData();
                if(pUserData != VMA_NULL)
                {
                    json.WriteString("UserData");
                    if(hAlloc->IsUserDataString())
                    {
                        // User data is an allocator-owned string copy.
                        json.WriteString((const char*)pUserData);
                    }
                    else
                    {
                        // Opaque pointer: print its address as a string.
                        json.BeginString();
                        json.ContinueString_Pointer(pUserData);
                        json.EndString();
                    }
                }

                json.EndObject();
            }

            json.EndArray();
        }
    }
    // Close the section only if it was opened.
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open "DefaultPools" lazily, on the first non-empty vector.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    {
        // Custom pools are printed under the pools mutex to keep the list stable.
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
8190 
8191 #endif // #if VMA_STATS_STRING_ENABLED
8192 
8193 static VkResult AllocateMemoryForImage(
8194  VmaAllocator allocator,
8195  VkImage image,
8196  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8197  VmaSuballocationType suballocType,
8198  VmaAllocation* pAllocation)
8199 {
8200  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8201 
8202  VkMemoryRequirements vkMemReq = {};
8203  bool requiresDedicatedAllocation = false;
8204  bool prefersDedicatedAllocation = false;
8205  allocator->GetImageMemoryRequirements(image, vkMemReq,
8206  requiresDedicatedAllocation, prefersDedicatedAllocation);
8207 
8208  return allocator->AllocateMemory(
8209  vkMemReq,
8210  requiresDedicatedAllocation,
8211  prefersDedicatedAllocation,
8212  VK_NULL_HANDLE, // dedicatedBuffer
8213  image, // dedicatedImage
8214  *pAllocationCreateInfo,
8215  suballocType,
8216  pAllocation);
8217 }
8218 
8220 // Public interface
8221 
8222 VkResult vmaCreateAllocator(
8223  const VmaAllocatorCreateInfo* pCreateInfo,
8224  VmaAllocator* pAllocator)
8225 {
8226  VMA_ASSERT(pCreateInfo && pAllocator);
8227  VMA_DEBUG_LOG("vmaCreateAllocator");
8228  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8229  return VK_SUCCESS;
8230 }
8231 
8232 void vmaDestroyAllocator(
8233  VmaAllocator allocator)
8234 {
8235  if(allocator != VK_NULL_HANDLE)
8236  {
8237  VMA_DEBUG_LOG("vmaDestroyAllocator");
8238  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8239  vma_delete(&allocationCallbacks, allocator);
8240  }
8241 }
8242 
8244  VmaAllocator allocator,
8245  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8246 {
8247  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8248  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8249 }
8250 
8252  VmaAllocator allocator,
8253  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8254 {
8255  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8256  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8257 }
8258 
8260  VmaAllocator allocator,
8261  uint32_t memoryTypeIndex,
8262  VkMemoryPropertyFlags* pFlags)
8263 {
8264  VMA_ASSERT(allocator && pFlags);
8265  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8266  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8267 }
8268 
8270  VmaAllocator allocator,
8271  uint32_t frameIndex)
8272 {
8273  VMA_ASSERT(allocator);
8274  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8275 
8276  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8277 
8278  allocator->SetCurrentFrameIndex(frameIndex);
8279 }
8280 
8281 void vmaCalculateStats(
8282  VmaAllocator allocator,
8283  VmaStats* pStats)
8284 {
8285  VMA_ASSERT(allocator && pStats);
8286  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8287  allocator->CalculateStats(pStats);
8288 }
8289 
8290 #if VMA_STATS_STRING_ENABLED
8291 
// Builds a JSON string describing the allocator's state: totals, then one
// object per memory heap containing its size, flags, stats, and the memory
// types belonging to it. When detailedMap is VK_TRUE, a full detailed map is
// appended via PrintDetailedMap. The returned string is heap-allocated and
// must be released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the writer flushes into sb before the copy below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats are printed only for heaps that hold any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under the heap it belongs to.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out each property flag by name.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy out of the string builder into a null-terminated heap buffer that
    // the caller frees with vmaFreeStatsString.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
8399 
8400 void vmaFreeStatsString(
8401  VmaAllocator allocator,
8402  char* pStatsString)
8403 {
8404  if(pStatsString != VMA_NULL)
8405  {
8406  VMA_ASSERT(allocator);
8407  size_t len = strlen(pStatsString);
8408  vma_delete_array(allocator, pStatsString, len + 1);
8409  }
8410 }
8411 
8412 #endif // #if VMA_STATS_STRING_ENABLED
8413 
8414 /*
8415 This function is not protected by any mutex because it just reads immutable data.
8416 */
8417 VkResult vmaFindMemoryTypeIndex(
8418  VmaAllocator allocator,
8419  uint32_t memoryTypeBits,
8420  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8421  uint32_t* pMemoryTypeIndex)
8422 {
8423  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8424  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8425  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8426 
8427  if(pAllocationCreateInfo->memoryTypeBits != 0)
8428  {
8429  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8430  }
8431 
8432  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8433  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8434 
8435  // Convert usage to requiredFlags and preferredFlags.
8436  switch(pAllocationCreateInfo->usage)
8437  {
8439  break;
8441  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8442  break;
8444  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8445  break;
8447  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8448  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8449  break;
8451  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8452  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8453  break;
8454  default:
8455  break;
8456  }
8457 
8458  *pMemoryTypeIndex = UINT32_MAX;
8459  uint32_t minCost = UINT32_MAX;
8460  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8461  memTypeIndex < allocator->GetMemoryTypeCount();
8462  ++memTypeIndex, memTypeBit <<= 1)
8463  {
8464  // This memory type is acceptable according to memoryTypeBits bitmask.
8465  if((memTypeBit & memoryTypeBits) != 0)
8466  {
8467  const VkMemoryPropertyFlags currFlags =
8468  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8469  // This memory type contains requiredFlags.
8470  if((requiredFlags & ~currFlags) == 0)
8471  {
8472  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8473  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8474  // Remember memory type with lowest cost.
8475  if(currCost < minCost)
8476  {
8477  *pMemoryTypeIndex = memTypeIndex;
8478  if(currCost == 0)
8479  {
8480  return VK_SUCCESS;
8481  }
8482  minCost = currCost;
8483  }
8484  }
8485  }
8486  }
8487  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8488 }
8489 
8491  VmaAllocator allocator,
8492  const VkBufferCreateInfo* pBufferCreateInfo,
8493  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8494  uint32_t* pMemoryTypeIndex)
8495 {
8496  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8497  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8498  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8499  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8500 
8501  const VkDevice hDev = allocator->m_hDevice;
8502  VkBuffer hBuffer = VK_NULL_HANDLE;
8503  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8504  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8505  if(res == VK_SUCCESS)
8506  {
8507  VkMemoryRequirements memReq = {};
8508  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8509  hDev, hBuffer, &memReq);
8510 
8511  res = vmaFindMemoryTypeIndex(
8512  allocator,
8513  memReq.memoryTypeBits,
8514  pAllocationCreateInfo,
8515  pMemoryTypeIndex);
8516 
8517  allocator->GetVulkanFunctions().vkDestroyBuffer(
8518  hDev, hBuffer, allocator->GetAllocationCallbacks());
8519  }
8520  return res;
8521 }
8522 
8524  VmaAllocator allocator,
8525  const VkImageCreateInfo* pImageCreateInfo,
8526  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8527  uint32_t* pMemoryTypeIndex)
8528 {
8529  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8530  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8531  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8532  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8533 
8534  const VkDevice hDev = allocator->m_hDevice;
8535  VkImage hImage = VK_NULL_HANDLE;
8536  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8537  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8538  if(res == VK_SUCCESS)
8539  {
8540  VkMemoryRequirements memReq = {};
8541  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8542  hDev, hImage, &memReq);
8543 
8544  res = vmaFindMemoryTypeIndex(
8545  allocator,
8546  memReq.memoryTypeBits,
8547  pAllocationCreateInfo,
8548  pMemoryTypeIndex);
8549 
8550  allocator->GetVulkanFunctions().vkDestroyImage(
8551  hDev, hImage, allocator->GetAllocationCallbacks());
8552  }
8553  return res;
8554 }
8555 
8556 VkResult vmaCreatePool(
8557  VmaAllocator allocator,
8558  const VmaPoolCreateInfo* pCreateInfo,
8559  VmaPool* pPool)
8560 {
8561  VMA_ASSERT(allocator && pCreateInfo && pPool);
8562 
8563  VMA_DEBUG_LOG("vmaCreatePool");
8564 
8565  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8566 
8567  return allocator->CreatePool(pCreateInfo, pPool);
8568 }
8569 
8570 void vmaDestroyPool(
8571  VmaAllocator allocator,
8572  VmaPool pool)
8573 {
8574  VMA_ASSERT(allocator);
8575 
8576  if(pool == VK_NULL_HANDLE)
8577  {
8578  return;
8579  }
8580 
8581  VMA_DEBUG_LOG("vmaDestroyPool");
8582 
8583  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8584 
8585  allocator->DestroyPool(pool);
8586 }
8587 
8588 void vmaGetPoolStats(
8589  VmaAllocator allocator,
8590  VmaPool pool,
8591  VmaPoolStats* pPoolStats)
8592 {
8593  VMA_ASSERT(allocator && pool && pPoolStats);
8594 
8595  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8596 
8597  allocator->GetPoolStats(pool, pPoolStats);
8598 }
8599 
8601  VmaAllocator allocator,
8602  VmaPool pool,
8603  size_t* pLostAllocationCount)
8604 {
8605  VMA_ASSERT(allocator && pool);
8606 
8607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8608 
8609  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8610 }
8611 
8612 VkResult vmaAllocateMemory(
8613  VmaAllocator allocator,
8614  const VkMemoryRequirements* pVkMemoryRequirements,
8615  const VmaAllocationCreateInfo* pCreateInfo,
8616  VmaAllocation* pAllocation,
8617  VmaAllocationInfo* pAllocationInfo)
8618 {
8619  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8620 
8621  VMA_DEBUG_LOG("vmaAllocateMemory");
8622 
8623  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8624 
8625  VkResult result = allocator->AllocateMemory(
8626  *pVkMemoryRequirements,
8627  false, // requiresDedicatedAllocation
8628  false, // prefersDedicatedAllocation
8629  VK_NULL_HANDLE, // dedicatedBuffer
8630  VK_NULL_HANDLE, // dedicatedImage
8631  *pCreateInfo,
8632  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8633  pAllocation);
8634 
8635  if(pAllocationInfo && result == VK_SUCCESS)
8636  {
8637  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8638  }
8639 
8640  return result;
8641 }
8642 
8644  VmaAllocator allocator,
8645  VkBuffer buffer,
8646  const VmaAllocationCreateInfo* pCreateInfo,
8647  VmaAllocation* pAllocation,
8648  VmaAllocationInfo* pAllocationInfo)
8649 {
8650  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8651 
8652  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8653 
8654  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8655 
8656  VkMemoryRequirements vkMemReq = {};
8657  bool requiresDedicatedAllocation = false;
8658  bool prefersDedicatedAllocation = false;
8659  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8660  requiresDedicatedAllocation,
8661  prefersDedicatedAllocation);
8662 
8663  VkResult result = allocator->AllocateMemory(
8664  vkMemReq,
8665  requiresDedicatedAllocation,
8666  prefersDedicatedAllocation,
8667  buffer, // dedicatedBuffer
8668  VK_NULL_HANDLE, // dedicatedImage
8669  *pCreateInfo,
8670  VMA_SUBALLOCATION_TYPE_BUFFER,
8671  pAllocation);
8672 
8673  if(pAllocationInfo && result == VK_SUCCESS)
8674  {
8675  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8676  }
8677 
8678  return result;
8679 }
8680 
8681 VkResult vmaAllocateMemoryForImage(
8682  VmaAllocator allocator,
8683  VkImage image,
8684  const VmaAllocationCreateInfo* pCreateInfo,
8685  VmaAllocation* pAllocation,
8686  VmaAllocationInfo* pAllocationInfo)
8687 {
8688  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8689 
8690  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8691 
8692  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8693 
8694  VkResult result = AllocateMemoryForImage(
8695  allocator,
8696  image,
8697  pCreateInfo,
8698  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8699  pAllocation);
8700 
8701  if(pAllocationInfo && result == VK_SUCCESS)
8702  {
8703  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8704  }
8705 
8706  return result;
8707 }
8708 
8709 void vmaFreeMemory(
8710  VmaAllocator allocator,
8711  VmaAllocation allocation)
8712 {
8713  VMA_ASSERT(allocator && allocation);
8714 
8715  VMA_DEBUG_LOG("vmaFreeMemory");
8716 
8717  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8718 
8719  allocator->FreeMemory(allocation);
8720 }
8721 
8723  VmaAllocator allocator,
8724  VmaAllocation allocation,
8725  VmaAllocationInfo* pAllocationInfo)
8726 {
8727  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8728 
8729  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8730 
8731  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8732 }
8733 
8734 VkBool32 vmaTouchAllocation(
8735  VmaAllocator allocator,
8736  VmaAllocation allocation)
8737 {
8738  VMA_ASSERT(allocator && allocation);
8739 
8740  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8741 
8742  return allocator->TouchAllocation(allocation);
8743 }
8744 
8746  VmaAllocator allocator,
8747  VmaAllocation allocation,
8748  void* pUserData)
8749 {
8750  VMA_ASSERT(allocator && allocation);
8751 
8752  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8753 
8754  allocation->SetUserData(allocator, pUserData);
8755 }
8756 
8758  VmaAllocator allocator,
8759  VmaAllocation* pAllocation)
8760 {
8761  VMA_ASSERT(allocator && pAllocation);
8762 
8763  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8764 
8765  allocator->CreateLostAllocation(pAllocation);
8766 }
8767 
8768 VkResult vmaMapMemory(
8769  VmaAllocator allocator,
8770  VmaAllocation allocation,
8771  void** ppData)
8772 {
8773  VMA_ASSERT(allocator && allocation && ppData);
8774 
8775  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8776 
8777  return allocator->Map(allocation, ppData);
8778 }
8779 
8780 void vmaUnmapMemory(
8781  VmaAllocator allocator,
8782  VmaAllocation allocation)
8783 {
8784  VMA_ASSERT(allocator && allocation);
8785 
8786  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8787 
8788  allocator->Unmap(allocation);
8789 }
8790 
8791 VkResult vmaDefragment(
8792  VmaAllocator allocator,
8793  VmaAllocation* pAllocations,
8794  size_t allocationCount,
8795  VkBool32* pAllocationsChanged,
8796  const VmaDefragmentationInfo *pDefragmentationInfo,
8797  VmaDefragmentationStats* pDefragmentationStats)
8798 {
8799  VMA_ASSERT(allocator && pAllocations);
8800 
8801  VMA_DEBUG_LOG("vmaDefragment");
8802 
8803  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8804 
8805  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8806 }
8807 
// Creates a VkBuffer, allocates memory for it, and binds the two together.
// On any failure, everything created so far is rolled back and *pBuffer /
// *pAllocation are left as VK_NULL_HANDLE. pAllocationInfo is optional and
// filled only on full success.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-set outputs so failure paths leave well-defined null handles.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);
        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
                allocator->m_hDevice,
                *pBuffer,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
8897 
8898 void vmaDestroyBuffer(
8899  VmaAllocator allocator,
8900  VkBuffer buffer,
8901  VmaAllocation allocation)
8902 {
8903  if(buffer != VK_NULL_HANDLE)
8904  {
8905  VMA_ASSERT(allocator);
8906 
8907  VMA_DEBUG_LOG("vmaDestroyBuffer");
8908 
8909  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8910 
8911  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8912 
8913  allocator->FreeMemory(allocation);
8914  }
8915 }
8916 
// Creates a VkImage, allocates memory for it, and binds the two together.
// On any failure, everything created so far is rolled back and *pImage /
// *pAllocation are left as VK_NULL_HANDLE. pAllocationInfo is optional and
// filled only on full success.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-set outputs so failure paths leave well-defined null handles.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Tiling determines the suballocation type, which matters for the
        // allocator's linear-vs-optimal placement rules.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
                allocator->m_hDevice,
                *pImage,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
8977 
8978 void vmaDestroyImage(
8979  VmaAllocator allocator,
8980  VkImage image,
8981  VmaAllocation allocation)
8982 {
8983  if(image != VK_NULL_HANDLE)
8984  {
8985  VMA_ASSERT(allocator);
8986 
8987  VMA_DEBUG_LOG("vmaDestroyImage");
8988 
8989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8990 
8991  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8992 
8993  allocator->FreeMemory(allocation);
8994  }
8995 }
8996 
8997 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:938
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1192
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:963
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:948
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1149
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:942
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1498
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:960
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1697
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1368
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1422
Definition: vk_mem_alloc.h:1229
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:931
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1267
Definition: vk_mem_alloc.h:1176
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:972
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1025
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:957
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1180
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1090
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:945
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1089
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:953
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1701
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:989
VmaStatInfo total
Definition: vk_mem_alloc.h:1099
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1709
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1251
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1692
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:946
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:873
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:966
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1376
Definition: vk_mem_alloc.h:1370
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1508
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:943
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1288
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1392
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1428
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:929
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1379
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1127
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1687
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1705
Definition: vk_mem_alloc.h:1166
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1275
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:944
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1095
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:879
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:900
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:905
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1707
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1262
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1438
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:939
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1078
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1387
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:892
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1236
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1091
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:896
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1382
Definition: vk_mem_alloc.h:1175
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1257
Definition: vk_mem_alloc.h:1248
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1081
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:941
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1400
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:975
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1431
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1246
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1281
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1013
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1097
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1216
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1090
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:950
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:894
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:949
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1414
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1522
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:969
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1090
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1087
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1419
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1503
Definition: vk_mem_alloc.h:1244
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1703
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:937
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:952
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1085
Definition: vk_mem_alloc.h:1132
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1372
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1083
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:947
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:951
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1203
Definition: vk_mem_alloc.h:1159
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1517
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:927
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:940
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1484
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1350
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1091
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1098
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1425
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1091
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1489