Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
826 #include <vulkan/vulkan.h>
827 
828 VK_DEFINE_HANDLE(VmaAllocator)
829 
830 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
832  VmaAllocator allocator,
833  uint32_t memoryType,
834  VkDeviceMemory memory,
835  VkDeviceSize size);
837 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
838  VmaAllocator allocator,
839  uint32_t memoryType,
840  VkDeviceMemory memory,
841  VkDeviceSize size);
842 
850 typedef struct VmaDeviceMemoryCallbacks {
852  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
854  PFN_vmaFreeDeviceMemoryFunction pfnFree;
855 } VmaDeviceMemoryCallbacks;
856 
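/*
Example - a minimal sketch of informative callbacks. The names MyAllocateCallback
and MyFreeCallback are illustrative, not part of the library:

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        // Log or record statistics about the vkAllocateMemory call here.
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        // Symmetric bookkeeping for vkFreeMemory.
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
    deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
    deviceMemoryCallbacks.pfnFree = MyFreeCallback;
    // Assign to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks before vmaCreateAllocator.
*/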
889 typedef VkFlags VmaAllocatorCreateFlags;
890 
895 typedef struct VmaVulkanFunctions {
896  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
897  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
898  PFN_vkAllocateMemory vkAllocateMemory;
899  PFN_vkFreeMemory vkFreeMemory;
900  PFN_vkMapMemory vkMapMemory;
901  PFN_vkUnmapMemory vkUnmapMemory;
902  PFN_vkBindBufferMemory vkBindBufferMemory;
903  PFN_vkBindImageMemory vkBindImageMemory;
904  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
905  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
906  PFN_vkCreateBuffer vkCreateBuffer;
907  PFN_vkDestroyBuffer vkDestroyBuffer;
908  PFN_vkCreateImage vkCreateImage;
909  PFN_vkDestroyImage vkDestroyImage;
910  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
911  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
912 } VmaVulkanFunctions;
913 
915 typedef struct VmaAllocatorCreateInfo
916 {
918  VmaAllocatorCreateFlags flags;
920 
921  VkPhysicalDevice physicalDevice;
923 
924  VkDevice device;
926 
928  VkDeviceSize preferredLargeHeapBlockSize;
929 
930  const VkAllocationCallbacks* pAllocationCallbacks;
932 
933  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
934 
947  uint32_t frameInUseCount;
971  const VkDeviceSize* pHeapSizeLimit;
983  const VmaVulkanFunctions* pVulkanFunctions;
984 } VmaAllocatorCreateInfo;
985 
987 VkResult vmaCreateAllocator(
988  const VmaAllocatorCreateInfo* pCreateInfo,
989  VmaAllocator* pAllocator);
990 
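/*
Example - typical initialization, a minimal sketch assuming `physicalDevice` and
`device` were created by the application beforehand:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/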
992 void vmaDestroyAllocator(
993  VmaAllocator allocator);
994 
999 void vmaGetPhysicalDeviceProperties(
1000  VmaAllocator allocator,
1001  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1002 
1007 void vmaGetMemoryProperties(
1008  VmaAllocator allocator,
1009  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1010 
1017 void vmaGetMemoryTypeProperties(
1018  VmaAllocator allocator,
1019  uint32_t memoryTypeIndex,
1020  VkMemoryPropertyFlags* pFlags);
1021 
1030 void vmaSetCurrentFrameIndex(
1031  VmaAllocator allocator,
1032  uint32_t frameIndex);
1033 
1036 typedef struct VmaStatInfo
1037 {
1039  uint32_t blockCount;
1041  uint32_t allocationCount;
1043  uint32_t unusedRangeCount;
1045  VkDeviceSize usedBytes;
1047  VkDeviceSize unusedBytes;
1048  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1049  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1050 } VmaStatInfo;
1051 
1053 typedef struct VmaStats
1054 {
1055  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1056  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1057  VmaStatInfo total;
1058 } VmaStats;
1059 
1061 void vmaCalculateStats(
1062  VmaAllocator allocator,
1063  VmaStats* pStats);
1064 
1065 #define VMA_STATS_STRING_ENABLED 1
1066 
1067 #if VMA_STATS_STRING_ENABLED
1068 
1070 
1072 void vmaBuildStatsString(
1073  VmaAllocator allocator,
1074  char** ppStatsString,
1075  VkBool32 detailedMap);
1076 
1077 void vmaFreeStatsString(
1078  VmaAllocator allocator,
1079  char* pStatsString);
1080 
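/*
Example - dump current statistics as a JSON string (a sketch; the string must be
freed with vmaFreeStatsString, not free()):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // Write statsString to a log or file...
    vmaFreeStatsString(allocator, statsString);
*/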
1081 #endif // #if VMA_STATS_STRING_ENABLED
1082 
1083 VK_DEFINE_HANDLE(VmaPool)
1084 
1085 typedef enum VmaMemoryUsage
1086 {
1088  VMA_MEMORY_USAGE_UNKNOWN = 0,
1098  VMA_MEMORY_USAGE_GPU_ONLY = 1,
1109  VMA_MEMORY_USAGE_CPU_ONLY = 2,
1117  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
1125  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
1134  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
1135 } VmaMemoryUsage;
1136 
1151 
1153 typedef enum VmaAllocationCreateFlagBits {
1160  VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
1168  VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
1175  VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
1186  VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
1191  VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
1197  VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
1200  VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1201 } VmaAllocationCreateFlagBits;
1202 
1204 typedef VkFlags VmaAllocationCreateFlags;
1205 
1206 typedef struct VmaAllocationCreateInfo
1207 {
1209  VmaAllocationCreateFlags flags;
1214  VmaMemoryUsage usage;
1220  VkMemoryPropertyFlags requiredFlags;
1225  VkMemoryPropertyFlags preferredFlags;
1233  uint32_t memoryTypeBits;
1239  VmaPool pool;
1246  void* pUserData;
1247 } VmaAllocationCreateInfo;
1248 
1263 VkResult vmaFindMemoryTypeIndex(
1264  VmaAllocator allocator,
1265  uint32_t memoryTypeBits,
1266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1267  uint32_t* pMemoryTypeIndex);
1268 
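/*
Example - find a memory type for a CPU-visible staging buffer (a sketch;
`memoryTypeBits` would typically come from vkGetBufferMemoryRequirements):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/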
1271 typedef enum VmaPoolCreateFlagBits {
1286  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
1288  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
1289 } VmaPoolCreateFlagBits;
1292 typedef VkFlags VmaPoolCreateFlags;
1293 
1296 typedef struct VmaPoolCreateInfo {
1299  uint32_t memoryTypeIndex;
1302  VmaPoolCreateFlags flags;
1307  VkDeviceSize blockSize;
1312  size_t minBlockCount;
1319  size_t maxBlockCount;
1334  uint32_t frameInUseCount;
1335 } VmaPoolCreateInfo;
1336 
1339 typedef struct VmaPoolStats {
1342  VkDeviceSize size;
1345  VkDeviceSize unusedSize;
1349  size_t allocationCount;
1352  size_t unusedRangeCount;
1358  VkDeviceSize unusedRangeSizeMax;
1359 } VmaPoolStats;
1360 
1367 VkResult vmaCreatePool(
1368  VmaAllocator allocator,
1369  const VmaPoolCreateInfo* pCreateInfo,
1370  VmaPool* pPool);
1371 
1374 void vmaDestroyPool(
1375  VmaAllocator allocator,
1376  VmaPool pool);
1377 
1384 void vmaGetPoolStats(
1385  VmaAllocator allocator,
1386  VmaPool pool,
1387  VmaPoolStats* pPoolStats);
1388 
1395 void vmaMakePoolAllocationsLost(
1396  VmaAllocator allocator,
1397  VmaPool pool,
1398  size_t* pLostAllocationCount);
1399 
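/*
Example - create and use a custom pool, a minimal sketch assuming `memTypeIndex`
was obtained from vmaFindMemoryTypeIndex:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations are routed to the pool via VmaAllocationCreateInfo::pool.
    // When all allocations made from it are freed:
    vmaDestroyPool(allocator, pool);
*/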
1400 VK_DEFINE_HANDLE(VmaAllocation)
1401 
1402 
1404 typedef struct VmaAllocationInfo {
1409  uint32_t memoryType;
1418  VkDeviceMemory deviceMemory;
1423  VkDeviceSize offset;
1428  VkDeviceSize size;
1437  void* pMappedData;
1442  void* pUserData;
1443 } VmaAllocationInfo;
1444 
1455 VkResult vmaAllocateMemory(
1456  VmaAllocator allocator,
1457  const VkMemoryRequirements* pVkMemoryRequirements,
1458  const VmaAllocationCreateInfo* pCreateInfo,
1459  VmaAllocation* pAllocation,
1460  VmaAllocationInfo* pAllocationInfo);
1461 
1468 VkResult vmaAllocateMemoryForBuffer(
1469  VmaAllocator allocator,
1470  VkBuffer buffer,
1471  const VmaAllocationCreateInfo* pCreateInfo,
1472  VmaAllocation* pAllocation,
1473  VmaAllocationInfo* pAllocationInfo);
1474 
1476 VkResult vmaAllocateMemoryForImage(
1477  VmaAllocator allocator,
1478  VkImage image,
1479  const VmaAllocationCreateInfo* pCreateInfo,
1480  VmaAllocation* pAllocation,
1481  VmaAllocationInfo* pAllocationInfo);
1482 
1484 void vmaFreeMemory(
1485  VmaAllocator allocator,
1486  VmaAllocation allocation);
1487 
1489 void vmaGetAllocationInfo(
1490  VmaAllocator allocator,
1491  VmaAllocation allocation,
1492  VmaAllocationInfo* pAllocationInfo);
1493 
1507 void vmaSetAllocationUserData(
1508  VmaAllocator allocator,
1509  VmaAllocation allocation,
1510  void* pUserData);
1511 
1522 void vmaCreateLostAllocation(
1523  VmaAllocator allocator,
1524  VmaAllocation* pAllocation);
1525 
1560 VkResult vmaMapMemory(
1561  VmaAllocator allocator,
1562  VmaAllocation allocation,
1563  void** ppData);
1564 
1569 void vmaUnmapMemory(
1570  VmaAllocator allocator,
1571  VmaAllocation allocation);
1572 
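/*
Example - upload data through a temporary mapping (a sketch; `myData` is an
arbitrary host object):

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, &myData, sizeof(myData));
        vmaUnmapMemory(allocator, allocation);
    }
*/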
1574 typedef struct VmaDefragmentationInfo {
1579  VkDeviceSize maxBytesToMove;
1584  uint32_t maxAllocationsToMove;
1585 } VmaDefragmentationInfo;
1586 
1588 typedef struct VmaDefragmentationStats {
1590  VkDeviceSize bytesMoved;
1592  VkDeviceSize bytesFreed;
1594  uint32_t allocationsMoved;
1596  uint32_t deviceMemoryBlocksFreed;
1597 } VmaDefragmentationStats;
1598 
1681 VkResult vmaDefragment(
1682  VmaAllocator allocator,
1683  VmaAllocation* pAllocations,
1684  size_t allocationCount,
1685  VkBool32* pAllocationsChanged,
1686  const VmaDefragmentationInfo *pDefragmentationInfo,
1687  VmaDefragmentationStats* pDefragmentationStats);
1688 
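/*
Example - defragment a set of allocations, a minimal sketch. Entries of
`allocationsChanged` set to VK_TRUE indicate moved allocations, whose buffers or
images must be destroyed, recreated and rebound by the application:

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator,
        allocations, allocationCount,
        allocationsChanged,
        nullptr, // null means default VmaDefragmentationInfo (no limits)
        &stats);
*/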
1715 VkResult vmaCreateBuffer(
1716  VmaAllocator allocator,
1717  const VkBufferCreateInfo* pBufferCreateInfo,
1718  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1719  VkBuffer* pBuffer,
1720  VmaAllocation* pAllocation,
1721  VmaAllocationInfo* pAllocationInfo);
1722 
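/*
Example - create a buffer together with its own memory in one call (a sketch):

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/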
1734 void vmaDestroyBuffer(
1735  VmaAllocator allocator,
1736  VkBuffer buffer,
1737  VmaAllocation allocation);
1738 
1740 VkResult vmaCreateImage(
1741  VmaAllocator allocator,
1742  const VkImageCreateInfo* pImageCreateInfo,
1743  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1744  VkImage* pImage,
1745  VmaAllocation* pAllocation,
1746  VmaAllocationInfo* pAllocationInfo);
1747 
1759 void vmaDestroyImage(
1760  VmaAllocator allocator,
1761  VkImage image,
1762  VmaAllocation allocation);
1763 
1764 #ifdef __cplusplus
1765 }
1766 #endif
1767 
1768 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1769 
1770 // For Visual Studio IntelliSense.
1771 #ifdef __INTELLISENSE__
1772 #define VMA_IMPLEMENTATION
1773 #endif
1774 
1775 #ifdef VMA_IMPLEMENTATION
1776 #undef VMA_IMPLEMENTATION
1777 
1778 #include <cstdint>
1779 #include <cstdio> // for snprintf
1780 #include <cstdlib>
1781 #include <cstring>
1781 
1782 /*******************************************************************************
1783 CONFIGURATION SECTION
1784 
1785 Define some of these macros before each #include of this header, or change them
1786 here, if you need behavior other than the default for your environment.
1787 */
1788 
1789 /*
1790 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1791 internally, like:
1792 
1793  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1794 
1795 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1796 VmaAllocatorCreateInfo::pVulkanFunctions.
1797 */
1798 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1799 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1800 #endif
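/*
If you define VMA_STATIC_VULKAN_FUNCTIONS to 0, fill VmaVulkanFunctions yourself -
a sketch, assuming the pointers were loaded with vkGetInstanceProcAddr /
vkGetDeviceProcAddr (the my* names are placeholders):

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
    vulkanFunctions.vkFreeMemory = myLoadedVkFreeMemory;
    // ... fill all remaining members the same way ...
    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
*/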
1801 
1802 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1803 //#define VMA_USE_STL_CONTAINERS 1
1804 
1805 /* Set this macro to 1 to make the library include and use STL containers:
1806 std::pair, std::vector, std::list, std::unordered_map.
1807 
1808 Set it to 0 or leave it undefined to make the library use its own implementation
1809 of the containers.
1810 */
1811 #if VMA_USE_STL_CONTAINERS
1812  #define VMA_USE_STL_VECTOR 1
1813  #define VMA_USE_STL_UNORDERED_MAP 1
1814  #define VMA_USE_STL_LIST 1
1815 #endif
1816 
1817 #if VMA_USE_STL_VECTOR
1818  #include <vector>
1819 #endif
1820 
1821 #if VMA_USE_STL_UNORDERED_MAP
1822  #include <unordered_map>
1823 #endif
1824 
1825 #if VMA_USE_STL_LIST
1826  #include <list>
1827 #endif
1828 
1829 /*
1830 Following headers are used in this CONFIGURATION section only, so feel free to
1831 remove them if not needed.
1832 */
1833 #include <cassert> // for assert
1834 #include <algorithm> // for min, max
1835 #include <mutex> // for std::mutex
1836 #include <atomic> // for std::atomic
1837 
1838 #if !defined(_WIN32) && !defined(__APPLE__)
1839  #include <malloc.h> // for aligned_alloc()
1840 #endif
1841 
1842 #if defined(__APPLE__)
1843 #include <cstdlib>
1844 void *aligned_alloc(size_t alignment, size_t size)
1845 {
1846  // alignment must be >= sizeof(void*)
1847  if(alignment < sizeof(void*))
1848  {
1849  alignment = sizeof(void*);
1850  }
1851 
1852  void *pointer;
1853  if(posix_memalign(&pointer, alignment, size) == 0)
1854  return pointer;
1855  return nullptr; // VMA_NULL is not defined yet at this point in the file.
1856 }
1857 #endif
1858 
1859 // Normal assert to check for programmer's errors, especially in Debug configuration.
1860 #ifndef VMA_ASSERT
1861  #ifdef _DEBUG
1862  #define VMA_ASSERT(expr) assert(expr)
1863  #else
1864  #define VMA_ASSERT(expr)
1865  #endif
1866 #endif
1867 
1868 // Assert called very often, e.g. inside data structures such as operator[].
1869 // Making it non-empty can noticeably slow the program down.
1870 #ifndef VMA_HEAVY_ASSERT
1871  #ifdef _DEBUG
1872  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1873  #else
1874  #define VMA_HEAVY_ASSERT(expr)
1875  #endif
1876 #endif
1877 
1878 #ifndef VMA_NULL
1879  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1880  #define VMA_NULL nullptr
1881 #endif
1882 
1883 #ifndef VMA_ALIGN_OF
1884  #define VMA_ALIGN_OF(type) (__alignof(type))
1885 #endif
1886 
1887 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1888  #if defined(_WIN32)
1889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1890  #else
1891  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1892  #endif
1893 #endif
1894 
1895 #ifndef VMA_SYSTEM_FREE
1896  #if defined(_WIN32)
1897  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1898  #else
1899  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1900  #endif
1901 #endif
1902 
1903 #ifndef VMA_MIN
1904  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1905 #endif
1906 
1907 #ifndef VMA_MAX
1908  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1909 #endif
1910 
1911 #ifndef VMA_SWAP
1912  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1913 #endif
1914 
1915 #ifndef VMA_SORT
1916  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1917 #endif
1918 
1919 #ifndef VMA_DEBUG_LOG
1920  #define VMA_DEBUG_LOG(format, ...)
1921  /*
1922  #define VMA_DEBUG_LOG(format, ...) do { \
1923  printf(format, __VA_ARGS__); \
1924  printf("\n"); \
1925  } while(false)
1926  */
1927 #endif
1928 
1929 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1930 #if VMA_STATS_STRING_ENABLED
1931  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1932  {
1933  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1934  }
1935  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1936  {
1937  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1938  }
1939  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1940  {
1941  snprintf(outStr, strLen, "%p", ptr);
1942  }
1943 #endif
1944 
1945 #ifndef VMA_MUTEX
1946  class VmaMutex
1947  {
1948  public:
1949  VmaMutex() { }
1950  ~VmaMutex() { }
1951  void Lock() { m_Mutex.lock(); }
1952  void Unlock() { m_Mutex.unlock(); }
1953  private:
1954  std::mutex m_Mutex;
1955  };
1956  #define VMA_MUTEX VmaMutex
1957 #endif
1958 
1959 /*
1960 If providing your own implementation, you need to implement a subset of std::atomic:
1961 
1962 - Constructor(uint32_t desired)
1963 - uint32_t load() const
1964 - void store(uint32_t desired)
1965 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1966 */
1967 #ifndef VMA_ATOMIC_UINT32
1968  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1969 #endif
1970 
1971 #ifndef VMA_BEST_FIT
1972 
1984  #define VMA_BEST_FIT (1)
1985 #endif
1986 
1987 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
1988 
1992  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
1993 #endif
1994 
1995 #ifndef VMA_DEBUG_ALIGNMENT
1996 
2000  #define VMA_DEBUG_ALIGNMENT (1)
2001 #endif
2002 
2003 #ifndef VMA_DEBUG_MARGIN
2004 
2008  #define VMA_DEBUG_MARGIN (0)
2009 #endif
2010 
2011 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2012 
2016  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2017 #endif
2018 
2019 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2020 
2024  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2025 #endif
2026 
2027 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2028  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2030 #endif
2031 
2032 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2033  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2035 #endif
2036 
2037 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2038 
2039 /*******************************************************************************
2040 END OF CONFIGURATION
2041 */
2042 
2043 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2044  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2045 
2046 // Returns number of bits set to 1 in (v).
2047 static inline uint32_t VmaCountBitsSet(uint32_t v)
2048 {
2049  uint32_t c = v - ((v >> 1) & 0x55555555);
2050  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2051  c = ((c >> 4) + c) & 0x0F0F0F0F;
2052  c = ((c >> 8) + c) & 0x00FF00FF;
2053  c = ((c >> 16) + c) & 0x0000FFFF;
2054  return c;
2055 }
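// Example: VmaCountBitsSet(0x0000000B) == 3, because 0xB is binary 1011.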
2056 
2057 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
2058 // Use types like uint32_t, uint64_t as T.
2059 template <typename T>
2060 static inline T VmaAlignUp(T val, T align)
2061 {
2062  return (val + align - 1) / align * align;
2063 }
2064 
2065 // Integer division with mathematical rounding to the nearest whole number.
2066 template <typename T>
2067 inline T VmaRoundDiv(T x, T y)
2068 {
2069  return (x + (y / (T)2)) / y;
2070 }
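// Example: VmaRoundDiv<uint32_t>(7, 2) == 4, while plain integer division would give 3.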
2071 
2072 #ifndef VMA_SORT
2073 
2074 template<typename Iterator, typename Compare>
2075 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2076 {
2077  Iterator centerValue = end; --centerValue;
2078  Iterator insertIndex = beg;
2079  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2080  {
2081  if(cmp(*memTypeIndex, *centerValue))
2082  {
2083  if(insertIndex != memTypeIndex)
2084  {
2085  VMA_SWAP(*memTypeIndex, *insertIndex);
2086  }
2087  ++insertIndex;
2088  }
2089  }
2090  if(insertIndex != centerValue)
2091  {
2092  VMA_SWAP(*insertIndex, *centerValue);
2093  }
2094  return insertIndex;
2095 }
2096 
2097 template<typename Iterator, typename Compare>
2098 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2099 {
2100  if(beg < end)
2101  {
2102  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2103  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2104  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2105  }
2106 }
2107 
2108 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2109 
2110 #endif // #ifndef VMA_SORT
2111 
2112 /*
2113 Returns true if two memory blocks occupy overlapping pages.
2114 ResourceA must be at a lower memory offset than ResourceB.
2115 
2116 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2117 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2118 */
2119 static inline bool VmaBlocksOnSamePage(
2120  VkDeviceSize resourceAOffset,
2121  VkDeviceSize resourceASize,
2122  VkDeviceSize resourceBOffset,
2123  VkDeviceSize pageSize)
2124 {
2125  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2126  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2127  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2128  VkDeviceSize resourceBStart = resourceBOffset;
2129  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2130  return resourceAEndPage == resourceBStartPage;
2131 }
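// Example: with pageSize = 4096, a resource spanning bytes [0, 4000) ends on page 0
// and a resource starting at offset 4050 also begins on page 0, so the function
// returns true; if the second resource started at offset 4096, it would return false.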
2132 
2133 enum VmaSuballocationType
2134 {
2135  VMA_SUBALLOCATION_TYPE_FREE = 0,
2136  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2137  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2138  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2139  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2140  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2141  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2142 };
2143 
2144 /*
2145 Returns true if given suballocation types could conflict and must respect
2146 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2147 or linear image and another one is optimal image. If type is unknown, behave
2148 conservatively.
2149 */
2150 static inline bool VmaIsBufferImageGranularityConflict(
2151  VmaSuballocationType suballocType1,
2152  VmaSuballocationType suballocType2)
2153 {
2154  if(suballocType1 > suballocType2)
2155  {
2156  VMA_SWAP(suballocType1, suballocType2);
2157  }
2158 
2159  switch(suballocType1)
2160  {
2161  case VMA_SUBALLOCATION_TYPE_FREE:
2162  return false;
2163  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2164  return true;
2165  case VMA_SUBALLOCATION_TYPE_BUFFER:
2166  return
2167  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2168  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2169  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2170  return
2171  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2172  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2173  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2174  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2175  return
2176  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2177  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2178  return false;
2179  default:
2180  VMA_ASSERT(0);
2181  return true;
2182  }
2183 }
2184 
2185 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2186 struct VmaMutexLock
2187 {
2188 public:
2189  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2190  m_pMutex(useMutex ? &mutex : VMA_NULL)
2191  {
2192  if(m_pMutex)
2193  {
2194  m_pMutex->Lock();
2195  }
2196  }
2197 
2198  ~VmaMutexLock()
2199  {
2200  if(m_pMutex)
2201  {
2202  m_pMutex->Unlock();
2203  }
2204  }
2205 
2206 private:
2207  VMA_MUTEX* m_pMutex;
2208 };
2209 
2210 #if VMA_DEBUG_GLOBAL_MUTEX
2211  static VMA_MUTEX gDebugGlobalMutex;
2212  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2213 #else
2214  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2215 #endif
2216 
2217 // Minimum size of a free suballocation to register it in the free suballocation collection.
2218 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2219 
2220 /*
2221 Performs binary search and returns iterator to the first element that is greater
2222 than or equal to (key), according to comparison (cmp).
2223 
2224 Cmp should return true if its first argument is less than its second argument.
2225 
2226 The returned iterator points to the found element if it is present in the
2227 collection, or to the place where a new element with value (key) should be inserted.
2228 */
2229 template <typename IterT, typename KeyT, typename CmpT>
2230 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2231 {
2232  size_t down = 0, up = (end - beg);
2233  while(down < up)
2234  {
2235  const size_t mid = (down + up) / 2;
2236  if(cmp(*(beg+mid), key))
2237  {
2238  down = mid + 1;
2239  }
2240  else
2241  {
2242  up = mid;
2243  }
2244  }
2245  return beg + down;
2246 }
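// Example: for sorted values {1, 3, 3, 7} and key 3, the returned iterator points at
// index 1 (the first 3); for key 4 it points at index 3 (the 7), which is where a new
// element with value 4 would be inserted.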
2247 
2249 // Memory allocation
2250 
2251 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2252 {
2253  if((pAllocationCallbacks != VMA_NULL) &&
2254  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2255  {
2256  return (*pAllocationCallbacks->pfnAllocation)(
2257  pAllocationCallbacks->pUserData,
2258  size,
2259  alignment,
2260  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2261  }
2262  else
2263  {
2264  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2265  }
2266 }
2267 
2268 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2269 {
2270  if((pAllocationCallbacks != VMA_NULL) &&
2271  (pAllocationCallbacks->pfnFree != VMA_NULL))
2272  {
2273  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2274  }
2275  else
2276  {
2277  VMA_SYSTEM_FREE(ptr);
2278  }
2279 }
2280 
2281 template<typename T>
2282 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2283 {
2284  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2285 }
2286 
2287 template<typename T>
2288 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2289 {
2290  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2291 }
2292 
2293 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2294 
2295 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2296 
2297 template<typename T>
2298 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2299 {
2300  ptr->~T();
2301  VmaFree(pAllocationCallbacks, ptr);
2302 }
2303 
2304 template<typename T>
2305 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2306 {
2307  if(ptr != VMA_NULL)
2308  {
2309  for(size_t i = count; i--; )
2310  {
2311  ptr[i].~T();
2312  }
2313  VmaFree(pAllocationCallbacks, ptr);
2314  }
2315 }
2316 
2317 // STL-compatible allocator.
2318 template<typename T>
2319 class VmaStlAllocator
2320 {
2321 public:
2322  const VkAllocationCallbacks* const m_pCallbacks;
2323  typedef T value_type;
2324 
2325  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2326  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2327 
2328  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2329  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2330 
2331  template<typename U>
2332  bool operator==(const VmaStlAllocator<U>& rhs) const
2333  {
2334  return m_pCallbacks == rhs.m_pCallbacks;
2335  }
2336  template<typename U>
2337  bool operator!=(const VmaStlAllocator<U>& rhs) const
2338  {
2339  return m_pCallbacks != rhs.m_pCallbacks;
2340  }
2341 
2342  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2343 };
2344 
2345 #if VMA_USE_STL_VECTOR
2346 
2347 #define VmaVector std::vector
2348 
2349 template<typename T, typename allocatorT>
2350 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2351 {
2352  vec.insert(vec.begin() + index, item);
2353 }
2354 
2355 template<typename T, typename allocatorT>
2356 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2357 {
2358  vec.erase(vec.begin() + index);
2359 }
2360 
2361 #else // #if VMA_USE_STL_VECTOR
2362 
2363 /* Class with interface compatible with subset of std::vector.
2364 T must be POD because constructors and destructors are not called and memcpy is
2365 used for these objects. */
2366 template<typename T, typename AllocatorT>
2367 class VmaVector
2368 {
2369 public:
2370  typedef T value_type;
2371 
2372  VmaVector(const AllocatorT& allocator) :
2373  m_Allocator(allocator),
2374  m_pArray(VMA_NULL),
2375  m_Count(0),
2376  m_Capacity(0)
2377  {
2378  }
2379 
2380  VmaVector(size_t count, const AllocatorT& allocator) :
2381  m_Allocator(allocator),
2382  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2383  m_Count(count),
2384  m_Capacity(count)
2385  {
2386  }
2387 
2388  VmaVector(const VmaVector<T, AllocatorT>& src) :
2389  m_Allocator(src.m_Allocator),
2390  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2391  m_Count(src.m_Count),
2392  m_Capacity(src.m_Count)
2393  {
2394  if(m_Count != 0)
2395  {
2396  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2397  }
2398  }
2399 
2400  ~VmaVector()
2401  {
2402  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2403  }
2404 
2405  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2406  {
2407  if(&rhs != this)
2408  {
2409  resize(rhs.m_Count);
2410  if(m_Count != 0)
2411  {
2412  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2413  }
2414  }
2415  return *this;
2416  }
2417 
2418  bool empty() const { return m_Count == 0; }
2419  size_t size() const { return m_Count; }
2420  T* data() { return m_pArray; }
2421  const T* data() const { return m_pArray; }
2422 
2423  T& operator[](size_t index)
2424  {
2425  VMA_HEAVY_ASSERT(index < m_Count);
2426  return m_pArray[index];
2427  }
2428  const T& operator[](size_t index) const
2429  {
2430  VMA_HEAVY_ASSERT(index < m_Count);
2431  return m_pArray[index];
2432  }
2433 
2434  T& front()
2435  {
2436  VMA_HEAVY_ASSERT(m_Count > 0);
2437  return m_pArray[0];
2438  }
2439  const T& front() const
2440  {
2441  VMA_HEAVY_ASSERT(m_Count > 0);
2442  return m_pArray[0];
2443  }
2444  T& back()
2445  {
2446  VMA_HEAVY_ASSERT(m_Count > 0);
2447  return m_pArray[m_Count - 1];
2448  }
2449  const T& back() const
2450  {
2451  VMA_HEAVY_ASSERT(m_Count > 0);
2452  return m_pArray[m_Count - 1];
2453  }
2454 
2455  void reserve(size_t newCapacity, bool freeMemory = false)
2456  {
2457  newCapacity = VMA_MAX(newCapacity, m_Count);
2458 
2459  if((newCapacity < m_Capacity) && !freeMemory)
2460  {
2461  newCapacity = m_Capacity;
2462  }
2463 
2464  if(newCapacity != m_Capacity)
2465  {
2466  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2467  if(m_Count != 0)
2468  {
2469  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2470  }
2471  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2472  m_Capacity = newCapacity;
2473  m_pArray = newArray;
2474  }
2475  }
2476 
2477  void resize(size_t newCount, bool freeMemory = false)
2478  {
2479  size_t newCapacity = m_Capacity;
2480  if(newCount > m_Capacity)
2481  {
2482  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2483  }
2484  else if(freeMemory)
2485  {
2486  newCapacity = newCount;
2487  }
2488 
2489  if(newCapacity != m_Capacity)
2490  {
2491  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2492  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2493  if(elementsToCopy != 0)
2494  {
2495  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2496  }
2497  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2498  m_Capacity = newCapacity;
2499  m_pArray = newArray;
2500  }
2501 
2502  m_Count = newCount;
2503  }
2504 
2505  void clear(bool freeMemory = false)
2506  {
2507  resize(0, freeMemory);
2508  }
2509 
2510  void insert(size_t index, const T& src)
2511  {
2512  VMA_HEAVY_ASSERT(index <= m_Count);
2513  const size_t oldCount = size();
2514  resize(oldCount + 1);
2515  if(index < oldCount)
2516  {
2517  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2518  }
2519  m_pArray[index] = src;
2520  }
2521 
2522  void remove(size_t index)
2523  {
2524  VMA_HEAVY_ASSERT(index < m_Count);
2525  const size_t oldCount = size();
2526  if(index < oldCount - 1)
2527  {
2528  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2529  }
2530  resize(oldCount - 1);
2531  }
2532 
2533  void push_back(const T& src)
2534  {
2535  const size_t newIndex = size();
2536  resize(newIndex + 1);
2537  m_pArray[newIndex] = src;
2538  }
2539 
2540  void pop_back()
2541  {
2542  VMA_HEAVY_ASSERT(m_Count > 0);
2543  resize(size() - 1);
2544  }
2545 
2546  void push_front(const T& src)
2547  {
2548  insert(0, src);
2549  }
2550 
2551  void pop_front()
2552  {
2553  VMA_HEAVY_ASSERT(m_Count > 0);
2554  remove(0);
2555  }
2556 
2557  typedef T* iterator;
2558 
2559  iterator begin() { return m_pArray; }
2560  iterator end() { return m_pArray + m_Count; }
2561 
2562 private:
2563  AllocatorT m_Allocator;
2564  T* m_pArray;
2565  size_t m_Count;
2566  size_t m_Capacity;
2567 };
2568 
2569 template<typename T, typename allocatorT>
2570 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2571 {
2572  vec.insert(index, item);
2573 }
2574 
2575 template<typename T, typename allocatorT>
2576 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2577 {
2578  vec.remove(index);
2579 }
2580 
2581 #endif // #if VMA_USE_STL_VECTOR
2582 
2583 template<typename CmpLess, typename VectorT>
2584 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2585 {
2586  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2587  vector.data(),
2588  vector.data() + vector.size(),
2589  value,
2590  CmpLess()) - vector.data();
2591  VmaVectorInsert(vector, indexToInsert, value);
2592  return indexToInsert;
2593 }
2594 
2595 template<typename CmpLess, typename VectorT>
2596 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2597 {
2598  CmpLess comparator;
2599  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2600  vector.begin(),
2601  vector.end(),
2602  value,
2603  comparator);
2604  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2605  {
2606  size_t indexToRemove = it - vector.begin();
2607  VmaVectorRemove(vector, indexToRemove);
2608  return true;
2609  }
2610  return false;
2611 }
2612 
2613 template<typename CmpLess, typename VectorT>
2614 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2615 {
2616  CmpLess comparator;
2617  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2618  vector.data(),
2619  vector.data() + vector.size(),
2620  value,
2621  comparator);
2622  if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
2623  {
2624  return it - vector.data();
2625  }
2626  else
2627  {
2628  return vector.size();
2629  }
2630 }
2631 
2633 // class VmaPoolAllocator
2634 
2635 /*
2636 Allocator for objects of type T using a list of arrays (pools) to speed up
2637 allocation. Number of elements that can be allocated is not bounded because
2638 allocator can create multiple blocks.
2639 */
2640 template<typename T>
2641 class VmaPoolAllocator
2642 {
2643 public:
2644  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2645  ~VmaPoolAllocator();
2646  void Clear();
2647  T* Alloc();
2648  void Free(T* ptr);
2649 
2650 private:
2651  union Item
2652  {
2653  uint32_t NextFreeIndex;
2654  T Value;
2655  };
2656 
2657  struct ItemBlock
2658  {
2659  Item* pItems;
2660  uint32_t FirstFreeIndex;
2661  };
2662 
2663  const VkAllocationCallbacks* m_pAllocationCallbacks;
2664  size_t m_ItemsPerBlock;
2665  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2666 
2667  ItemBlock& CreateNewBlock();
2668 };
2669 
2670 template<typename T>
2671 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2672  m_pAllocationCallbacks(pAllocationCallbacks),
2673  m_ItemsPerBlock(itemsPerBlock),
2674  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2675 {
2676  VMA_ASSERT(itemsPerBlock > 0);
2677 }
2678 
2679 template<typename T>
2680 VmaPoolAllocator<T>::~VmaPoolAllocator()
2681 {
2682  Clear();
2683 }
2684 
2685 template<typename T>
2686 void VmaPoolAllocator<T>::Clear()
2687 {
2688  for(size_t i = m_ItemBlocks.size(); i--; )
2689  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2690  m_ItemBlocks.clear();
2691 }
2692 
2693 template<typename T>
2694 T* VmaPoolAllocator<T>::Alloc()
2695 {
2696  for(size_t i = m_ItemBlocks.size(); i--; )
2697  {
2698  ItemBlock& block = m_ItemBlocks[i];
2699  // This block has some free items: use the first one.
2700  if(block.FirstFreeIndex != UINT32_MAX)
2701  {
2702  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2703  block.FirstFreeIndex = pItem->NextFreeIndex;
2704  return &pItem->Value;
2705  }
2706  }
2707 
2708  // No block has a free item: create a new block and use it.
2709  ItemBlock& newBlock = CreateNewBlock();
2710  Item* const pItem = &newBlock.pItems[0];
2711  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2712  return &pItem->Value;
2713 }
2714 
2715 template<typename T>
2716 void VmaPoolAllocator<T>::Free(T* ptr)
2717 {
2718  // Search all memory blocks to find ptr.
2719  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2720  {
2721  ItemBlock& block = m_ItemBlocks[i];
2722 
2723  // Casting to union.
2724  Item* pItemPtr;
2725  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2726 
2727  // Check if pItemPtr is in address range of this block.
2728  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2729  {
2730  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2731  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2732  block.FirstFreeIndex = index;
2733  return;
2734  }
2735  }
2736  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2737 }
2738 
2739 template<typename T>
2740 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2741 {
2742  ItemBlock newBlock = {
2743  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2744 
2745  m_ItemBlocks.push_back(newBlock);
2746 
2747  // Setup singly-linked list of all free items in this block.
2748  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2749  newBlock.pItems[i].NextFreeIndex = i + 1;
2750  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2751  return m_ItemBlocks.back();
2752 }
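/*
Example - a minimal sketch of using VmaPoolAllocator (MyItem is a hypothetical POD
type; note that Alloc() does not run constructors):

    VmaPoolAllocator<MyItem> itemAllocator(pAllocationCallbacks, 128);
    MyItem* item = itemAllocator.Alloc(); // Reuses a free slot if one exists.
    // ... use *item ...
    itemAllocator.Free(item);
*/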
2753 
2755 // class VmaRawList, VmaList
2756 
2757 #if VMA_USE_STL_LIST
2758 
2759 #define VmaList std::list
2760 
2761 #else // #if VMA_USE_STL_LIST
2762 
2763 template<typename T>
2764 struct VmaListItem
2765 {
2766  VmaListItem* pPrev;
2767  VmaListItem* pNext;
2768  T Value;
2769 };
2770 
2771 // Doubly linked list.
2772 template<typename T>
2773 class VmaRawList
2774 {
2775 public:
2776  typedef VmaListItem<T> ItemType;
2777 
2778  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2779  ~VmaRawList();
2780  void Clear();
2781 
2782  size_t GetCount() const { return m_Count; }
2783  bool IsEmpty() const { return m_Count == 0; }
2784 
2785  ItemType* Front() { return m_pFront; }
2786  const ItemType* Front() const { return m_pFront; }
2787  ItemType* Back() { return m_pBack; }
2788  const ItemType* Back() const { return m_pBack; }
2789 
2790  ItemType* PushBack();
2791  ItemType* PushFront();
2792  ItemType* PushBack(const T& value);
2793  ItemType* PushFront(const T& value);
2794  void PopBack();
2795  void PopFront();
2796 
2797  // Item can be null - it means PushBack.
2798  ItemType* InsertBefore(ItemType* pItem);
2799  // Item can be null - it means PushFront.
2800  ItemType* InsertAfter(ItemType* pItem);
2801 
2802  ItemType* InsertBefore(ItemType* pItem, const T& value);
2803  ItemType* InsertAfter(ItemType* pItem, const T& value);
2804 
2805  void Remove(ItemType* pItem);
2806 
2807 private:
2808  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2809  VmaPoolAllocator<ItemType> m_ItemAllocator;
2810  ItemType* m_pFront;
2811  ItemType* m_pBack;
2812  size_t m_Count;
2813 
2814  // Declared but not defined, to block the copy constructor and assignment operator.
2815  VmaRawList(const VmaRawList<T>& src);
2816  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2817 };
2818 
2819 template<typename T>
2820 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2821  m_pAllocationCallbacks(pAllocationCallbacks),
2822  m_ItemAllocator(pAllocationCallbacks, 128),
2823  m_pFront(VMA_NULL),
2824  m_pBack(VMA_NULL),
2825  m_Count(0)
2826 {
2827 }
2828 
2829 template<typename T>
2830 VmaRawList<T>::~VmaRawList()
2831 {
2832  // Intentionally not calling Clear, because that would perform unnecessary
2833  // computations just to return all items to m_ItemAllocator as free.
2834 }
2835 
2836 template<typename T>
2837 void VmaRawList<T>::Clear()
2838 {
2839  if(IsEmpty() == false)
2840  {
2841  ItemType* pItem = m_pBack;
2842  while(pItem != VMA_NULL)
2843  {
2844  ItemType* const pPrevItem = pItem->pPrev;
2845  m_ItemAllocator.Free(pItem);
2846  pItem = pPrevItem;
2847  }
2848  m_pFront = VMA_NULL;
2849  m_pBack = VMA_NULL;
2850  m_Count = 0;
2851  }
2852 }
2853 
2854 template<typename T>
2855 VmaListItem<T>* VmaRawList<T>::PushBack()
2856 {
2857  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2858  pNewItem->pNext = VMA_NULL;
2859  if(IsEmpty())
2860  {
2861  pNewItem->pPrev = VMA_NULL;
2862  m_pFront = pNewItem;
2863  m_pBack = pNewItem;
2864  m_Count = 1;
2865  }
2866  else
2867  {
2868  pNewItem->pPrev = m_pBack;
2869  m_pBack->pNext = pNewItem;
2870  m_pBack = pNewItem;
2871  ++m_Count;
2872  }
2873  return pNewItem;
2874 }
2875 
2876 template<typename T>
2877 VmaListItem<T>* VmaRawList<T>::PushFront()
2878 {
2879  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2880  pNewItem->pPrev = VMA_NULL;
2881  if(IsEmpty())
2882  {
2883  pNewItem->pNext = VMA_NULL;
2884  m_pFront = pNewItem;
2885  m_pBack = pNewItem;
2886  m_Count = 1;
2887  }
2888  else
2889  {
2890  pNewItem->pNext = m_pFront;
2891  m_pFront->pPrev = pNewItem;
2892  m_pFront = pNewItem;
2893  ++m_Count;
2894  }
2895  return pNewItem;
2896 }
2897 
2898 template<typename T>
2899 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2900 {
2901  ItemType* const pNewItem = PushBack();
2902  pNewItem->Value = value;
2903  return pNewItem;
2904 }
2905 
2906 template<typename T>
2907 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2908 {
2909  ItemType* const pNewItem = PushFront();
2910  pNewItem->Value = value;
2911  return pNewItem;
2912 }
2913 
2914 template<typename T>
2915 void VmaRawList<T>::PopBack()
2916 {
2917  VMA_HEAVY_ASSERT(m_Count > 0);
2918  ItemType* const pBackItem = m_pBack;
2919  ItemType* const pPrevItem = pBackItem->pPrev;
2920  if(pPrevItem != VMA_NULL)
2921  {
2922  pPrevItem->pNext = VMA_NULL;
2923  }
2924  m_pBack = pPrevItem;
2925  m_ItemAllocator.Free(pBackItem);
2926  --m_Count;
2927 }
2928 
2929 template<typename T>
2930 void VmaRawList<T>::PopFront()
2931 {
2932  VMA_HEAVY_ASSERT(m_Count > 0);
2933  ItemType* const pFrontItem = m_pFront;
2934  ItemType* const pNextItem = pFrontItem->pNext;
2935  if(pNextItem != VMA_NULL)
2936  {
2937  pNextItem->pPrev = VMA_NULL;
2938  }
2939  m_pFront = pNextItem;
2940  m_ItemAllocator.Free(pFrontItem);
2941  --m_Count;
2942 }
2943 
2944 template<typename T>
2945 void VmaRawList<T>::Remove(ItemType* pItem)
2946 {
2947  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2948  VMA_HEAVY_ASSERT(m_Count > 0);
2949 
2950  if(pItem->pPrev != VMA_NULL)
2951  {
2952  pItem->pPrev->pNext = pItem->pNext;
2953  }
2954  else
2955  {
2956  VMA_HEAVY_ASSERT(m_pFront == pItem);
2957  m_pFront = pItem->pNext;
2958  }
2959 
2960  if(pItem->pNext != VMA_NULL)
2961  {
2962  pItem->pNext->pPrev = pItem->pPrev;
2963  }
2964  else
2965  {
2966  VMA_HEAVY_ASSERT(m_pBack == pItem);
2967  m_pBack = pItem->pPrev;
2968  }
2969 
2970  m_ItemAllocator.Free(pItem);
2971  --m_Count;
2972 }
2973 
2974 template<typename T>
2975 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2976 {
2977  if(pItem != VMA_NULL)
2978  {
2979  ItemType* const prevItem = pItem->pPrev;
2980  ItemType* const newItem = m_ItemAllocator.Alloc();
2981  newItem->pPrev = prevItem;
2982  newItem->pNext = pItem;
2983  pItem->pPrev = newItem;
2984  if(prevItem != VMA_NULL)
2985  {
2986  prevItem->pNext = newItem;
2987  }
2988  else
2989  {
2990  VMA_HEAVY_ASSERT(m_pFront == pItem);
2991  m_pFront = newItem;
2992  }
2993  ++m_Count;
2994  return newItem;
2995  }
2996  else
2997  return PushBack();
2998 }
2999 
3000 template<typename T>
3001 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3002 {
3003  if(pItem != VMA_NULL)
3004  {
3005  ItemType* const nextItem = pItem->pNext;
3006  ItemType* const newItem = m_ItemAllocator.Alloc();
3007  newItem->pNext = nextItem;
3008  newItem->pPrev = pItem;
3009  pItem->pNext = newItem;
3010  if(nextItem != VMA_NULL)
3011  {
3012  nextItem->pPrev = newItem;
3013  }
3014  else
3015  {
3016  VMA_HEAVY_ASSERT(m_pBack == pItem);
3017  m_pBack = newItem;
3018  }
3019  ++m_Count;
3020  return newItem;
3021  }
3022  else
3023  return PushFront();
3024 }
3025 
3026 template<typename T>
3027 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3028 {
3029  ItemType* const newItem = InsertBefore(pItem);
3030  newItem->Value = value;
3031  return newItem;
3032 }
3033 
3034 template<typename T>
3035 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3036 {
3037  ItemType* const newItem = InsertAfter(pItem);
3038  newItem->Value = value;
3039  return newItem;
3040 }
3041 
3042 template<typename T, typename AllocatorT>
3043 class VmaList
3044 {
3045 public:
3046  class iterator
3047  {
3048  public:
3049  iterator() :
3050  m_pList(VMA_NULL),
3051  m_pItem(VMA_NULL)
3052  {
3053  }
3054 
3055  T& operator*() const
3056  {
3057  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3058  return m_pItem->Value;
3059  }
3060  T* operator->() const
3061  {
3062  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3063  return &m_pItem->Value;
3064  }
3065 
3066  iterator& operator++()
3067  {
3068  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3069  m_pItem = m_pItem->pNext;
3070  return *this;
3071  }
3072  iterator& operator--()
3073  {
3074  if(m_pItem != VMA_NULL)
3075  {
3076  m_pItem = m_pItem->pPrev;
3077  }
3078  else
3079  {
3080  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3081  m_pItem = m_pList->Back();
3082  }
3083  return *this;
3084  }
3085 
3086  iterator operator++(int)
3087  {
3088  iterator result = *this;
3089  ++*this;
3090  return result;
3091  }
3092  iterator operator--(int)
3093  {
3094  iterator result = *this;
3095  --*this;
3096  return result;
3097  }
3098 
3099  bool operator==(const iterator& rhs) const
3100  {
3101  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3102  return m_pItem == rhs.m_pItem;
3103  }
3104  bool operator!=(const iterator& rhs) const
3105  {
3106  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3107  return m_pItem != rhs.m_pItem;
3108  }
3109 
3110  private:
3111  VmaRawList<T>* m_pList;
3112  VmaListItem<T>* m_pItem;
3113 
3114  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3115  m_pList(pList),
3116  m_pItem(pItem)
3117  {
3118  }
3119 
3120  friend class VmaList<T, AllocatorT>;
3121  };
3122 
3123  class const_iterator
3124  {
3125  public:
3126  const_iterator() :
3127  m_pList(VMA_NULL),
3128  m_pItem(VMA_NULL)
3129  {
3130  }
3131 
3132  const_iterator(const iterator& src) :
3133  m_pList(src.m_pList),
3134  m_pItem(src.m_pItem)
3135  {
3136  }
3137 
3138  const T& operator*() const
3139  {
3140  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3141  return m_pItem->Value;
3142  }
3143  const T* operator->() const
3144  {
3145  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3146  return &m_pItem->Value;
3147  }
3148 
3149  const_iterator& operator++()
3150  {
3151  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3152  m_pItem = m_pItem->pNext;
3153  return *this;
3154  }
3155  const_iterator& operator--()
3156  {
3157  if(m_pItem != VMA_NULL)
3158  {
3159  m_pItem = m_pItem->pPrev;
3160  }
3161  else
3162  {
3163  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3164  m_pItem = m_pList->Back();
3165  }
3166  return *this;
3167  }
3168 
3169  const_iterator operator++(int)
3170  {
3171  const_iterator result = *this;
3172  ++*this;
3173  return result;
3174  }
3175  const_iterator operator--(int)
3176  {
3177  const_iterator result = *this;
3178  --*this;
3179  return result;
3180  }
3181 
3182  bool operator==(const const_iterator& rhs) const
3183  {
3184  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3185  return m_pItem == rhs.m_pItem;
3186  }
3187  bool operator!=(const const_iterator& rhs) const
3188  {
3189  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3190  return m_pItem != rhs.m_pItem;
3191  }
3192 
3193  private:
3194  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3195  m_pList(pList),
3196  m_pItem(pItem)
3197  {
3198  }
3199 
3200  const VmaRawList<T>* m_pList;
3201  const VmaListItem<T>* m_pItem;
3202 
3203  friend class VmaList<T, AllocatorT>;
3204  };
3205 
3206  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3207 
3208  bool empty() const { return m_RawList.IsEmpty(); }
3209  size_t size() const { return m_RawList.GetCount(); }
3210 
3211  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3212  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3213 
3214  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3215  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3216 
3217  void clear() { m_RawList.Clear(); }
3218  void push_back(const T& value) { m_RawList.PushBack(value); }
3219  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3220  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3221 
3222 private:
3223  VmaRawList<T> m_RawList;
3224 };
3225 
3226 #endif // #if VMA_USE_STL_LIST
3227 
3229 // class VmaMap
3230 
3231 // Unused in this version.
3232 #if 0
3233 
3234 #if VMA_USE_STL_UNORDERED_MAP
3235 
3236 #define VmaPair std::pair
3237 
3238 #define VMA_MAP_TYPE(KeyT, ValueT) \
3239  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3240 
3241 #else // #if VMA_USE_STL_UNORDERED_MAP
3242 
3243 template<typename T1, typename T2>
3244 struct VmaPair
3245 {
3246  T1 first;
3247  T2 second;
3248 
3249  VmaPair() : first(), second() { }
3250  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3251 };
3252 
3253 /* Class compatible with subset of interface of std::unordered_map.
3254 KeyT, ValueT must be POD because they will be stored in VmaVector.
3255 */
3256 template<typename KeyT, typename ValueT>
3257 class VmaMap
3258 {
3259 public:
3260  typedef VmaPair<KeyT, ValueT> PairType;
3261  typedef PairType* iterator;
3262 
3263  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3264 
3265  iterator begin() { return m_Vector.begin(); }
3266  iterator end() { return m_Vector.end(); }
3267 
3268  void insert(const PairType& pair);
3269  iterator find(const KeyT& key);
3270  void erase(iterator it);
3271 
3272 private:
3273  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3274 };
3275 
3276 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3277 
3278 template<typename FirstT, typename SecondT>
3279 struct VmaPairFirstLess
3280 {
3281  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3282  {
3283  return lhs.first < rhs.first;
3284  }
3285  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3286  {
3287  return lhs.first < rhsFirst;
3288  }
3289 };
3290 
3291 template<typename KeyT, typename ValueT>
3292 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3293 {
3294  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3295  m_Vector.data(),
3296  m_Vector.data() + m_Vector.size(),
3297  pair,
3298  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3299  VmaVectorInsert(m_Vector, indexToInsert, pair);
3300 }
3301 
3302 template<typename KeyT, typename ValueT>
3303 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3304 {
3305  PairType* it = VmaBinaryFindFirstNotLess(
3306  m_Vector.data(),
3307  m_Vector.data() + m_Vector.size(),
3308  key,
3309  VmaPairFirstLess<KeyT, ValueT>());
3310  if((it != m_Vector.end()) && (it->first == key))
3311  {
3312  return it;
3313  }
3314  else
3315  {
3316  return m_Vector.end();
3317  }
3318 }
3319 
3320 template<typename KeyT, typename ValueT>
3321 void VmaMap<KeyT, ValueT>::erase(iterator it)
3322 {
3323  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3324 }
3325 
3326 #endif // #if VMA_USE_STL_UNORDERED_MAP
3327 
3328 #endif // #if 0
3329 
3331 
3332 class VmaDeviceMemoryBlock;
3333 
3334 struct VmaAllocation_T
3335 {
3336 private:
3337  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3338 
3339  enum FLAGS
3340  {
3341  FLAG_USER_DATA_STRING = 0x01,
3342  };
3343 
3344 public:
3345  enum ALLOCATION_TYPE
3346  {
3347  ALLOCATION_TYPE_NONE,
3348  ALLOCATION_TYPE_BLOCK,
3349  ALLOCATION_TYPE_DEDICATED,
3350  };
3351 
3352  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3353  m_Alignment(1),
3354  m_Size(0),
3355  m_pUserData(VMA_NULL),
3356  m_LastUseFrameIndex(currentFrameIndex),
3357  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3358  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3359  m_MapCount(0),
3360  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3361  {
3362  }
3363 
3364  ~VmaAllocation_T()
3365  {
3366  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3367 
3368  // Check if owned string was freed.
3369  VMA_ASSERT(m_pUserData == VMA_NULL);
3370  }
3371 
3372  void InitBlockAllocation(
3373  VmaPool hPool,
3374  VmaDeviceMemoryBlock* block,
3375  VkDeviceSize offset,
3376  VkDeviceSize alignment,
3377  VkDeviceSize size,
3378  VmaSuballocationType suballocationType,
3379  bool mapped,
3380  bool canBecomeLost)
3381  {
3382  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3383  VMA_ASSERT(block != VMA_NULL);
3384  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3385  m_Alignment = alignment;
3386  m_Size = size;
3387  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3388  m_SuballocationType = (uint8_t)suballocationType;
3389  m_BlockAllocation.m_hPool = hPool;
3390  m_BlockAllocation.m_Block = block;
3391  m_BlockAllocation.m_Offset = offset;
3392  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3393  }
3394 
3395  void InitLost()
3396  {
3397  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3398  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3399  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3400  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3401  m_BlockAllocation.m_Block = VMA_NULL;
3402  m_BlockAllocation.m_Offset = 0;
3403  m_BlockAllocation.m_CanBecomeLost = true;
3404  }
3405 
3406  void ChangeBlockAllocation(
3407  VmaAllocator hAllocator,
3408  VmaDeviceMemoryBlock* block,
3409  VkDeviceSize offset);
3410 
3411  // A non-null pMappedData means the allocation was created with the MAPPED flag.
3412  void InitDedicatedAllocation(
3413  uint32_t memoryTypeIndex,
3414  VkDeviceMemory hMemory,
3415  VmaSuballocationType suballocationType,
3416  void* pMappedData,
3417  VkDeviceSize size)
3418  {
3419  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3420  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3421  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3422  m_Alignment = 0;
3423  m_Size = size;
3424  m_SuballocationType = (uint8_t)suballocationType;
3425  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3426  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3427  m_DedicatedAllocation.m_hMemory = hMemory;
3428  m_DedicatedAllocation.m_pMappedData = pMappedData;
3429  }
3430 
3431  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3432  VkDeviceSize GetAlignment() const { return m_Alignment; }
3433  VkDeviceSize GetSize() const { return m_Size; }
3434  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3435  void* GetUserData() const { return m_pUserData; }
3436  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3437  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3438 
3439  VmaDeviceMemoryBlock* GetBlock() const
3440  {
3441  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3442  return m_BlockAllocation.m_Block;
3443  }
3444  VkDeviceSize GetOffset() const;
3445  VkDeviceMemory GetMemory() const;
3446  uint32_t GetMemoryTypeIndex() const;
3447  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3448  void* GetMappedData() const;
3449  bool CanBecomeLost() const;
3450  VmaPool GetPool() const;
3451 
3452  uint32_t GetLastUseFrameIndex() const
3453  {
3454  return m_LastUseFrameIndex.load();
3455  }
3456  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3457  {
3458  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3459  }
3460  /*
3461  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3462  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3463  - Else, returns false.
3464 
3465  If hAllocation is already lost, assert - you should not call it then.
3466  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3467  */
3468  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3469 
3470  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3471  {
3472  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3473  outInfo.blockCount = 1;
3474  outInfo.allocationCount = 1;
3475  outInfo.unusedRangeCount = 0;
3476  outInfo.usedBytes = m_Size;
3477  outInfo.unusedBytes = 0;
3478  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3479  outInfo.unusedRangeSizeMin = UINT64_MAX;
3480  outInfo.unusedRangeSizeMax = 0;
3481  }
3482 
3483  void BlockAllocMap();
3484  void BlockAllocUnmap();
3485  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3486  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3487 
3488 private:
3489  VkDeviceSize m_Alignment;
3490  VkDeviceSize m_Size;
3491  void* m_pUserData;
3492  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3493  uint8_t m_Type; // ALLOCATION_TYPE
3494  uint8_t m_SuballocationType; // VmaSuballocationType
3495  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3496  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3497  uint8_t m_MapCount;
3498  uint8_t m_Flags; // enum FLAGS
3499 
3500  // Allocation out of VmaDeviceMemoryBlock.
3501  struct BlockAllocation
3502  {
3503  VmaPool m_hPool; // Null if belongs to general memory.
3504  VmaDeviceMemoryBlock* m_Block;
3505  VkDeviceSize m_Offset;
3506  bool m_CanBecomeLost;
3507  };
3508 
3509  // Allocation for an object that has its own private VkDeviceMemory.
3510  struct DedicatedAllocation
3511  {
3512  uint32_t m_MemoryTypeIndex;
3513  VkDeviceMemory m_hMemory;
3514  void* m_pMappedData; // Not null means memory is mapped.
3515  };
3516 
3517  union
3518  {
3519  // Allocation out of VmaDeviceMemoryBlock.
3520  BlockAllocation m_BlockAllocation;
3521  // Allocation for an object that has its own private VkDeviceMemory.
3522  DedicatedAllocation m_DedicatedAllocation;
3523  };
3524 
3525  void FreeUserDataString(VmaAllocator hAllocator);
3526 };
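// VmaAllocation_T is effectively a tagged union: m_Type selects which union
// member is active - m_BlockAllocation for ALLOCATION_TYPE_BLOCK,
// m_DedicatedAllocation for ALLOCATION_TYPE_DEDICATED - and the accessors
// (GetOffset(), GetMemory(), GetMemoryTypeIndex(), ...) switch on m_Type.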
3527 
3528 /*
3529 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3530 an allocated memory block, or free.
3531 */
3532 struct VmaSuballocation
3533 {
3534  VkDeviceSize offset;
3535  VkDeviceSize size;
3536  VmaAllocation hAllocation;
3537  VmaSuballocationType type;
3538 };
3539 
3540 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3541 
3542 // Cost of making one additional allocation lost, expressed in bytes.
3543 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3544 
3545 /*
3546 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3547 
3548 If canMakeOtherLost was false:
3549 - item points to a FREE suballocation.
3550 - itemsToMakeLostCount is 0.
3551 
3552 If canMakeOtherLost was true:
3553 - item points to the first of a sequence of suballocations, which are either FREE,
3554  or point to VmaAllocations that can become lost.
3555 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3556  the requested allocation to succeed.
3557 */
3558 struct VmaAllocationRequest
3559 {
3560  VkDeviceSize offset;
3561  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3562  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3563  VmaSuballocationList::iterator item;
3564  size_t itemsToMakeLostCount;
3565 
3566  VkDeviceSize CalcCost() const
3567  {
3568  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3569  }
3570 };
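// Worked example (illustrative numbers): a request that must make lost a single
// 262144-byte allocation has CalcCost() = 262144 + 1 * VMA_LOST_ALLOCATION_COST
// = 1310720 bytes-equivalent, so it loses to any candidate placed purely in
// free space, whose sumItemSize and therefore cost is 0.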
3571 
3572 /*
3573 Data structure used for bookkeeping of allocations and unused ranges of memory
3574 in a single VkDeviceMemory block.
3575 */
3576 class VmaBlockMetadata
3577 {
3578 public:
3579  VmaBlockMetadata(VmaAllocator hAllocator);
3580  ~VmaBlockMetadata();
3581  void Init(VkDeviceSize size);
3582 
3583  // Validates all data structures inside this object. If not valid, returns false.
3584  bool Validate() const;
3585  VkDeviceSize GetSize() const { return m_Size; }
3586  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3587  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3588  VkDeviceSize GetUnusedRangeSizeMax() const;
3589  // Returns true if this block is empty - contains only a single free suballocation.
3590  bool IsEmpty() const;
3591 
3592  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3593  void AddPoolStats(VmaPoolStats& inoutStats) const;
3594 
3595 #if VMA_STATS_STRING_ENABLED
3596  void PrintDetailedMap(class VmaJsonWriter& json) const;
3597 #endif
3598 
3599  // Creates a trivial request for the case when the block is empty.
3600  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3601 
3602  // Tries to find a place for suballocation with given parameters inside this block.
3603  // If succeeded, fills pAllocationRequest and returns true.
3604  // If failed, returns false.
3605  bool CreateAllocationRequest(
3606  uint32_t currentFrameIndex,
3607  uint32_t frameInUseCount,
3608  VkDeviceSize bufferImageGranularity,
3609  VkDeviceSize allocSize,
3610  VkDeviceSize allocAlignment,
3611  VmaSuballocationType allocType,
3612  bool canMakeOtherLost,
3613  VmaAllocationRequest* pAllocationRequest);
3614 
3615  bool MakeRequestedAllocationsLost(
3616  uint32_t currentFrameIndex,
3617  uint32_t frameInUseCount,
3618  VmaAllocationRequest* pAllocationRequest);
3619 
3620  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3621 
3622  // Makes actual allocation based on request. Request must already be checked and valid.
3623  void Alloc(
3624  const VmaAllocationRequest& request,
3625  VmaSuballocationType type,
3626  VkDeviceSize allocSize,
3627  VmaAllocation hAllocation);
3628 
3629  // Frees suballocation assigned to given memory region.
3630  void Free(const VmaAllocation allocation);
3631  void FreeAtOffset(VkDeviceSize offset);
3632 
3633 private:
3634  VkDeviceSize m_Size;
3635  uint32_t m_FreeCount;
3636  VkDeviceSize m_SumFreeSize;
3637  VmaSuballocationList m_Suballocations;
3638  // Suballocations that are free and have size greater than certain threshold.
3639  // Sorted by size, ascending.
3640  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3641 
3642  bool ValidateFreeSuballocationList() const;
3643 
3644  // Checks if requested suballocation with given parameters can be placed in given suballocItem.
3645  // If yes, fills pOffset and returns true. If no, returns false.
3646  bool CheckAllocation(
3647  uint32_t currentFrameIndex,
3648  uint32_t frameInUseCount,
3649  VkDeviceSize bufferImageGranularity,
3650  VkDeviceSize allocSize,
3651  VkDeviceSize allocAlignment,
3652  VmaSuballocationType allocType,
3653  VmaSuballocationList::const_iterator suballocItem,
3654  bool canMakeOtherLost,
3655  VkDeviceSize* pOffset,
3656  size_t* itemsToMakeLostCount,
3657  VkDeviceSize* pSumFreeSize,
3658  VkDeviceSize* pSumItemSize) const;
3659  // Merges given free suballocation with the following one, which must also be free.
3660  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3661  // Releases given suballocation, making it free.
3662  // Merges it with adjacent free suballocations if applicable.
3663  // Returns iterator to new free suballocation at this place.
3664  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3665  // Inserts given free suballocation into the sorted list
3666  // m_FreeSuballocationsBySize, if it is large enough to be registered.
3667  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3668  // Removes given free suballocation from the sorted list
3669  // m_FreeSuballocationsBySize, if it was registered there.
3670  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3671 };
3672 
3673 // Helper class that represents mapped memory. Synchronized internally.
3674 class VmaDeviceMemoryMapping
3675 {
3676 public:
3677  VmaDeviceMemoryMapping();
3678  ~VmaDeviceMemoryMapping();
3679 
3680  void* GetMappedData() const { return m_pMappedData; }
3681 
3682  // ppData can be null.
3683  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3684  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3685 
3686 private:
3687  VMA_MUTEX m_Mutex;
3688  uint32_t m_MapCount;
3689  void* m_pMappedData;
3690 };
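// Illustrative sketch, not part of the library: shows the reference-counting
// contract implied by m_MapCount. The helper function and its parameters are
// hypothetical; hAllocator and hMemory are assumed valid. vkMapMemory is
// expected only on the 0 -> 1 transition and vkUnmapMemory only on 1 -> 0.
static void VmaDeviceMemoryMappingUsageSketch(
    VmaAllocator hAllocator, VkDeviceMemory hMemory, VmaDeviceMemoryMapping& mapping)
{
    void* pData = VMA_NULL;
    mapping.Map(hAllocator, hMemory, 1, &pData);   // First reference: maps the memory.
    mapping.Map(hAllocator, hMemory, 1, VMA_NULL); // Second reference: counter only.
    mapping.Unmap(hAllocator, hMemory, 1);         // Drop one reference: counter only.
    mapping.Unmap(hAllocator, hMemory, 1);         // Last reference: unmaps the memory.
}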
3691 
3692 /*
3693 Represents a single block of device memory (`VkDeviceMemory`) with all the
3694 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3695 
3696 Thread-safety: This class must be externally synchronized.
3697 */
3698 class VmaDeviceMemoryBlock
3699 {
3700 public:
3701  uint32_t m_MemoryTypeIndex;
3702  VkDeviceMemory m_hMemory;
3703  VmaDeviceMemoryMapping m_Mapping;
3704  VmaBlockMetadata m_Metadata;
3705 
3706  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3707 
3708  ~VmaDeviceMemoryBlock()
3709  {
3710  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3711  }
3712 
3713  // Always call after construction.
3714  void Init(
3715  uint32_t newMemoryTypeIndex,
3716  VkDeviceMemory newMemory,
3717  VkDeviceSize newSize);
3718  // Always call before destruction.
3719  void Destroy(VmaAllocator allocator);
3720 
3721  // Validates all data structures inside this object. If not valid, returns false.
3722  bool Validate() const;
3723 
3724  // ppData can be null.
3725  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3726  void Unmap(VmaAllocator hAllocator, uint32_t count);
3727 };
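// Lifecycle sketch (illustrative; hMemory, memTypeIndex and size are assumed
// to come from the owning allocator): a block must be initialized and
// destroyed explicitly, since the destructor only asserts that the
// VkDeviceMemory is already gone.
//
//   VmaDeviceMemoryBlock* pBlock = vma_new(hAllocator, VmaDeviceMemoryBlock)(hAllocator);
//   pBlock->Init(memTypeIndex, hMemory, size);
//   ...
//   pBlock->Destroy(hAllocator); // Releases the VkDeviceMemory.
//   vma_delete(hAllocator, pBlock);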
3728 
3729 struct VmaPointerLess
3730 {
3731  bool operator()(const void* lhs, const void* rhs) const
3732  {
3733  return lhs < rhs;
3734  }
3735 };
3736 
3737 class VmaDefragmentator;
3738 
3739 /*
3740 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3741 Vulkan memory type.
3742 
3743 Synchronized internally with a mutex.
3744 */
3745 struct VmaBlockVector
3746 {
3747  VmaBlockVector(
3748  VmaAllocator hAllocator,
3749  uint32_t memoryTypeIndex,
3750  VkDeviceSize preferredBlockSize,
3751  size_t minBlockCount,
3752  size_t maxBlockCount,
3753  VkDeviceSize bufferImageGranularity,
3754  uint32_t frameInUseCount,
3755  bool isCustomPool);
3756  ~VmaBlockVector();
3757 
3758  VkResult CreateMinBlocks();
3759 
3760  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3761  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3762  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3763  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3764 
3765  void GetPoolStats(VmaPoolStats* pStats);
3766 
3767  bool IsEmpty() const { return m_Blocks.empty(); }
3768 
3769  VkResult Allocate(
3770  VmaPool hCurrentPool,
3771  uint32_t currentFrameIndex,
3772  const VkMemoryRequirements& vkMemReq,
3773  const VmaAllocationCreateInfo& createInfo,
3774  VmaSuballocationType suballocType,
3775  VmaAllocation* pAllocation);
3776 
3777  void Free(
3778  VmaAllocation hAllocation);
3779 
3780  // Adds statistics of this BlockVector to pStats.
3781  void AddStats(VmaStats* pStats);
3782 
3783 #if VMA_STATS_STRING_ENABLED
3784  void PrintDetailedMap(class VmaJsonWriter& json);
3785 #endif
3786 
3787  void MakePoolAllocationsLost(
3788  uint32_t currentFrameIndex,
3789  size_t* pLostAllocationCount);
3790 
3791  VmaDefragmentator* EnsureDefragmentator(
3792  VmaAllocator hAllocator,
3793  uint32_t currentFrameIndex);
3794 
3795  VkResult Defragment(
3796  VmaDefragmentationStats* pDefragmentationStats,
3797  VkDeviceSize& maxBytesToMove,
3798  uint32_t& maxAllocationsToMove);
3799 
3800  void DestroyDefragmentator();
3801 
3802 private:
3803  friend class VmaDefragmentator;
3804 
3805  const VmaAllocator m_hAllocator;
3806  const uint32_t m_MemoryTypeIndex;
3807  const VkDeviceSize m_PreferredBlockSize;
3808  const size_t m_MinBlockCount;
3809  const size_t m_MaxBlockCount;
3810  const VkDeviceSize m_BufferImageGranularity;
3811  const uint32_t m_FrameInUseCount;
3812  const bool m_IsCustomPool;
3813  VMA_MUTEX m_Mutex;
3814  // Incrementally sorted by sumFreeSize, ascending.
3815  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3816  /* There can be at most one block that is completely empty - a
3817  hysteresis to avoid the pessimistic case of alternating creation and destruction
3818  of a VkDeviceMemory. */
3819  bool m_HasEmptyBlock;
3820  VmaDefragmentator* m_pDefragmentator;
3821 
3822  size_t CalcMaxBlockSize() const;
3823 
3824  // Finds and removes given block from vector.
3825  void Remove(VmaDeviceMemoryBlock* pBlock);
3826 
3827  // Performs single step in sorting m_Blocks. They may not be fully sorted
3828  // after this call.
3829  void IncrementallySortBlocks();
3830 
3831  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3832 };
3833 
3834 struct VmaPool_T
3835 {
3836 public:
3837  VmaBlockVector m_BlockVector;
3838 
3839  // Takes ownership.
3840  VmaPool_T(
3841  VmaAllocator hAllocator,
3842  const VmaPoolCreateInfo& createInfo);
3843  ~VmaPool_T();
3844 
3845  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3846 
3847 #if VMA_STATS_STRING_ENABLED
3848  //void PrintDetailedMap(class VmaStringBuilder& sb);
3849 #endif
3850 };
3851 
3852 class VmaDefragmentator
3853 {
3854  const VmaAllocator m_hAllocator;
3855  VmaBlockVector* const m_pBlockVector;
3856  uint32_t m_CurrentFrameIndex;
3857  VkDeviceSize m_BytesMoved;
3858  uint32_t m_AllocationsMoved;
3859 
3860  struct AllocationInfo
3861  {
3862  VmaAllocation m_hAllocation;
3863  VkBool32* m_pChanged;
3864 
3865  AllocationInfo() :
3866  m_hAllocation(VK_NULL_HANDLE),
3867  m_pChanged(VMA_NULL)
3868  {
3869  }
3870  };
3871 
3872  struct AllocationInfoSizeGreater
3873  {
3874  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3875  {
3876  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3877  }
3878  };
3879 
3880  // Used between AddAllocation and Defragment.
3881  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3882 
3883  struct BlockInfo
3884  {
3885  VmaDeviceMemoryBlock* m_pBlock;
3886  bool m_HasNonMovableAllocations;
3887  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3888 
3889  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3890  m_pBlock(VMA_NULL),
3891  m_HasNonMovableAllocations(true),
3892  m_Allocations(pAllocationCallbacks),
3893  m_pMappedDataForDefragmentation(VMA_NULL)
3894  {
3895  }
3896 
3897  void CalcHasNonMovableAllocations()
3898  {
3899  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3900  const size_t defragmentAllocCount = m_Allocations.size();
3901  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3902  }
3903 
3904  void SortAllocationsBySizeDescecnding()
3905  {
3906  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3907  }
3908 
3909  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3910  void Unmap(VmaAllocator hAllocator);
3911 
3912  private:
3913  // Not null if mapped for defragmentation only, not originally mapped.
3914  void* m_pMappedDataForDefragmentation;
3915  };
3916 
3917  struct BlockPointerLess
3918  {
3919  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3920  {
3921  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3922  }
3923  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3924  {
3925  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3926  }
3927  };
3928 
3929  // 1. Blocks with some non-movable allocations go first.
3930  // 2. Blocks with smaller sumFreeSize go first.
3931  struct BlockInfoCompareMoveDestination
3932  {
3933  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3934  {
3935  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3936  {
3937  return true;
3938  }
3939  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3940  {
3941  return false;
3942  }
3943  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3944  {
3945  return true;
3946  }
3947  return false;
3948  }
3949  };
3950 
3951  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3952  BlockInfoVector m_Blocks;
3953 
3954  VkResult DefragmentRound(
3955  VkDeviceSize maxBytesToMove,
3956  uint32_t maxAllocationsToMove);
3957 
3958  static bool MoveMakesSense(
3959  size_t dstBlockIndex, VkDeviceSize dstOffset,
3960  size_t srcBlockIndex, VkDeviceSize srcOffset);
3961 
3962 public:
3963  VmaDefragmentator(
3964  VmaAllocator hAllocator,
3965  VmaBlockVector* pBlockVector,
3966  uint32_t currentFrameIndex);
3967 
3968  ~VmaDefragmentator();
3969 
3970  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3971  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3972 
3973  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3974 
3975  VkResult Defragment(
3976  VkDeviceSize maxBytesToMove,
3977  uint32_t maxAllocationsToMove);
3978 };
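// Usage flow within this file (see VmaBlockVector above): EnsureDefragmentator()
// creates this object lazily for a block vector, AddAllocation() registers each
// candidate allocation together with an optional VkBool32 that is set when the
// allocation actually moves, and Defragment() drives DefragmentRound() under
// the maxBytesToMove / maxAllocationsToMove budgets.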
3979 
3980 // Main allocator object.
3981 struct VmaAllocator_T
3982 {
3983  bool m_UseMutex;
3984  bool m_UseKhrDedicatedAllocation;
3985  VkDevice m_hDevice;
3986  bool m_AllocationCallbacksSpecified;
3987  VkAllocationCallbacks m_AllocationCallbacks;
3988  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3989 
3990  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3991  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3992  VMA_MUTEX m_HeapSizeLimitMutex;
3993 
3994  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3995  VkPhysicalDeviceMemoryProperties m_MemProps;
3996 
3997  // Default pools.
3998  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3999 
4000  // Each vector is sorted by memory (handle value).
4001  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4002  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4003  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4004 
4005  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4006  ~VmaAllocator_T();
4007 
4008  const VkAllocationCallbacks* GetAllocationCallbacks() const
4009  {
4010  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4011  }
4012  const VmaVulkanFunctions& GetVulkanFunctions() const
4013  {
4014  return m_VulkanFunctions;
4015  }
4016 
4017  VkDeviceSize GetBufferImageGranularity() const
4018  {
4019  return VMA_MAX(
4020  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4021  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4022  }
4023 
4024  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4025  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4026 
4027  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4028  {
4029  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4030  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4031  }
4032 
4033  void GetBufferMemoryRequirements(
4034  VkBuffer hBuffer,
4035  VkMemoryRequirements& memReq,
4036  bool& requiresDedicatedAllocation,
4037  bool& prefersDedicatedAllocation) const;
4038  void GetImageMemoryRequirements(
4039  VkImage hImage,
4040  VkMemoryRequirements& memReq,
4041  bool& requiresDedicatedAllocation,
4042  bool& prefersDedicatedAllocation) const;
4043 
4044  // Main allocation function.
4045  VkResult AllocateMemory(
4046  const VkMemoryRequirements& vkMemReq,
4047  bool requiresDedicatedAllocation,
4048  bool prefersDedicatedAllocation,
4049  VkBuffer dedicatedBuffer,
4050  VkImage dedicatedImage,
4051  const VmaAllocationCreateInfo& createInfo,
4052  VmaSuballocationType suballocType,
4053  VmaAllocation* pAllocation);
4054 
4055  // Main deallocation function.
4056  void FreeMemory(const VmaAllocation allocation);
4057 
4058  void CalculateStats(VmaStats* pStats);
4059 
4060 #if VMA_STATS_STRING_ENABLED
4061  void PrintDetailedMap(class VmaJsonWriter& json);
4062 #endif
4063 
4064  VkResult Defragment(
4065  VmaAllocation* pAllocations,
4066  size_t allocationCount,
4067  VkBool32* pAllocationsChanged,
4068  const VmaDefragmentationInfo* pDefragmentationInfo,
4069  VmaDefragmentationStats* pDefragmentationStats);
4070 
4071  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4072 
4073  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4074  void DestroyPool(VmaPool pool);
4075  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4076 
4077  void SetCurrentFrameIndex(uint32_t frameIndex);
4078 
4079  void MakePoolAllocationsLost(
4080  VmaPool hPool,
4081  size_t* pLostAllocationCount);
4082 
4083  void CreateLostAllocation(VmaAllocation* pAllocation);
4084 
4085  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4086  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4087 
4088  VkResult Map(VmaAllocation hAllocation, void** ppData);
4089  void Unmap(VmaAllocation hAllocation);
4090 
4091 private:
4092  VkDeviceSize m_PreferredLargeHeapBlockSize;
4093 
4094  VkPhysicalDevice m_PhysicalDevice;
4095  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4096 
4097  VMA_MUTEX m_PoolsMutex;
4098  // Protected by m_PoolsMutex. Sorted by pointer value.
4099  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4100 
4101  VmaVulkanFunctions m_VulkanFunctions;
4102 
4103  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4104 
4105  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4106 
4107  VkResult AllocateMemoryOfType(
4108  const VkMemoryRequirements& vkMemReq,
4109  bool dedicatedAllocation,
4110  VkBuffer dedicatedBuffer,
4111  VkImage dedicatedImage,
4112  const VmaAllocationCreateInfo& createInfo,
4113  uint32_t memTypeIndex,
4114  VmaSuballocationType suballocType,
4115  VmaAllocation* pAllocation);
4116 
4117  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4118  VkResult AllocateDedicatedMemory(
4119  VkDeviceSize size,
4120  VmaSuballocationType suballocType,
4121  uint32_t memTypeIndex,
4122  bool map,
4123  bool isUserDataString,
4124  void* pUserData,
4125  VkBuffer dedicatedBuffer,
4126  VkImage dedicatedImage,
4127  VmaAllocation* pAllocation);
4128 
4129  // Frees given allocation, which must be a dedicated allocation, and unregisters it.
4130  void FreeDedicatedMemory(VmaAllocation allocation);
4131 };
4132 
4134 // Memory allocation #2 after VmaAllocator_T definition
4135 
4136 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4137 {
4138  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4139 }
4140 
4141 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4142 {
4143  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4144 }
4145 
4146 template<typename T>
4147 static T* VmaAllocate(VmaAllocator hAllocator)
4148 {
4149  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4150 }
4151 
4152 template<typename T>
4153 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4154 {
4155  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4156 }
4157 
4158 template<typename T>
4159 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4160 {
4161  if(ptr != VMA_NULL)
4162  {
4163  ptr->~T();
4164  VmaFree(hAllocator, ptr);
4165  }
4166 }
4167 
4168 template<typename T>
4169 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4170 {
4171  if(ptr != VMA_NULL)
4172  {
4173  for(size_t i = count; i--; )
4174  ptr[i].~T();
4175  VmaFree(hAllocator, ptr);
4176  }
4177 }
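// Note on pairing (illustrative): VmaAllocate/VmaAllocateArray return raw,
// uninitialized memory. Construction is performed by the vma_new /
// vma_new_array placement-new macros defined earlier in this file, and
// vma_delete / vma_delete_array above run the destructors before returning
// the memory through VmaFree. Hypothetical usage:
//
//   VmaDefragmentator* pDefrag =
//       vma_new(hAllocator, VmaDefragmentator)(hAllocator, pBlockVector, frameIndex);
//   vma_delete(hAllocator, pDefrag); // ~VmaDefragmentator(), then VmaFree().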
4178 
4180 // VmaStringBuilder
4181 
4182 #if VMA_STATS_STRING_ENABLED
4183 
4184 class VmaStringBuilder
4185 {
4186 public:
4187  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4188  size_t GetLength() const { return m_Data.size(); }
4189  const char* GetData() const { return m_Data.data(); }
4190 
4191  void Add(char ch) { m_Data.push_back(ch); }
4192  void Add(const char* pStr);
4193  void AddNewLine() { Add('\n'); }
4194  void AddNumber(uint32_t num);
4195  void AddNumber(uint64_t num);
4196  void AddPointer(const void* ptr);
4197 
4198 private:
4199  VmaVector< char, VmaStlAllocator<char> > m_Data;
4200 };
4201 
4202 void VmaStringBuilder::Add(const char* pStr)
4203 {
4204  const size_t strLen = strlen(pStr);
4205  if(strLen > 0)
4206  {
4207  const size_t oldCount = m_Data.size();
4208  m_Data.resize(oldCount + strLen);
4209  memcpy(m_Data.data() + oldCount, pStr, strLen);
4210  }
4211 }
4212 
4213 void VmaStringBuilder::AddNumber(uint32_t num)
4214 {
4215  char buf[11];
4216  VmaUint32ToStr(buf, sizeof(buf), num);
4217  Add(buf);
4218 }
4219 
4220 void VmaStringBuilder::AddNumber(uint64_t num)
4221 {
4222  char buf[21];
4223  VmaUint64ToStr(buf, sizeof(buf), num);
4224  Add(buf);
4225 }
4226 
4227 void VmaStringBuilder::AddPointer(const void* ptr)
4228 {
4229  char buf[21];
4230  VmaPtrToStr(buf, sizeof(buf), ptr);
4231  Add(buf);
4232 }
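// Usage sketch (hypothetical helper, not part of the library; alloc is assumed
// to be a valid VmaAllocator): VmaStringBuilder appends into a VmaVector<char>;
// note that GetData() is not null-terminated, so consumers must pair it with
// GetLength().
static void VmaStringBuilderUsageSketch(VmaAllocator alloc)
{
    VmaStringBuilder sb(alloc);
    sb.Add("heap size: ");
    sb.AddNumber((uint64_t)268435456); // 256 MiB, uses the uint64_t overload.
    sb.AddNewLine();
    // Consume sb.GetData() together with sb.GetLength().
}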
4233 
4234 #endif // #if VMA_STATS_STRING_ENABLED
4235 
4237 // VmaJsonWriter
4238 
4239 #if VMA_STATS_STRING_ENABLED
4240 
4241 class VmaJsonWriter
4242 {
4243 public:
4244  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4245  ~VmaJsonWriter();
4246 
4247  void BeginObject(bool singleLine = false);
4248  void EndObject();
4249 
4250  void BeginArray(bool singleLine = false);
4251  void EndArray();
4252 
4253  void WriteString(const char* pStr);
4254  void BeginString(const char* pStr = VMA_NULL);
4255  void ContinueString(const char* pStr);
4256  void ContinueString(uint32_t n);
4257  void ContinueString(uint64_t n);
4258  void ContinueString_Pointer(const void* ptr);
4259  void EndString(const char* pStr = VMA_NULL);
4260 
4261  void WriteNumber(uint32_t n);
4262  void WriteNumber(uint64_t n);
4263  void WriteBool(bool b);
4264  void WriteNull();
4265 
4266 private:
4267  static const char* const INDENT;
4268 
4269  enum COLLECTION_TYPE
4270  {
4271  COLLECTION_TYPE_OBJECT,
4272  COLLECTION_TYPE_ARRAY,
4273  };
4274  struct StackItem
4275  {
4276  COLLECTION_TYPE type;
4277  uint32_t valueCount;
4278  bool singleLineMode;
4279  };
4280 
4281  VmaStringBuilder& m_SB;
4282  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4283  bool m_InsideString;
4284 
4285  void BeginValue(bool isString);
4286  void WriteIndent(bool oneLess = false);
4287 };
4288 
4289 const char* const VmaJsonWriter::INDENT = " ";
4290 
4291 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4292  m_SB(sb),
4293  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4294  m_InsideString(false)
4295 {
4296 }
4297 
4298 VmaJsonWriter::~VmaJsonWriter()
4299 {
4300  VMA_ASSERT(!m_InsideString);
4301  VMA_ASSERT(m_Stack.empty());
4302 }
4303 
4304 void VmaJsonWriter::BeginObject(bool singleLine)
4305 {
4306  VMA_ASSERT(!m_InsideString);
4307 
4308  BeginValue(false);
4309  m_SB.Add('{');
4310 
4311  StackItem item;
4312  item.type = COLLECTION_TYPE_OBJECT;
4313  item.valueCount = 0;
4314  item.singleLineMode = singleLine;
4315  m_Stack.push_back(item);
4316 }
4317 
4318 void VmaJsonWriter::EndObject()
4319 {
4320  VMA_ASSERT(!m_InsideString);
4321 
4322  WriteIndent(true);
4323  m_SB.Add('}');
4324 
4325  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4326  m_Stack.pop_back();
4327 }
4328 
4329 void VmaJsonWriter::BeginArray(bool singleLine)
4330 {
4331  VMA_ASSERT(!m_InsideString);
4332 
4333  BeginValue(false);
4334  m_SB.Add('[');
4335 
4336  StackItem item;
4337  item.type = COLLECTION_TYPE_ARRAY;
4338  item.valueCount = 0;
4339  item.singleLineMode = singleLine;
4340  m_Stack.push_back(item);
4341 }
4342 
4343 void VmaJsonWriter::EndArray()
4344 {
4345  VMA_ASSERT(!m_InsideString);
4346 
4347  WriteIndent(true);
4348  m_SB.Add(']');
4349 
4350  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4351  m_Stack.pop_back();
4352 }
4353 
4354 void VmaJsonWriter::WriteString(const char* pStr)
4355 {
4356  BeginString(pStr);
4357  EndString();
4358 }
4359 
4360 void VmaJsonWriter::BeginString(const char* pStr)
4361 {
4362  VMA_ASSERT(!m_InsideString);
4363 
4364  BeginValue(true);
4365  m_SB.Add('"');
4366  m_InsideString = true;
4367  if(pStr != VMA_NULL && pStr[0] != '\0')
4368  {
4369  ContinueString(pStr);
4370  }
4371 }
4372 
4373 void VmaJsonWriter::ContinueString(const char* pStr)
4374 {
4375  VMA_ASSERT(m_InsideString);
4376 
4377  const size_t strLen = strlen(pStr);
4378  for(size_t i = 0; i < strLen; ++i)
4379  {
4380  char ch = pStr[i];
4381  if(ch == '\\')
4382  {
4383  m_SB.Add("\\\\");
4384  }
4385  else if(ch == '"')
4386  {
4387  m_SB.Add("\\\"");
4388  }
4389  else if(ch >= 32)
4390  {
4391  m_SB.Add(ch);
4392  }
4393  else switch(ch)
4394  {
4395  case '\b':
4396  m_SB.Add("\\b");
4397  break;
4398  case '\f':
4399  m_SB.Add("\\f");
4400  break;
4401  case '\n':
4402  m_SB.Add("\\n");
4403  break;
4404  case '\r':
4405  m_SB.Add("\\r");
4406  break;
4407  case '\t':
4408  m_SB.Add("\\t");
4409  break;
4410  default:
4411  VMA_ASSERT(0 && "Character not currently supported.");
4412  break;
4413  }
4414  }
4415 }
4416 
4417 void VmaJsonWriter::ContinueString(uint32_t n)
4418 {
4419  VMA_ASSERT(m_InsideString);
4420  m_SB.AddNumber(n);
4421 }
4422 
4423 void VmaJsonWriter::ContinueString(uint64_t n)
4424 {
4425  VMA_ASSERT(m_InsideString);
4426  m_SB.AddNumber(n);
4427 }
4428 
4429 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4430 {
4431  VMA_ASSERT(m_InsideString);
4432  m_SB.AddPointer(ptr);
4433 }
4434 
4435 void VmaJsonWriter::EndString(const char* pStr)
4436 {
4437  VMA_ASSERT(m_InsideString);
4438  if(pStr != VMA_NULL && pStr[0] != '\0')
4439  {
4440  ContinueString(pStr);
4441  }
4442  m_SB.Add('"');
4443  m_InsideString = false;
4444 }
4445 
4446 void VmaJsonWriter::WriteNumber(uint32_t n)
4447 {
4448  VMA_ASSERT(!m_InsideString);
4449  BeginValue(false);
4450  m_SB.AddNumber(n);
4451 }
4452 
4453 void VmaJsonWriter::WriteNumber(uint64_t n)
4454 {
4455  VMA_ASSERT(!m_InsideString);
4456  BeginValue(false);
4457  m_SB.AddNumber(n);
4458 }
4459 
4460 void VmaJsonWriter::WriteBool(bool b)
4461 {
4462  VMA_ASSERT(!m_InsideString);
4463  BeginValue(false);
4464  m_SB.Add(b ? "true" : "false");
4465 }
4466 
4467 void VmaJsonWriter::WriteNull()
4468 {
4469  VMA_ASSERT(!m_InsideString);
4470  BeginValue(false);
4471  m_SB.Add("null");
4472 }
4473 
4474 void VmaJsonWriter::BeginValue(bool isString)
4475 {
4476  if(!m_Stack.empty())
4477  {
4478  StackItem& currItem = m_Stack.back();
4479  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4480  currItem.valueCount % 2 == 0)
4481  {
4482  VMA_ASSERT(isString);
4483  }
4484 
4485  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4486  currItem.valueCount % 2 != 0)
4487  {
4488  m_SB.Add(": ");
4489  }
4490  else if(currItem.valueCount > 0)
4491  {
4492  m_SB.Add(", ");
4493  WriteIndent();
4494  }
4495  else
4496  {
4497  WriteIndent();
4498  }
4499  ++currItem.valueCount;
4500  }
4501 }
4502 
4503 void VmaJsonWriter::WriteIndent(bool oneLess)
4504 {
4505  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4506  {
4507  m_SB.AddNewLine();
4508 
4509  size_t count = m_Stack.size();
4510  if(count > 0 && oneLess)
4511  {
4512  --count;
4513  }
4514  for(size_t i = 0; i < count; ++i)
4515  {
4516  m_SB.Add(INDENT);
4517  }
4518  }
4519 }
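// Usage sketch (hypothetical helper, not part of the library; alloc is assumed
// valid): inside an object, entries alternate key/value and keys must be
// strings - BeginValue() asserts this via valueCount parity. This snippet
// leaves sb holding: {"Name": "Pool", "Blocks": 3, "Empty": false}
static void VmaJsonWriterUsageSketch(VmaAllocator alloc)
{
    VmaStringBuilder sb(alloc);
    VmaJsonWriter json(alloc->GetAllocationCallbacks(), sb);
    json.BeginObject(true);        // singleLine = true: no newlines or indent.
    json.WriteString("Name");      // Key (even valueCount: must be a string).
    json.WriteString("Pool");      // Value: writer emits ": " before it.
    json.WriteString("Blocks");
    json.WriteNumber((uint32_t)3);
    json.WriteString("Empty");
    json.WriteBool(false);
    json.EndObject();
}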
4520 
4521 #endif // #if VMA_STATS_STRING_ENABLED
4522 
4524 
4525 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4526 {
4527  if(IsUserDataString())
4528  {
4529  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4530 
4531  FreeUserDataString(hAllocator);
4532 
4533  if(pUserData != VMA_NULL)
4534  {
4535  const char* const newStrSrc = (char*)pUserData;
4536  const size_t newStrLen = strlen(newStrSrc);
4537  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4538  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4539  m_pUserData = newStrDst;
4540  }
4541  }
4542  else
4543  {
4544  m_pUserData = pUserData;
4545  }
4546 }
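// Note: when IsUserDataString() is true (allocation created with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT), SetUserData() stores its
// own copy of the string, so the caller's buffer may be freed or reused
// immediately after the call.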
4547 
4548 void VmaAllocation_T::ChangeBlockAllocation(
4549  VmaAllocator hAllocator,
4550  VmaDeviceMemoryBlock* block,
4551  VkDeviceSize offset)
4552 {
4553  VMA_ASSERT(block != VMA_NULL);
4554  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4555 
4556  // Move mapping reference counter from old block to new block.
4557  if(block != m_BlockAllocation.m_Block)
4558  {
4559  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4560  if(IsPersistentMap())
4561  ++mapRefCount;
4562  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4563  block->Map(hAllocator, mapRefCount, VMA_NULL);
4564  }
4565 
4566  m_BlockAllocation.m_Block = block;
4567  m_BlockAllocation.m_Offset = offset;
4568 }
4569 
4570 VkDeviceSize VmaAllocation_T::GetOffset() const
4571 {
4572  switch(m_Type)
4573  {
4574  case ALLOCATION_TYPE_BLOCK:
4575  return m_BlockAllocation.m_Offset;
4576  case ALLOCATION_TYPE_DEDICATED:
4577  return 0;
4578  default:
4579  VMA_ASSERT(0);
4580  return 0;
4581  }
4582 }
4583 
4584 VkDeviceMemory VmaAllocation_T::GetMemory() const
4585 {
4586  switch(m_Type)
4587  {
4588  case ALLOCATION_TYPE_BLOCK:
4589  return m_BlockAllocation.m_Block->m_hMemory;
4590  case ALLOCATION_TYPE_DEDICATED:
4591  return m_DedicatedAllocation.m_hMemory;
4592  default:
4593  VMA_ASSERT(0);
4594  return VK_NULL_HANDLE;
4595  }
4596 }
4597 
4598 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4599 {
4600  switch(m_Type)
4601  {
4602  case ALLOCATION_TYPE_BLOCK:
4603  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4604  case ALLOCATION_TYPE_DEDICATED:
4605  return m_DedicatedAllocation.m_MemoryTypeIndex;
4606  default:
4607  VMA_ASSERT(0);
4608  return UINT32_MAX;
4609  }
4610 }
4611 
4612 void* VmaAllocation_T::GetMappedData() const
4613 {
4614  switch(m_Type)
4615  {
4616  case ALLOCATION_TYPE_BLOCK:
4617  if(m_MapCount != 0)
4618  {
4619  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4620  VMA_ASSERT(pBlockData != VMA_NULL);
4621  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4622  }
4623  else
4624  {
4625  return VMA_NULL;
4626  }
4627  break;
4628  case ALLOCATION_TYPE_DEDICATED:
4629  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4630  return m_DedicatedAllocation.m_pMappedData;
4631  default:
4632  VMA_ASSERT(0);
4633  return VMA_NULL;
4634  }
4635 }
4636 
4637 bool VmaAllocation_T::CanBecomeLost() const
4638 {
4639  switch(m_Type)
4640  {
4641  case ALLOCATION_TYPE_BLOCK:
4642  return m_BlockAllocation.m_CanBecomeLost;
4643  case ALLOCATION_TYPE_DEDICATED:
4644  return false;
4645  default:
4646  VMA_ASSERT(0);
4647  return false;
4648  }
4649 }
4650 
4651 VmaPool VmaAllocation_T::GetPool() const
4652 {
4653  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4654  return m_BlockAllocation.m_hPool;
4655 }
4656 
4657 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4658 {
4659  VMA_ASSERT(CanBecomeLost());
4660 
4661  /*
4662  Warning: This is a carefully designed algorithm.
4663  Do not modify unless you really know what you're doing :)
4664  */
4665  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4666  for(;;)
4667  {
4668  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4669  {
4670  VMA_ASSERT(0);
4671  return false;
4672  }
4673  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4674  {
4675  return false;
4676  }
4677  else // Last use time earlier than current time.
4678  {
4679  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4680  {
4681  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4682  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4683  return true;
4684  }
4685  }
4686  }
4687 }
4688 
4689 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4690 {
4691  VMA_ASSERT(IsUserDataString());
4692  if(m_pUserData != VMA_NULL)
4693  {
4694  char* const oldStr = (char*)m_pUserData;
4695  const size_t oldStrLen = strlen(oldStr);
4696  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4697  m_pUserData = VMA_NULL;
4698  }
4699 }
4700 
4701 void VmaAllocation_T::BlockAllocMap()
4702 {
4703  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4704 
4705  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4706  {
4707  ++m_MapCount;
4708  }
4709  else
4710  {
4711  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4712  }
4713 }
4714 
4715 void VmaAllocation_T::BlockAllocUnmap()
4716 {
4717  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4718 
4719  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4720  {
4721  --m_MapCount;
4722  }
4723  else
4724  {
4725  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4726  }
4727 }
4728 
4729 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4730 {
4731  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4732 
4733  if(m_MapCount != 0)
4734  {
4735  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4736  {
4737  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4738  *ppData = m_DedicatedAllocation.m_pMappedData;
4739  ++m_MapCount;
4740  return VK_SUCCESS;
4741  }
4742  else
4743  {
4744  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4745  return VK_ERROR_MEMORY_MAP_FAILED;
4746  }
4747  }
4748  else
4749  {
4750  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4751  hAllocator->m_hDevice,
4752  m_DedicatedAllocation.m_hMemory,
4753  0, // offset
4754  VK_WHOLE_SIZE,
4755  0, // flags
4756  ppData);
4757  if(result == VK_SUCCESS)
4758  {
4759  m_DedicatedAllocation.m_pMappedData = *ppData;
4760  m_MapCount = 1;
4761  }
4762  return result;
4763  }
4764 }
4765 
4766 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4767 {
4768  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4769 
4770  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4771  {
4772  --m_MapCount;
4773  if(m_MapCount == 0)
4774  {
4775  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4776  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4777  hAllocator->m_hDevice,
4778  m_DedicatedAllocation.m_hMemory);
4779  }
4780  }
4781  else
4782  {
4783  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4784  }
4785 }
4786 
4787 #if VMA_STATS_STRING_ENABLED
4788 
4789 // Names correspond to values of enum VmaSuballocationType.
4790 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4791  "FREE",
4792  "UNKNOWN",
4793  "BUFFER",
4794  "IMAGE_UNKNOWN",
4795  "IMAGE_LINEAR",
4796  "IMAGE_OPTIMAL",
4797 };
4798 
4799 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4800 {
4801  json.BeginObject();
4802 
4803  json.WriteString("Blocks");
4804  json.WriteNumber(stat.blockCount);
4805 
4806  json.WriteString("Allocations");
4807  json.WriteNumber(stat.allocationCount);
4808 
4809  json.WriteString("UnusedRanges");
4810  json.WriteNumber(stat.unusedRangeCount);
4811 
4812  json.WriteString("UsedBytes");
4813  json.WriteNumber(stat.usedBytes);
4814 
4815  json.WriteString("UnusedBytes");
4816  json.WriteNumber(stat.unusedBytes);
4817 
4818  if(stat.allocationCount > 1)
4819  {
4820  json.WriteString("AllocationSize");
4821  json.BeginObject(true);
4822  json.WriteString("Min");
4823  json.WriteNumber(stat.allocationSizeMin);
4824  json.WriteString("Avg");
4825  json.WriteNumber(stat.allocationSizeAvg);
4826  json.WriteString("Max");
4827  json.WriteNumber(stat.allocationSizeMax);
4828  json.EndObject();
4829  }
4830 
4831  if(stat.unusedRangeCount > 1)
4832  {
4833  json.WriteString("UnusedRangeSize");
4834  json.BeginObject(true);
4835  json.WriteString("Min");
4836  json.WriteNumber(stat.unusedRangeSizeMin);
4837  json.WriteString("Avg");
4838  json.WriteNumber(stat.unusedRangeSizeAvg);
4839  json.WriteString("Max");
4840  json.WriteNumber(stat.unusedRangeSizeMax);
4841  json.EndObject();
4842  }
4843 
4844  json.EndObject();
4845 }
4846 
4847 #endif // #if VMA_STATS_STRING_ENABLED
4848 
4849 struct VmaSuballocationItemSizeLess
4850 {
4851  bool operator()(
4852  const VmaSuballocationList::iterator lhs,
4853  const VmaSuballocationList::iterator rhs) const
4854  {
4855  return lhs->size < rhs->size;
4856  }
4857  bool operator()(
4858  const VmaSuballocationList::iterator lhs,
4859  VkDeviceSize rhsSize) const
4860  {
4861  return lhs->size < rhsSize;
4862  }
4863 };
4864 
4866 // class VmaBlockMetadata
4867 
4868 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4869  m_Size(0),
4870  m_FreeCount(0),
4871  m_SumFreeSize(0),
4872  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4873  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4874 {
4875 }
4876 
4877 VmaBlockMetadata::~VmaBlockMetadata()
4878 {
4879 }
4880 
4881 void VmaBlockMetadata::Init(VkDeviceSize size)
4882 {
4883  m_Size = size;
4884  m_FreeCount = 1;
4885  m_SumFreeSize = size;
4886 
4887  VmaSuballocation suballoc = {};
4888  suballoc.offset = 0;
4889  suballoc.size = size;
4890  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4891  suballoc.hAllocation = VK_NULL_HANDLE;
4892 
4893  m_Suballocations.push_back(suballoc);
4894  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4895  --suballocItem;
4896  m_FreeSuballocationsBySize.push_back(suballocItem);
4897 }
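// After Init(), the metadata holds exactly one FREE suballocation covering
// [0, size), so IsEmpty() returns true and GetUnusedRangeSizeMax() == size.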
4898 
4899 bool VmaBlockMetadata::Validate() const
4900 {
4901  if(m_Suballocations.empty())
4902  {
4903  return false;
4904  }
4905 
4906  // Expected offset of new suballocation as calculated from previous ones.
4907  VkDeviceSize calculatedOffset = 0;
4908  // Expected number of free suballocations as calculated from traversing their list.
4909  uint32_t calculatedFreeCount = 0;
4910  // Expected sum size of free suballocations as calculated from traversing their list.
4911  VkDeviceSize calculatedSumFreeSize = 0;
4912  // Expected number of free suballocations that should be registered in
4913  // m_FreeSuballocationsBySize calculated from traversing their list.
4914  size_t freeSuballocationsToRegister = 0;
4915  // True if previously visited suballocation was free.
4916  bool prevFree = false;
4917 
4918  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4919  suballocItem != m_Suballocations.cend();
4920  ++suballocItem)
4921  {
4922  const VmaSuballocation& subAlloc = *suballocItem;
4923 
4924  // Actual offset of this suballocation doesn't match expected one.
4925  if(subAlloc.offset != calculatedOffset)
4926  {
4927  return false;
4928  }
4929 
4930  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4931  // Two adjacent free suballocations are invalid. They should be merged.
4932  if(prevFree && currFree)
4933  {
4934  return false;
4935  }
4936 
4937  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4938  {
4939  return false;
4940  }
4941 
4942  if(currFree)
4943  {
4944  calculatedSumFreeSize += subAlloc.size;
4945  ++calculatedFreeCount;
4946  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4947  {
4948  ++freeSuballocationsToRegister;
4949  }
4950  }
4951  else
4952  {
4953  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
4954  {
4955  return false;
4956  }
4957  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
4958  {
4959  return false;
4960  }
4961  }
4962 
4963  calculatedOffset += subAlloc.size;
4964  prevFree = currFree;
4965  }
4966 
4967  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4968  // match expected one.
4969  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4970  {
4971  return false;
4972  }
4973 
4974  VkDeviceSize lastSize = 0;
4975  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4976  {
4977  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4978 
4979  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4980  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4981  {
4982  return false;
4983  }
4984  // They must be sorted by size ascending.
4985  if(suballocItem->size < lastSize)
4986  {
4987  return false;
4988  }
4989 
4990  lastSize = suballocItem->size;
4991  }
4992 
4993  // Check if totals match calculated values.
4994  if(!ValidateFreeSuballocationList() ||
4995  (calculatedOffset != m_Size) ||
4996  (calculatedSumFreeSize != m_SumFreeSize) ||
4997  (calculatedFreeCount != m_FreeCount))
4998  {
4999  return false;
5000  }
5001 
5002  return true;
5003 }
5004 
5005 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5006 {
5007  if(!m_FreeSuballocationsBySize.empty())
5008  {
5009  return m_FreeSuballocationsBySize.back()->size;
5010  }
5011  else
5012  {
5013  return 0;
5014  }
5015 }
5016 
5017 bool VmaBlockMetadata::IsEmpty() const
5018 {
5019  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5020 }
5021 
5022 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5023 {
5024  outInfo.blockCount = 1;
5025 
5026  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5027  outInfo.allocationCount = rangeCount - m_FreeCount;
5028  outInfo.unusedRangeCount = m_FreeCount;
5029 
5030  outInfo.unusedBytes = m_SumFreeSize;
5031  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5032 
5033  outInfo.allocationSizeMin = UINT64_MAX;
5034  outInfo.allocationSizeMax = 0;
5035  outInfo.unusedRangeSizeMin = UINT64_MAX;
5036  outInfo.unusedRangeSizeMax = 0;
5037 
5038  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5039  suballocItem != m_Suballocations.cend();
5040  ++suballocItem)
5041  {
5042  const VmaSuballocation& suballoc = *suballocItem;
5043  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5044  {
5045  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5046  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5047  }
5048  else
5049  {
5050  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5051  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5052  }
5053  }
5054 }
5055 
5056 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5057 {
5058  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5059 
5060  inoutStats.size += m_Size;
5061  inoutStats.unusedSize += m_SumFreeSize;
5062  inoutStats.allocationCount += rangeCount - m_FreeCount;
5063  inoutStats.unusedRangeCount += m_FreeCount;
5064  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5065 }
5066 
5067 #if VMA_STATS_STRING_ENABLED
5068 
5069 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5070 {
5071  json.BeginObject();
5072 
5073  json.WriteString("TotalBytes");
5074  json.WriteNumber(m_Size);
5075 
5076  json.WriteString("UnusedBytes");
5077  json.WriteNumber(m_SumFreeSize);
5078 
5079  json.WriteString("Allocations");
5080  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5081 
5082  json.WriteString("UnusedRanges");
5083  json.WriteNumber(m_FreeCount);
5084 
5085  json.WriteString("Suballocations");
5086  json.BeginArray();
5087  size_t i = 0;
5088  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5089  suballocItem != m_Suballocations.cend();
5090  ++suballocItem, ++i)
5091  {
5092  json.BeginObject(true);
5093 
5094  json.WriteString("Type");
5095  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5096 
5097  json.WriteString("Size");
5098  json.WriteNumber(suballocItem->size);
5099 
5100  json.WriteString("Offset");
5101  json.WriteNumber(suballocItem->offset);
5102 
5103  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5104  {
5105  const void* pUserData = suballocItem->hAllocation->GetUserData();
5106  if(pUserData != VMA_NULL)
5107  {
5108  json.WriteString("UserData");
5109  if(suballocItem->hAllocation->IsUserDataString())
5110  {
5111  json.WriteString((const char*)pUserData);
5112  }
5113  else
5114  {
5115  json.BeginString();
5116  json.ContinueString_Pointer(pUserData);
5117  json.EndString();
5118  }
5119  }
5120  }
5121 
5122  json.EndObject();
5123  }
5124  json.EndArray();
5125 
5126  json.EndObject();
5127 }
5128 
5129 #endif // #if VMA_STATS_STRING_ENABLED
5130 
5131 /*
5132 How many suitable free suballocations to analyze before choosing the best one.
5133 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
5134  be chosen.
5135 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5136  suballocations will be analyzed and the best one will be chosen.
5137 - Any other value is also acceptable.
5138 */
5139 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5140 
5141 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5142 {
5143  VMA_ASSERT(IsEmpty());
5144  pAllocationRequest->offset = 0;
5145  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5146  pAllocationRequest->sumItemSize = 0;
5147  pAllocationRequest->item = m_Suballocations.begin();
5148  pAllocationRequest->itemsToMakeLostCount = 0;
5149 }
5150 
5151 bool VmaBlockMetadata::CreateAllocationRequest(
5152  uint32_t currentFrameIndex,
5153  uint32_t frameInUseCount,
5154  VkDeviceSize bufferImageGranularity,
5155  VkDeviceSize allocSize,
5156  VkDeviceSize allocAlignment,
5157  VmaSuballocationType allocType,
5158  bool canMakeOtherLost,
5159  VmaAllocationRequest* pAllocationRequest)
5160 {
5161  VMA_ASSERT(allocSize > 0);
5162  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5163  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5164  VMA_HEAVY_ASSERT(Validate());
5165 
5166  // There is not enough total free space in this block to fulfill the request: Early return.
5167  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5168  {
5169  return false;
5170  }
5171 
5172  // New algorithm: efficiently search m_FreeSuballocationsBySize.
5173  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5174  if(freeSuballocCount > 0)
5175  {
5176  if(VMA_BEST_FIT)
5177  {
5178  // Find first free suballocation with size not less than allocSize.
5179  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5180  m_FreeSuballocationsBySize.data(),
5181  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5182  allocSize,
5183  VmaSuballocationItemSizeLess());
5184  size_t index = it - m_FreeSuballocationsBySize.data();
5185  for(; index < freeSuballocCount; ++index)
5186  {
5187  if(CheckAllocation(
5188  currentFrameIndex,
5189  frameInUseCount,
5190  bufferImageGranularity,
5191  allocSize,
5192  allocAlignment,
5193  allocType,
5194  m_FreeSuballocationsBySize[index],
5195  false, // canMakeOtherLost
5196  &pAllocationRequest->offset,
5197  &pAllocationRequest->itemsToMakeLostCount,
5198  &pAllocationRequest->sumFreeSize,
5199  &pAllocationRequest->sumItemSize))
5200  {
5201  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5202  return true;
5203  }
5204  }
5205  }
5206  else
5207  {
5208  // Search starting from biggest suballocations.
5209  for(size_t index = freeSuballocCount; index--; )
5210  {
5211  if(CheckAllocation(
5212  currentFrameIndex,
5213  frameInUseCount,
5214  bufferImageGranularity,
5215  allocSize,
5216  allocAlignment,
5217  allocType,
5218  m_FreeSuballocationsBySize[index],
5219  false, // canMakeOtherLost
5220  &pAllocationRequest->offset,
5221  &pAllocationRequest->itemsToMakeLostCount,
5222  &pAllocationRequest->sumFreeSize,
5223  &pAllocationRequest->sumItemSize))
5224  {
5225  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5226  return true;
5227  }
5228  }
5229  }
5230  }
5231 
5232  if(canMakeOtherLost)
5233  {
5234  // Brute-force algorithm. TODO: Come up with something better.
5235 
5236  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5237  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5238 
5239  VmaAllocationRequest tmpAllocRequest = {};
5240  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5241  suballocIt != m_Suballocations.end();
5242  ++suballocIt)
5243  {
5244  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5245  suballocIt->hAllocation->CanBecomeLost())
5246  {
5247  if(CheckAllocation(
5248  currentFrameIndex,
5249  frameInUseCount,
5250  bufferImageGranularity,
5251  allocSize,
5252  allocAlignment,
5253  allocType,
5254  suballocIt,
5255  canMakeOtherLost,
5256  &tmpAllocRequest.offset,
5257  &tmpAllocRequest.itemsToMakeLostCount,
5258  &tmpAllocRequest.sumFreeSize,
5259  &tmpAllocRequest.sumItemSize))
5260  {
5261  tmpAllocRequest.item = suballocIt;
5262 
5263  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5264  {
5265  *pAllocationRequest = tmpAllocRequest;
5266  }
5267  }
5268  }
5269  }
5270 
5271  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5272  {
5273  return true;
5274  }
5275  }
5276 
5277  return false;
5278 }
5279 
5280 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5281  uint32_t currentFrameIndex,
5282  uint32_t frameInUseCount,
5283  VmaAllocationRequest* pAllocationRequest)
5284 {
5285  while(pAllocationRequest->itemsToMakeLostCount > 0)
5286  {
5287  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5288  {
5289  ++pAllocationRequest->item;
5290  }
5291  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5292  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5293  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5294  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5295  {
5296  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5297  --pAllocationRequest->itemsToMakeLostCount;
5298  }
5299  else
5300  {
5301  return false;
5302  }
5303  }
5304 
5305  VMA_HEAVY_ASSERT(Validate());
5306  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5307  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5308 
5309  return true;
5310 }
5311 
5312 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5313 {
5314  uint32_t lostAllocationCount = 0;
5315  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5316  it != m_Suballocations.end();
5317  ++it)
5318  {
5319  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5320  it->hAllocation->CanBecomeLost() &&
5321  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5322  {
5323  it = FreeSuballocation(it);
5324  ++lostAllocationCount;
5325  }
5326  }
5327  return lostAllocationCount;
5328 }
5329 
5330 void VmaBlockMetadata::Alloc(
5331  const VmaAllocationRequest& request,
5332  VmaSuballocationType type,
5333  VkDeviceSize allocSize,
5334  VmaAllocation hAllocation)
5335 {
5336  VMA_ASSERT(request.item != m_Suballocations.end());
5337  VmaSuballocation& suballoc = *request.item;
5338  // Given suballocation is a free block.
5339  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5340  // Given offset is inside this suballocation.
5341  VMA_ASSERT(request.offset >= suballoc.offset);
5342  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5343  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5344  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5345 
5346  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5347  // it to become used.
5348  UnregisterFreeSuballocation(request.item);
5349 
5350  suballoc.offset = request.offset;
5351  suballoc.size = allocSize;
5352  suballoc.type = type;
5353  suballoc.hAllocation = hAllocation;
5354 
5355  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5356  if(paddingEnd)
5357  {
5358  VmaSuballocation paddingSuballoc = {};
5359  paddingSuballoc.offset = request.offset + allocSize;
5360  paddingSuballoc.size = paddingEnd;
5361  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5362  VmaSuballocationList::iterator next = request.item;
5363  ++next;
5364  const VmaSuballocationList::iterator paddingEndItem =
5365  m_Suballocations.insert(next, paddingSuballoc);
5366  RegisterFreeSuballocation(paddingEndItem);
5367  }
5368 
5369  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5370  if(paddingBegin)
5371  {
5372  VmaSuballocation paddingSuballoc = {};
5373  paddingSuballoc.offset = request.offset - paddingBegin;
5374  paddingSuballoc.size = paddingBegin;
5375  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5376  const VmaSuballocationList::iterator paddingBeginItem =
5377  m_Suballocations.insert(request.item, paddingSuballoc);
5378  RegisterFreeSuballocation(paddingBeginItem);
5379  }
5380 
5381  // Update totals.
5382  m_FreeCount = m_FreeCount - 1;
5383  if(paddingBegin > 0)
5384  {
5385  ++m_FreeCount;
5386  }
5387  if(paddingEnd > 0)
5388  {
5389  ++m_FreeCount;
5390  }
5391  m_SumFreeSize -= allocSize;
5392 }
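
// Worked example of the split above (illustrative numbers): allocating from a
// free suballocation { offset = 0, size = 256 } with request.offset = 64 and
// allocSize = 128 gives paddingBegin = 64 and paddingEnd = 64, so the list
// becomes [free 0..64) [used 64..192) [free 192..256). Net effect on the
// totals: m_FreeCount changes by -1 + 2 = +1 and m_SumFreeSize decreases by 128.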
5393 
5394 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5395 {
5396  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5397  suballocItem != m_Suballocations.end();
5398  ++suballocItem)
5399  {
5400  VmaSuballocation& suballoc = *suballocItem;
5401  if(suballoc.hAllocation == allocation)
5402  {
5403  FreeSuballocation(suballocItem);
5404  VMA_HEAVY_ASSERT(Validate());
5405  return;
5406  }
5407  }
5408  VMA_ASSERT(0 && "Not found!");
5409 }
5410 
5411 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5412 {
5413  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5414  suballocItem != m_Suballocations.end();
5415  ++suballocItem)
5416  {
5417  VmaSuballocation& suballoc = *suballocItem;
5418  if(suballoc.offset == offset)
5419  {
5420  FreeSuballocation(suballocItem);
5421  return;
5422  }
5423  }
5424  VMA_ASSERT(0 && "Not found!");
5425 }
5426 
5427 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5428 {
5429  VkDeviceSize lastSize = 0;
5430  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5431  {
5432  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5433 
5434  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5435  {
5436  VMA_ASSERT(0);
5437  return false;
5438  }
5439  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5440  {
5441  VMA_ASSERT(0);
5442  return false;
5443  }
5444  if(it->size < lastSize)
5445  {
5446  VMA_ASSERT(0);
5447  return false;
5448  }
5449 
5450  lastSize = it->size;
5451  }
5452  return true;
5453 }
5454 
5455 bool VmaBlockMetadata::CheckAllocation(
5456  uint32_t currentFrameIndex,
5457  uint32_t frameInUseCount,
5458  VkDeviceSize bufferImageGranularity,
5459  VkDeviceSize allocSize,
5460  VkDeviceSize allocAlignment,
5461  VmaSuballocationType allocType,
5462  VmaSuballocationList::const_iterator suballocItem,
5463  bool canMakeOtherLost,
5464  VkDeviceSize* pOffset,
5465  size_t* itemsToMakeLostCount,
5466  VkDeviceSize* pSumFreeSize,
5467  VkDeviceSize* pSumItemSize) const
5468 {
5469  VMA_ASSERT(allocSize > 0);
5470  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5471  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5472  VMA_ASSERT(pOffset != VMA_NULL);
5473 
5474  *itemsToMakeLostCount = 0;
5475  *pSumFreeSize = 0;
5476  *pSumItemSize = 0;
5477 
5478  if(canMakeOtherLost)
5479  {
5480  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5481  {
5482  *pSumFreeSize = suballocItem->size;
5483  }
5484  else
5485  {
5486  if(suballocItem->hAllocation->CanBecomeLost() &&
5487  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5488  {
5489  ++*itemsToMakeLostCount;
5490  *pSumItemSize = suballocItem->size;
5491  }
5492  else
5493  {
5494  return false;
5495  }
5496  }
5497 
5498  // Remaining size is too small for this request: Early return.
5499  if(m_Size - suballocItem->offset < allocSize)
5500  {
5501  return false;
5502  }
5503 
5504  // Start from offset equal to beginning of this suballocation.
5505  *pOffset = suballocItem->offset;
5506 
5507  // Apply VMA_DEBUG_MARGIN at the beginning.
5508  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5509  {
5510  *pOffset += VMA_DEBUG_MARGIN;
5511  }
5512 
5513  // Apply alignment.
5514  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5515  *pOffset = VmaAlignUp(*pOffset, alignment);
5516 
5517  // Check previous suballocations for BufferImageGranularity conflicts.
5518  // Make bigger alignment if necessary.
5519  if(bufferImageGranularity > 1)
5520  {
5521  bool bufferImageGranularityConflict = false;
5522  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5523  while(prevSuballocItem != m_Suballocations.cbegin())
5524  {
5525  --prevSuballocItem;
5526  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5527  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5528  {
5529  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5530  {
5531  bufferImageGranularityConflict = true;
5532  break;
5533  }
5534  }
5535  else
5536  // Already on previous page.
5537  break;
5538  }
5539  if(bufferImageGranularityConflict)
5540  {
5541  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5542  }
5543  }
5544 
5545  // Now that we have final *pOffset, check if we are past suballocItem.
5546  // If yes, return false - this function should be called for another suballocItem as starting point.
5547  if(*pOffset >= suballocItem->offset + suballocItem->size)
5548  {
5549  return false;
5550  }
5551 
5552  // Calculate padding at the beginning based on current offset.
5553  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5554 
5555  // Calculate required margin at the end if this is not last suballocation.
5556  VmaSuballocationList::const_iterator next = suballocItem;
5557  ++next;
5558  const VkDeviceSize requiredEndMargin =
5559  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5560 
5561  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5562  // Another early return check.
5563  if(suballocItem->offset + totalSize > m_Size)
5564  {
5565  return false;
5566  }
5567 
5568  // Advance lastSuballocItem until desired size is reached.
5569  // Update itemsToMakeLostCount.
5570  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5571  if(totalSize > suballocItem->size)
5572  {
5573  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5574  while(remainingSize > 0)
5575  {
5576  ++lastSuballocItem;
5577  if(lastSuballocItem == m_Suballocations.cend())
5578  {
5579  return false;
5580  }
5581  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5582  {
5583  *pSumFreeSize += lastSuballocItem->size;
5584  }
5585  else
5586  {
5587  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5588  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5589  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5590  {
5591  ++*itemsToMakeLostCount;
5592  *pSumItemSize += lastSuballocItem->size;
5593  }
5594  else
5595  {
5596  return false;
5597  }
5598  }
5599  remainingSize = (lastSuballocItem->size < remainingSize) ?
5600  remainingSize - lastSuballocItem->size : 0;
5601  }
5602  }
5603 
5604  // Check next suballocations for BufferImageGranularity conflicts.
5605  // If conflict exists, we must mark more allocations lost or fail.
5606  if(bufferImageGranularity > 1)
5607  {
5608  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5609  ++nextSuballocItem;
5610  while(nextSuballocItem != m_Suballocations.cend())
5611  {
5612  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5613  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5614  {
5615  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5616  {
5617  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5618  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5619  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5620  {
5621  ++*itemsToMakeLostCount;
5622  }
5623  else
5624  {
5625  return false;
5626  }
5627  }
5628  }
5629  else
5630  {
5631  // Already on next page.
5632  break;
5633  }
5634  ++nextSuballocItem;
5635  }
5636  }
5637  }
5638  else
5639  {
5640  const VmaSuballocation& suballoc = *suballocItem;
5641  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5642 
5643  *pSumFreeSize = suballoc.size;
5644 
5645  // Size of this suballocation is too small for this request: Early return.
5646  if(suballoc.size < allocSize)
5647  {
5648  return false;
5649  }
5650 
5651  // Start from offset equal to beginning of this suballocation.
5652  *pOffset = suballoc.offset;
5653 
5654  // Apply VMA_DEBUG_MARGIN at the beginning.
5655  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5656  {
5657  *pOffset += VMA_DEBUG_MARGIN;
5658  }
5659 
5660  // Apply alignment.
5661  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5662  *pOffset = VmaAlignUp(*pOffset, alignment);
5663 
5664  // Check previous suballocations for BufferImageGranularity conflicts.
5665  // Make bigger alignment if necessary.
5666  if(bufferImageGranularity > 1)
5667  {
5668  bool bufferImageGranularityConflict = false;
5669  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5670  while(prevSuballocItem != m_Suballocations.cbegin())
5671  {
5672  --prevSuballocItem;
5673  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5674  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5675  {
5676  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5677  {
5678  bufferImageGranularityConflict = true;
5679  break;
5680  }
5681  }
5682  else
5683  // Already on previous page.
5684  break;
5685  }
5686  if(bufferImageGranularityConflict)
5687  {
5688  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5689  }
5690  }
5691 
5692  // Calculate padding at the beginning based on current offset.
5693  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5694 
5695  // Calculate required margin at the end if this is not last suballocation.
5696  VmaSuballocationList::const_iterator next = suballocItem;
5697  ++next;
5698  const VkDeviceSize requiredEndMargin =
5699  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5700 
5701  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5702  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5703  {
5704  return false;
5705  }
5706 
5707  // Check next suballocations for BufferImageGranularity conflicts.
5708  // If conflict exists, allocation cannot be made here.
5709  if(bufferImageGranularity > 1)
5710  {
5711  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5712  ++nextSuballocItem;
5713  while(nextSuballocItem != m_Suballocations.cend())
5714  {
5715  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5716  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5717  {
5718  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5719  {
5720  return false;
5721  }
5722  }
5723  else
5724  {
5725  // Already on next page.
5726  break;
5727  }
5728  ++nextSuballocItem;
5729  }
5730  }
5731  }
5732 
5733  // All tests passed: Success. pOffset is already filled.
5734  return true;
5735 }
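
// Worked example of the offset computation above (illustrative numbers,
// assuming VMA_DEBUG_MARGIN = 0 and bufferImageGranularity = 1): for a free
// suballocation at offset = 100, size = 200 and allocAlignment = 64, *pOffset
// is aligned up from 100 to 128, paddingBegin = 28, and the request fits as
// long as 28 + allocSize <= 200, i.e. allocSize <= 172. With
// bufferImageGranularity > 1, a linear/optimal conflict with a neighbor on the
// same "page" would additionally align *pOffset up to the granularity.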
5736 
5737 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5738 {
5739  VMA_ASSERT(item != m_Suballocations.end());
5740  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5741 
5742  VmaSuballocationList::iterator nextItem = item;
5743  ++nextItem;
5744  VMA_ASSERT(nextItem != m_Suballocations.end());
5745  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5746 
5747  item->size += nextItem->size;
5748  --m_FreeCount;
5749  m_Suballocations.erase(nextItem);
5750 }
5751 
5752 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5753 {
5754  // Change this suballocation to be marked as free.
5755  VmaSuballocation& suballoc = *suballocItem;
5756  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5757  suballoc.hAllocation = VK_NULL_HANDLE;
5758 
5759  // Update totals.
5760  ++m_FreeCount;
5761  m_SumFreeSize += suballoc.size;
5762 
5763  // Merge with previous and/or next suballocation if it's also free.
5764  bool mergeWithNext = false;
5765  bool mergeWithPrev = false;
5766 
5767  VmaSuballocationList::iterator nextItem = suballocItem;
5768  ++nextItem;
5769  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5770  {
5771  mergeWithNext = true;
5772  }
5773 
5774  VmaSuballocationList::iterator prevItem = suballocItem;
5775  if(suballocItem != m_Suballocations.begin())
5776  {
5777  --prevItem;
5778  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5779  {
5780  mergeWithPrev = true;
5781  }
5782  }
5783 
5784  if(mergeWithNext)
5785  {
5786  UnregisterFreeSuballocation(nextItem);
5787  MergeFreeWithNext(suballocItem);
5788  }
5789 
5790  if(mergeWithPrev)
5791  {
5792  UnregisterFreeSuballocation(prevItem);
5793  MergeFreeWithNext(prevItem);
5794  RegisterFreeSuballocation(prevItem);
5795  return prevItem;
5796  }
5797  else
5798  {
5799  RegisterFreeSuballocation(suballocItem);
5800  return suballocItem;
5801  }
5802 }
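
// Merging works in two steps: merging with the next free neighbor grows
// suballocItem in place, while merging with the previous one grows prevItem
// and erases suballocItem. In the [free][freed][free] case all three therefore
// collapse into the original previous item, which is then re-registered with
// its new, larger size.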
5803 
5804 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5805 {
5806  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5807  VMA_ASSERT(item->size > 0);
5808 
5809  // You may want to enable this validation at the beginning or at the end of
5810  // this function, depending on what you want to check.
5811  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5812 
5813  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5814  {
5815  if(m_FreeSuballocationsBySize.empty())
5816  {
5817  m_FreeSuballocationsBySize.push_back(item);
5818  }
5819  else
5820  {
5821  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5822  }
5823  }
5824 
5825  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5826 }
5827 
5828 
5829 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5830 {
5831  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5832  VMA_ASSERT(item->size > 0);
5833 
5834  // You may want to enable this validation at the beginning or at the end of
5835  // this function, depending on what you want to check.
5836  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5837 
5838  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5839  {
5840  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5841  m_FreeSuballocationsBySize.data(),
5842  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5843  item,
5844  VmaSuballocationItemSizeLess());
5845  for(size_t index = it - m_FreeSuballocationsBySize.data();
5846  index < m_FreeSuballocationsBySize.size();
5847  ++index)
5848  {
5849  if(m_FreeSuballocationsBySize[index] == item)
5850  {
5851  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5852  return;
5853  }
5854  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5855  }
5856  VMA_ASSERT(0 && "Not found.");
5857  }
5858 
5859  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5860 }
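
// m_FreeSuballocationsBySize may contain many entries of equal size, so the
// binary search above only finds the first candidate whose size is not less
// than item->size; the subsequent linear scan compares iterators to find the
// exact entry. The assert guarantees the scan never walks past the run of
// equal-sized entries without finding it.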
5861 
5862 ////////////////////////////////////////////////////////////////////////////////
5863 // class VmaDeviceMemoryMapping
5864 
5865 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5866  m_MapCount(0),
5867  m_pMappedData(VMA_NULL)
5868 {
5869 }
5870 
5871 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5872 {
5873  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5874 }
5875 
5876 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5877 {
5878  if(count == 0)
5879  {
5880  return VK_SUCCESS;
5881  }
5882 
5883  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5884  if(m_MapCount != 0)
5885  {
5886  m_MapCount += count;
5887  VMA_ASSERT(m_pMappedData != VMA_NULL);
5888  if(ppData != VMA_NULL)
5889  {
5890  *ppData = m_pMappedData;
5891  }
5892  return VK_SUCCESS;
5893  }
5894  else
5895  {
5896  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5897  hAllocator->m_hDevice,
5898  hMemory,
5899  0, // offset
5900  VK_WHOLE_SIZE,
5901  0, // flags
5902  &m_pMappedData);
5903  if(result == VK_SUCCESS)
5904  {
5905  if(ppData != VMA_NULL)
5906  {
5907  *ppData = m_pMappedData;
5908  }
5909  m_MapCount = count;
5910  }
5911  return result;
5912  }
5913 }
5914 
5915 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
5916 {
5917  if(count == 0)
5918  {
5919  return;
5920  }
5921 
5922  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5923  if(m_MapCount >= count)
5924  {
5925  m_MapCount -= count;
5926  if(m_MapCount == 0)
5927  {
5928  m_pMappedData = VMA_NULL;
5929  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5930  }
5931  }
5932  else
5933  {
5934  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5935  }
5936 }
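
// Map/Unmap are reference counted: only the first Map calls vkMapMemory and
// only the matching last Unmap calls vkUnmapMemory. A minimal sketch of direct
// use (hypothetical - these are internal classes, normally driven via
// vmaMapMemory/vmaUnmapMemory):
//
//     void* pData = VMA_NULL;
//     mapping.Map(hAllocator, hMemory, 1, &pData);    // maps, m_MapCount = 1
//     mapping.Map(hAllocator, hMemory, 1, VMA_NULL);  // reuses, m_MapCount = 2
//     mapping.Unmap(hAllocator, hMemory, 1);          // m_MapCount = 1
//     mapping.Unmap(hAllocator, hMemory, 1);          // unmaps, m_MapCount = 0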
5937 
5938 ////////////////////////////////////////////////////////////////////////////////
5939 // class VmaDeviceMemoryBlock
5940 
5941 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5942  m_MemoryTypeIndex(UINT32_MAX),
5943  m_hMemory(VK_NULL_HANDLE),
5944  m_Metadata(hAllocator)
5945 {
5946 }
5947 
5948 void VmaDeviceMemoryBlock::Init(
5949  uint32_t newMemoryTypeIndex,
5950  VkDeviceMemory newMemory,
5951  VkDeviceSize newSize)
5952 {
5953  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5954 
5955  m_MemoryTypeIndex = newMemoryTypeIndex;
5956  m_hMemory = newMemory;
5957 
5958  m_Metadata.Init(newSize);
5959 }
5960 
5961 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5962 {
5963  // This is the most important assert in the entire library.
5964  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5965  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5966 
5967  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5968  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5969  m_hMemory = VK_NULL_HANDLE;
5970 }
5971 
5972 bool VmaDeviceMemoryBlock::Validate() const
5973 {
5974  if((m_hMemory == VK_NULL_HANDLE) ||
5975  (m_Metadata.GetSize() == 0))
5976  {
5977  return false;
5978  }
5979 
5980  return m_Metadata.Validate();
5981 }
5982 
5983 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
5984 {
5985  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
5986 }
5987 
5988 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
5989 {
5990  m_Mapping.Unmap(hAllocator, m_hMemory, count);
5991 }
5992 
5993 static void InitStatInfo(VmaStatInfo& outInfo)
5994 {
5995  memset(&outInfo, 0, sizeof(outInfo));
5996  outInfo.allocationSizeMin = UINT64_MAX;
5997  outInfo.unusedRangeSizeMin = UINT64_MAX;
5998 }
5999 
6000 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6001 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6002 {
6003  inoutInfo.blockCount += srcInfo.blockCount;
6004  inoutInfo.allocationCount += srcInfo.allocationCount;
6005  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6006  inoutInfo.usedBytes += srcInfo.usedBytes;
6007  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6008  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6009  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6010  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6011  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6012 }
6013 
6014 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6015 {
6016  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6017  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6018  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6019  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6020 }
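
// Example of the rounding average (illustrative numbers): with
// usedBytes = 1000 and allocationCount = 3, VmaRoundDiv yields
// allocationSizeAvg = 333; with unusedBytes = 10 and unusedRangeCount = 4 it
// yields unusedRangeSizeAvg = 3 (rounded to nearest, not truncated: 2.5 -> 3).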
6021 
6022 VmaPool_T::VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo) :
6025  m_BlockVector(
6026  hAllocator,
6027  createInfo.memoryTypeIndex,
6028  createInfo.blockSize,
6029  createInfo.minBlockCount,
6030  createInfo.maxBlockCount,
6031  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6032  createInfo.frameInUseCount,
6033  true) // isCustomPool
6034 {
6035 }
6036 
6037 VmaPool_T::~VmaPool_T()
6038 {
6039 }
6040 
6041 #if VMA_STATS_STRING_ENABLED
6042 
6043 #endif // #if VMA_STATS_STRING_ENABLED
6044 
6045 VmaBlockVector::VmaBlockVector(
6046  VmaAllocator hAllocator,
6047  uint32_t memoryTypeIndex,
6048  VkDeviceSize preferredBlockSize,
6049  size_t minBlockCount,
6050  size_t maxBlockCount,
6051  VkDeviceSize bufferImageGranularity,
6052  uint32_t frameInUseCount,
6053  bool isCustomPool) :
6054  m_hAllocator(hAllocator),
6055  m_MemoryTypeIndex(memoryTypeIndex),
6056  m_PreferredBlockSize(preferredBlockSize),
6057  m_MinBlockCount(minBlockCount),
6058  m_MaxBlockCount(maxBlockCount),
6059  m_BufferImageGranularity(bufferImageGranularity),
6060  m_FrameInUseCount(frameInUseCount),
6061  m_IsCustomPool(isCustomPool),
6062  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6063  m_HasEmptyBlock(false),
6064  m_pDefragmentator(VMA_NULL)
6065 {
6066 }
6067 
6068 VmaBlockVector::~VmaBlockVector()
6069 {
6070  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6071 
6072  for(size_t i = m_Blocks.size(); i--; )
6073  {
6074  m_Blocks[i]->Destroy(m_hAllocator);
6075  vma_delete(m_hAllocator, m_Blocks[i]);
6076  }
6077 }
6078 
6079 VkResult VmaBlockVector::CreateMinBlocks()
6080 {
6081  for(size_t i = 0; i < m_MinBlockCount; ++i)
6082  {
6083  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6084  if(res != VK_SUCCESS)
6085  {
6086  return res;
6087  }
6088  }
6089  return VK_SUCCESS;
6090 }
6091 
6092 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6093 {
6094  pStats->size = 0;
6095  pStats->unusedSize = 0;
6096  pStats->allocationCount = 0;
6097  pStats->unusedRangeCount = 0;
6098  pStats->unusedRangeSizeMax = 0;
6099 
6100  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6101 
6102  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6103  {
6104  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6105  VMA_ASSERT(pBlock);
6106  VMA_HEAVY_ASSERT(pBlock->Validate());
6107  pBlock->m_Metadata.AddPoolStats(*pStats);
6108  }
6109 }
6110 
6111 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6112 
6113 VkResult VmaBlockVector::Allocate(
6114  VmaPool hCurrentPool,
6115  uint32_t currentFrameIndex,
6116  const VkMemoryRequirements& vkMemReq,
6117  const VmaAllocationCreateInfo& createInfo,
6118  VmaSuballocationType suballocType,
6119  VmaAllocation* pAllocation)
6120 {
6121  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6122  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6123 
6124  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6125 
6126  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6127  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6128  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6129  {
6130  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6131  VMA_ASSERT(pCurrBlock);
6132  VmaAllocationRequest currRequest = {};
6133  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6134  currentFrameIndex,
6135  m_FrameInUseCount,
6136  m_BufferImageGranularity,
6137  vkMemReq.size,
6138  vkMemReq.alignment,
6139  suballocType,
6140  false, // canMakeOtherLost
6141  &currRequest))
6142  {
6143  // Allocate from pCurrBlock.
6144  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6145 
6146  if(mapped)
6147  {
6148  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6149  if(res != VK_SUCCESS)
6150  {
6151  return res;
6152  }
6153  }
6154 
6155  // We no longer have an empty block.
6156  if(pCurrBlock->m_Metadata.IsEmpty())
6157  {
6158  m_HasEmptyBlock = false;
6159  }
6160 
6161  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6162  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6163  (*pAllocation)->InitBlockAllocation(
6164  hCurrentPool,
6165  pCurrBlock,
6166  currRequest.offset,
6167  vkMemReq.alignment,
6168  vkMemReq.size,
6169  suballocType,
6170  mapped,
6171  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6172  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6173  VMA_DEBUG_LOG("    Returned from existing block #%u", (uint32_t)blockIndex);
6174  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6175  return VK_SUCCESS;
6176  }
6177  }
6178 
6179  const bool canCreateNewBlock =
6180  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6181  (m_Blocks.size() < m_MaxBlockCount);
6182 
6183  // 2. Try to create new block.
6184  if(canCreateNewBlock)
6185  {
6186  // Calculate optimal size for new block.
6187  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6188  uint32_t newBlockSizeShift = 0;
6189  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6190 
6191  // Allocating blocks of other sizes is allowed only in default pools.
6192  // In custom pools block size is fixed.
6193  if(m_IsCustomPool == false)
6194  {
6195  // Allocate 1/8, 1/4, 1/2 as first blocks.
6196  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6197  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6198  {
6199  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6200  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6201  {
6202  newBlockSize = smallerNewBlockSize;
6203  ++newBlockSizeShift;
6204  }
6205  else
6206  {
6207  break;
6208  }
6209  }
6210  }
6211 
6212  size_t newBlockIndex = 0;
6213  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6214  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6215  if(m_IsCustomPool == false)
6216  {
6217  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6218  {
6219  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6220  if(smallerNewBlockSize >= vkMemReq.size)
6221  {
6222  newBlockSize = smallerNewBlockSize;
6223  ++newBlockSizeShift;
6224  res = CreateBlock(newBlockSize, &newBlockIndex);
6225  }
6226  else
6227  {
6228  break;
6229  }
6230  }
6231  }
6232 
6233  if(res == VK_SUCCESS)
6234  {
6235  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6236  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6237 
6238  if(mapped)
6239  {
6240  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6241  if(res != VK_SUCCESS)
6242  {
6243  return res;
6244  }
6245  }
6246 
6247  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6248  VmaAllocationRequest allocRequest;
6249  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6250  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6251  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6252  (*pAllocation)->InitBlockAllocation(
6253  hCurrentPool,
6254  pBlock,
6255  allocRequest.offset,
6256  vkMemReq.alignment,
6257  vkMemReq.size,
6258  suballocType,
6259  mapped,
6260  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6261  VMA_HEAVY_ASSERT(pBlock->Validate());
6262  VMA_DEBUG_LOG("    Created new block Size=%llu", newBlockSize);
6263  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6264  return VK_SUCCESS;
6265  }
6266  }
6267 
6268  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6269 
6270  // 3. Try to allocate from existing blocks with making other allocations lost.
6271  if(canMakeOtherLost)
6272  {
6273  uint32_t tryIndex = 0;
6274  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6275  {
6276  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6277  VmaAllocationRequest bestRequest = {};
6278  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6279 
6280  // 1. Search existing allocations.
6281  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6282  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6283  {
6284  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6285  VMA_ASSERT(pCurrBlock);
6286  VmaAllocationRequest currRequest = {};
6287  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6288  currentFrameIndex,
6289  m_FrameInUseCount,
6290  m_BufferImageGranularity,
6291  vkMemReq.size,
6292  vkMemReq.alignment,
6293  suballocType,
6294  canMakeOtherLost,
6295  &currRequest))
6296  {
6297  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6298  if(pBestRequestBlock == VMA_NULL ||
6299  currRequestCost < bestRequestCost)
6300  {
6301  pBestRequestBlock = pCurrBlock;
6302  bestRequest = currRequest;
6303  bestRequestCost = currRequestCost;
6304 
6305  if(bestRequestCost == 0)
6306  {
6307  break;
6308  }
6309  }
6310  }
6311  }
6312 
6313  if(pBestRequestBlock != VMA_NULL)
6314  {
6315  if(mapped)
6316  {
6317  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6318  if(res != VK_SUCCESS)
6319  {
6320  return res;
6321  }
6322  }
6323 
6324  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6325  currentFrameIndex,
6326  m_FrameInUseCount,
6327  &bestRequest))
6328  {
6329  // We no longer have an empty block.
6330  if(pBestRequestBlock->m_Metadata.IsEmpty())
6331  {
6332  m_HasEmptyBlock = false;
6333  }
6334  // Allocate from this pBlock.
6335  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6336  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6337  (*pAllocation)->InitBlockAllocation(
6338  hCurrentPool,
6339  pBestRequestBlock,
6340  bestRequest.offset,
6341  vkMemReq.alignment,
6342  vkMemReq.size,
6343  suballocType,
6344  mapped,
6345  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6346  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6347  VMA_DEBUG_LOG("    Returned from existing block");
6348  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6349  return VK_SUCCESS;
6350  }
6351  // else: Some allocations must have been touched while we are here. Next try.
6352  }
6353  else
6354  {
6355  // Could not find place in any of the blocks - break outer loop.
6356  break;
6357  }
6358  }
6359  /* Maximum number of tries exceeded - a very unlikely event that can happen
6360  when many other threads are simultaneously touching allocations, making it
6361  impossible to mark them lost at the same time as we try to allocate. */
6362  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6363  {
6364  return VK_ERROR_TOO_MANY_OBJECTS;
6365  }
6366  }
6367 
6368  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6369 }
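
// Allocation strategy summary: (1) try existing blocks without disturbing
// anything, (2) create a new block, shrinking the size when the heap refuses,
// (3) as a last resort, make stale "lost-able" allocations lost. A sketch of
// the size back-off, assuming a 256 MiB preferred block size: the first block
// of a default pool may be created at 32, 64 or 128 MiB (1/8, 1/4, 1/2), and a
// failed vkAllocateMemory likewise retries at 128, 64 and 32 MiB before
// giving up.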
6370 
6371 void VmaBlockVector::Free(
6372  VmaAllocation hAllocation)
6373 {
6374  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6375 
6376  // Scope for lock.
6377  {
6378  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6379 
6380  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6381 
6382  if(hAllocation->IsPersistentMap())
6383  {
6384  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6385  }
6386 
6387  pBlock->m_Metadata.Free(hAllocation);
6388  VMA_HEAVY_ASSERT(pBlock->Validate());
6389 
6390  VMA_DEBUG_LOG("    Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6391 
6392  // pBlock became empty after this deallocation.
6393  if(pBlock->m_Metadata.IsEmpty())
6394  {
6395  // Already has an empty block. We don't want to have two, so delete this one.
6396  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6397  {
6398  pBlockToDelete = pBlock;
6399  Remove(pBlock);
6400  }
6401  // We now have our first empty block.
6402  else
6403  {
6404  m_HasEmptyBlock = true;
6405  }
6406  }
6407  // pBlock didn't become empty, but we have another empty block - find and free that one.
6408  // (This is optional, heuristics.)
6409  else if(m_HasEmptyBlock)
6410  {
6411  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6412  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6413  {
6414  pBlockToDelete = pLastBlock;
6415  m_Blocks.pop_back();
6416  m_HasEmptyBlock = false;
6417  }
6418  }
6419 
6420  IncrementallySortBlocks();
6421  }
6422 
6423  // Destruction of an empty block. Deferred until this point, outside of the
6424  // mutex lock, for performance reasons.
6425  if(pBlockToDelete != VMA_NULL)
6426  {
6427  VMA_DEBUG_LOG("    Deleted empty block");
6428  pBlockToDelete->Destroy(m_hAllocator);
6429  vma_delete(m_hAllocator, pBlockToDelete);
6430  }
6431 }
6432 
6433 size_t VmaBlockVector::CalcMaxBlockSize() const
6434 {
6435  size_t result = 0;
6436  for(size_t i = m_Blocks.size(); i--; )
6437  {
6438  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6439  if(result >= m_PreferredBlockSize)
6440  {
6441  break;
6442  }
6443  }
6444  return result;
6445 }
6446 
6447 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6448 {
6449  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6450  {
6451  if(m_Blocks[blockIndex] == pBlock)
6452  {
6453  VmaVectorRemove(m_Blocks, blockIndex);
6454  return;
6455  }
6456  }
6457  VMA_ASSERT(0);
6458 }
6459 
6460 void VmaBlockVector::IncrementallySortBlocks()
6461 {
6462  // Bubble sort only until first swap.
6463  for(size_t i = 1; i < m_Blocks.size(); ++i)
6464  {
6465  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6466  {
6467  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6468  return;
6469  }
6470  }
6471 }
6472 
6473 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6474 {
6475  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6476  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6477  allocInfo.allocationSize = blockSize;
6478  VkDeviceMemory mem = VK_NULL_HANDLE;
6479  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6480  if(res < 0)
6481  {
6482  return res;
6483  }
6484 
6485  // New VkDeviceMemory successfully created.
6486 
6487  // Create a new block object for it.
6488  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6489  pBlock->Init(
6490  m_MemoryTypeIndex,
6491  mem,
6492  allocInfo.allocationSize);
6493 
6494  m_Blocks.push_back(pBlock);
6495  if(pNewBlockIndex != VMA_NULL)
6496  {
6497  *pNewBlockIndex = m_Blocks.size() - 1;
6498  }
6499 
6500  return VK_SUCCESS;
6501 }
6502 
6503 #if VMA_STATS_STRING_ENABLED
6504 
6505 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6506 {
6507  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6508 
6509  json.BeginObject();
6510 
6511  if(m_IsCustomPool)
6512  {
6513  json.WriteString("MemoryTypeIndex");
6514  json.WriteNumber(m_MemoryTypeIndex);
6515 
6516  json.WriteString("BlockSize");
6517  json.WriteNumber(m_PreferredBlockSize);
6518 
6519  json.WriteString("BlockCount");
6520  json.BeginObject(true);
6521  if(m_MinBlockCount > 0)
6522  {
6523  json.WriteString("Min");
6524  json.WriteNumber((uint64_t)m_MinBlockCount);
6525  }
6526  if(m_MaxBlockCount < SIZE_MAX)
6527  {
6528  json.WriteString("Max");
6529  json.WriteNumber((uint64_t)m_MaxBlockCount);
6530  }
6531  json.WriteString("Cur");
6532  json.WriteNumber((uint64_t)m_Blocks.size());
6533  json.EndObject();
6534 
6535  if(m_FrameInUseCount > 0)
6536  {
6537  json.WriteString("FrameInUseCount");
6538  json.WriteNumber(m_FrameInUseCount);
6539  }
6540  }
6541  else
6542  {
6543  json.WriteString("PreferredBlockSize");
6544  json.WriteNumber(m_PreferredBlockSize);
6545  }
6546 
6547  json.WriteString("Blocks");
6548  json.BeginArray();
6549  for(size_t i = 0; i < m_Blocks.size(); ++i)
6550  {
6551  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6552  }
6553  json.EndArray();
6554 
6555  json.EndObject();
6556 }
6557 
6558 #endif // #if VMA_STATS_STRING_ENABLED
6559 
6560 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6561  VmaAllocator hAllocator,
6562  uint32_t currentFrameIndex)
6563 {
6564  if(m_pDefragmentator == VMA_NULL)
6565  {
6566  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6567  hAllocator,
6568  this,
6569  currentFrameIndex);
6570  }
6571 
6572  return m_pDefragmentator;
6573 }
6574 
6575 VkResult VmaBlockVector::Defragment(
6576  VmaDefragmentationStats* pDefragmentationStats,
6577  VkDeviceSize& maxBytesToMove,
6578  uint32_t& maxAllocationsToMove)
6579 {
6580  if(m_pDefragmentator == VMA_NULL)
6581  {
6582  return VK_SUCCESS;
6583  }
6584 
6585  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6586 
6587  // Defragment.
6588  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6589 
6590  // Accumulate statistics.
6591  if(pDefragmentationStats != VMA_NULL)
6592  {
6593  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6594  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6595  pDefragmentationStats->bytesMoved += bytesMoved;
6596  pDefragmentationStats->allocationsMoved += allocationsMoved;
6597  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6598  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6599  maxBytesToMove -= bytesMoved;
6600  maxAllocationsToMove -= allocationsMoved;
6601  }
6602 
6603  // Free empty blocks.
6604  m_HasEmptyBlock = false;
6605  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6606  {
6607  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6608  if(pBlock->m_Metadata.IsEmpty())
6609  {
6610  if(m_Blocks.size() > m_MinBlockCount)
6611  {
6612  if(pDefragmentationStats != VMA_NULL)
6613  {
6614  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6615  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6616  }
6617 
6618  VmaVectorRemove(m_Blocks, blockIndex);
6619  pBlock->Destroy(m_hAllocator);
6620  vma_delete(m_hAllocator, pBlock);
6621  }
6622  else
6623  {
6624  m_HasEmptyBlock = true;
6625  }
6626  }
6627  }
6628 
6629  return result;
6630 }
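
// A minimal sketch of driving defragmentation through the public API
// (assuming `allocator` and an `allocations` array the caller owns):
//
//     VmaDefragmentationInfo defragInfo = {};
//     defragInfo.maxBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxAllocationsToMove = UINT32_MAX;
//     VmaDefragmentationStats stats = {};
//     vmaDefragment(allocator, allocations, allocationCount,
//         VMA_NULL /* pAllocationsChanged */, &defragInfo, &stats);
//
// stats.bytesMoved / stats.allocationsMoved describe the work done; buffers or
// images bound to moved allocations must be recreated and rebound by the caller.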
6631 
6632 void VmaBlockVector::DestroyDefragmentator()
6633 {
6634  if(m_pDefragmentator != VMA_NULL)
6635  {
6636  vma_delete(m_hAllocator, m_pDefragmentator);
6637  m_pDefragmentator = VMA_NULL;
6638  }
6639 }
6640 
6641 void VmaBlockVector::MakePoolAllocationsLost(
6642  uint32_t currentFrameIndex,
6643  size_t* pLostAllocationCount)
6644 {
6645  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6646  size_t lostAllocationCount = 0;
6647  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6648  {
6649  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6650  VMA_ASSERT(pBlock);
6651  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6652  }
6653  if(pLostAllocationCount != VMA_NULL)
6654  {
6655  *pLostAllocationCount = lostAllocationCount;
6656  }
6657 }
6658 
6659 void VmaBlockVector::AddStats(VmaStats* pStats)
6660 {
6661  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6662  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6663 
6664  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6665 
6666  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6667  {
6668  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6669  VMA_ASSERT(pBlock);
6670  VMA_HEAVY_ASSERT(pBlock->Validate());
6671  VmaStatInfo allocationStatInfo;
6672  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6673  VmaAddStatInfo(pStats->total, allocationStatInfo);
6674  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6675  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6676  }
6677 }
6678 
6679 ////////////////////////////////////////////////////////////////////////////////
6680 // VmaDefragmentator members definition
6681 
6682 VmaDefragmentator::VmaDefragmentator(
6683  VmaAllocator hAllocator,
6684  VmaBlockVector* pBlockVector,
6685  uint32_t currentFrameIndex) :
6686  m_hAllocator(hAllocator),
6687  m_pBlockVector(pBlockVector),
6688  m_CurrentFrameIndex(currentFrameIndex),
6689  m_BytesMoved(0),
6690  m_AllocationsMoved(0),
6691  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6692  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6693 {
6694 }
6695 
6696 VmaDefragmentator::~VmaDefragmentator()
6697 {
6698  for(size_t i = m_Blocks.size(); i--; )
6699  {
6700  vma_delete(m_hAllocator, m_Blocks[i]);
6701  }
6702 }
6703 
6704 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6705 {
6706  AllocationInfo allocInfo;
6707  allocInfo.m_hAllocation = hAlloc;
6708  allocInfo.m_pChanged = pChanged;
6709  m_Allocations.push_back(allocInfo);
6710 }
6711 
6712 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6713 {
6714  // It has already been mapped for defragmentation.
6715  if(m_pMappedDataForDefragmentation)
6716  {
6717  *ppMappedData = m_pMappedDataForDefragmentation;
6718  return VK_SUCCESS;
6719  }
6720 
6721  // The block is already persistently mapped.
6722  if(m_pBlock->m_Mapping.GetMappedData())
6723  {
6724  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6725  return VK_SUCCESS;
6726  }
6727 
6728  // Map on first usage.
6729  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6730  *ppMappedData = m_pMappedDataForDefragmentation;
6731  return res;
6732 }
6733 
6734 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6735 {
6736  if(m_pMappedDataForDefragmentation != VMA_NULL)
6737  {
6738  m_pBlock->Unmap(hAllocator, 1);
6739  }
6740 }
6741 
6742 VkResult VmaDefragmentator::DefragmentRound(
6743  VkDeviceSize maxBytesToMove,
6744  uint32_t maxAllocationsToMove)
6745 {
6746  if(m_Blocks.empty())
6747  {
6748  return VK_SUCCESS;
6749  }
6750 
6751  size_t srcBlockIndex = m_Blocks.size() - 1;
6752  size_t srcAllocIndex = SIZE_MAX;
6753  for(;;)
6754  {
6755  // 1. Find next allocation to move.
6756  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6757  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6758  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6759  {
6760  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6761  {
6762  // Finished: no more allocations to process.
6763  if(srcBlockIndex == 0)
6764  {
6765  return VK_SUCCESS;
6766  }
6767  else
6768  {
6769  --srcBlockIndex;
6770  srcAllocIndex = SIZE_MAX;
6771  }
6772  }
6773  else
6774  {
6775  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6776  }
6777  }
6778 
6779  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6780  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6781 
6782  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6783  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6784  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6785  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6786 
6787  // 2. Try to find new place for this allocation in preceding or current block.
6788  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6789  {
6790  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6791  VmaAllocationRequest dstAllocRequest;
6792  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6793  m_CurrentFrameIndex,
6794  m_pBlockVector->GetFrameInUseCount(),
6795  m_pBlockVector->GetBufferImageGranularity(),
6796  size,
6797  alignment,
6798  suballocType,
6799  false, // canMakeOtherLost
6800  &dstAllocRequest) &&
6801  MoveMakesSense(
6802  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6803  {
6804  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6805 
6806  // Reached limit on number of allocations or bytes to move.
6807  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6808  (m_BytesMoved + size > maxBytesToMove))
6809  {
6810  return VK_INCOMPLETE;
6811  }
6812 
6813  void* pDstMappedData = VMA_NULL;
6814  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6815  if(res != VK_SUCCESS)
6816  {
6817  return res;
6818  }
6819 
6820  void* pSrcMappedData = VMA_NULL;
6821  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6822  if(res != VK_SUCCESS)
6823  {
6824  return res;
6825  }
6826 
6827  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6828  memcpy(
6829  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6830  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6831  static_cast<size_t>(size));
6832 
6833  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6834  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6835 
6836  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6837 
6838  if(allocInfo.m_pChanged != VMA_NULL)
6839  {
6840  *allocInfo.m_pChanged = VK_TRUE;
6841  }
6842 
6843  ++m_AllocationsMoved;
6844  m_BytesMoved += size;
6845 
6846  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6847 
6848  break;
6849  }
6850  }
6851 
6852  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6853 
6854  if(srcAllocIndex > 0)
6855  {
6856  --srcAllocIndex;
6857  }
6858  else
6859  {
6860  if(srcBlockIndex > 0)
6861  {
6862  --srcBlockIndex;
6863  srcAllocIndex = SIZE_MAX;
6864  }
6865  else
6866  {
6867  return VK_SUCCESS;
6868  }
6869  }
6870  }
6871 }
6872 
6873 VkResult VmaDefragmentator::Defragment(
6874  VkDeviceSize maxBytesToMove,
6875  uint32_t maxAllocationsToMove)
6876 {
6877  if(m_Allocations.empty())
6878  {
6879  return VK_SUCCESS;
6880  }
6881 
6882  // Create block info for each block.
6883  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6884  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6885  {
6886  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6887  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6888  m_Blocks.push_back(pBlockInfo);
6889  }
6890 
6891  // Sort them by m_pBlock pointer value.
6892  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6893 
6894  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i]->m_Allocations.
6895  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6896  {
6897  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6898  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
6899  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6900  {
6901  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6902  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6903  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6904  {
6905  (*it)->m_Allocations.push_back(allocInfo);
6906  }
6907  else
6908  {
6909  VMA_ASSERT(0);
6910  }
6911  }
6912  }
6913  m_Allocations.clear();
6914 
6915  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6916  {
6917  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6918  pBlockInfo->CalcHasNonMovableAllocations();
6919  pBlockInfo->SortAllocationsBySizeDescecnding();
6920  }
6921 
6922  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6923  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6924 
6925  // Execute defragmentation rounds (the main part).
6926  VkResult result = VK_SUCCESS;
6927  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6928  {
6929  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6930  }
6931 
6932  // Unmap blocks that were mapped for defragmentation.
6933  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6934  {
6935  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6936  }
6937 
6938  return result;
6939 }
6940 
6941 bool VmaDefragmentator::MoveMakesSense(
6942  size_t dstBlockIndex, VkDeviceSize dstOffset,
6943  size_t srcBlockIndex, VkDeviceSize srcOffset)
6944 {
6945  if(dstBlockIndex < srcBlockIndex)
6946  {
6947  return true;
6948  }
6949  if(dstBlockIndex > srcBlockIndex)
6950  {
6951  return false;
6952  }
6953  if(dstOffset < srcOffset)
6954  {
6955  return true;
6956  }
6957  return false;
6958 }
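
// The comparison above is lexicographic on (blockIndex, offset): a move makes
// sense only if it transports data strictly "forward" - to an earlier block,
// or to a lower offset within the same block - which compacts used space
// toward the front and lets trailing blocks drain empty.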
6959 
6960 ////////////////////////////////////////////////////////////////////////////////
6961 // VmaAllocator_T
6962 
6963 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6964  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6965  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6966  m_hDevice(pCreateInfo->device),
6967  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6968  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6969  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6970  m_PreferredLargeHeapBlockSize(0),
6971  m_PhysicalDevice(pCreateInfo->physicalDevice),
6972  m_CurrentFrameIndex(0),
6973  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6974 {
6975  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6976 
6977  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6978  memset(&m_MemProps, 0, sizeof(m_MemProps));
6979  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6980 
6981  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6982  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6983 
6984  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6985  {
6986  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6987  }
6988 
6989  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6990  {
6991  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6992  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6993  }
6994 
6995  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6996 
6997  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6998  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6999 
7000  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7001  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7002 
7003  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7004  {
7005  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7006  {
7007  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7008  if(limit != VK_WHOLE_SIZE)
7009  {
7010  m_HeapSizeLimit[heapIndex] = limit;
7011  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7012  {
7013  m_MemProps.memoryHeaps[heapIndex].size = limit;
7014  }
7015  }
7016  }
7017  }
7018 
7019  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7020  {
7021  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7022 
7023  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7024  this,
7025  memTypeIndex,
7026  preferredBlockSize,
7027  0,
7028  SIZE_MAX,
7029  GetBufferImageGranularity(),
7030  pCreateInfo->frameInUseCount,
7031  false); // isCustomPool
7032  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7033  // because minBlockCount is 0.
7034  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7035  }
7036 }
7037 
7038 VmaAllocator_T::~VmaAllocator_T()
7039 {
7040  VMA_ASSERT(m_Pools.empty());
7041 
7042  for(size_t i = GetMemoryTypeCount(); i--; )
7043  {
7044  vma_delete(this, m_pDedicatedAllocations[i]);
7045  vma_delete(this, m_pBlockVectors[i]);
7046  }
7047 }
7048 
7049 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7050 {
7051 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7052  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7053  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7054  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7055  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7056  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7057  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7058  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7059  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7060  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7061  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7062  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7063  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7064  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7065  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7066  if(m_UseKhrDedicatedAllocation)
7067  {
7068  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7069  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7070  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7071  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7072  }
7073 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7074 
7075 #define VMA_COPY_IF_NOT_NULL(funcName) \
7076  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7077 
7078  if(pVulkanFunctions != VMA_NULL)
7079  {
7080  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7081  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7082  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7083  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7084  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7085  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7086  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7087  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7088  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7089  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7090  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7091  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7092  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7093  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7094  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7095  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7096  }
7097 
7098 #undef VMA_COPY_IF_NOT_NULL
7099 
7100  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7101  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7102  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7103  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7104  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7105  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7106  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7107  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7108  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7109  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7110  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7111  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7112  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7113  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7114  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7115  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7116  if(m_UseKhrDedicatedAllocation)
7117  {
7118  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7119  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7120  }
7121 }
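
// When VMA_STATIC_VULKAN_FUNCTIONS is 0, these pointers must come from the
// user. A minimal sketch (assuming statically linked Vulkan entry points are
// available to the caller):
//
//     VmaVulkanFunctions funcs = {};
//     funcs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
//     funcs.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
//     funcs.vkAllocateMemory = vkAllocateMemory;
//     funcs.vkFreeMemory = vkFreeMemory;
//     // ...and so on for every member of VmaVulkanFunctions...
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &funcs;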
7122 
7123 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7124 {
7125  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7126  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7127  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7128  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7129 }
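
// Worked example (illustrative, assuming VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB): a 256 MiB heap counts as small
// and gets 32 MiB blocks (heapSize / 8), while an 8 GiB heap gets the
// preferred large block size of 256 MiB.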
7130 
7131 VkResult VmaAllocator_T::AllocateMemoryOfType(
7132  const VkMemoryRequirements& vkMemReq,
7133  bool dedicatedAllocation,
7134  VkBuffer dedicatedBuffer,
7135  VkImage dedicatedImage,
7136  const VmaAllocationCreateInfo& createInfo,
7137  uint32_t memTypeIndex,
7138  VmaSuballocationType suballocType,
7139  VmaAllocation* pAllocation)
7140 {
7141  VMA_ASSERT(pAllocation != VMA_NULL);
7142  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7143 
7144  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7145 
7146  // If memory type is not HOST_VISIBLE, disable MAPPED.
7147  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7148  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7149  {
7150  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7151  }
7152 
7153  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7154  VMA_ASSERT(blockVector);
7155 
7156  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7157  bool preferDedicatedMemory =
7158  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7159  dedicatedAllocation ||
7160  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7161  vkMemReq.size > preferredBlockSize / 2;
7162 
7163  if(preferDedicatedMemory &&
7164  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7165  finalCreateInfo.pool == VK_NULL_HANDLE)
7166  {
7167  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7168  }
7169 
7170  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7171  {
7172  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7173  {
7174  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7175  }
7176  else
7177  {
7178  return AllocateDedicatedMemory(
7179  vkMemReq.size,
7180  suballocType,
7181  memTypeIndex,
7182  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7183  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7184  finalCreateInfo.pUserData,
7185  dedicatedBuffer,
7186  dedicatedImage,
7187  pAllocation);
7188  }
7189  }
7190  else
7191  {
7192  VkResult res = blockVector->Allocate(
7193  VK_NULL_HANDLE, // hCurrentPool
7194  m_CurrentFrameIndex.load(),
7195  vkMemReq,
7196  finalCreateInfo,
7197  suballocType,
7198  pAllocation);
7199  if(res == VK_SUCCESS)
7200  {
7201  return res;
7202  }
7203 
7204  // Try dedicated memory as a fallback.
7205  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7206  {
7207  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7208  }
7209  else
7210  {
7211  res = AllocateDedicatedMemory(
7212  vkMemReq.size,
7213  suballocType,
7214  memTypeIndex,
7215  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7216  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7217  finalCreateInfo.pUserData,
7218  dedicatedBuffer,
7219  dedicatedImage,
7220  pAllocation);
7221  if(res == VK_SUCCESS)
7222  {
7223  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7224  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7225  return VK_SUCCESS;
7226  }
7227  else
7228  {
7229  // Everything failed: Return error code.
7230  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7231  return res;
7232  }
7233  }
7234  }
7235 }
7236 
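// Illustrative sketch (not part of the original source): per the heuristic above,
// requests larger than half the preferred block size already take the dedicated path;
// the flag below makes it unconditional, e.g. for a large render target.
static VkResult ExampleAllocateDedicatedMemory(
 VmaAllocator allocator,
 const VkMemoryRequirements* pMemReq,
 VmaAllocation* pAllocation)
{
 VmaAllocationCreateInfo createInfo = {};
 createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
 createInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 return vmaAllocateMemory(allocator, pMemReq, &createInfo, pAllocation, VMA_NULL);
}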
7237 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7238  VkDeviceSize size,
7239  VmaSuballocationType suballocType,
7240  uint32_t memTypeIndex,
7241  bool map,
7242  bool isUserDataString,
7243  void* pUserData,
7244  VkBuffer dedicatedBuffer,
7245  VkImage dedicatedImage,
7246  VmaAllocation* pAllocation)
7247 {
7248  VMA_ASSERT(pAllocation);
7249 
7250  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7251  allocInfo.memoryTypeIndex = memTypeIndex;
7252  allocInfo.allocationSize = size;
7253 
7254  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7255  if(m_UseKhrDedicatedAllocation)
7256  {
7257  if(dedicatedBuffer != VK_NULL_HANDLE)
7258  {
7259  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7260  dedicatedAllocInfo.buffer = dedicatedBuffer;
7261  allocInfo.pNext = &dedicatedAllocInfo;
7262  }
7263  else if(dedicatedImage != VK_NULL_HANDLE)
7264  {
7265  dedicatedAllocInfo.image = dedicatedImage;
7266  allocInfo.pNext = &dedicatedAllocInfo;
7267  }
7268  }
7269 
7270  // Allocate VkDeviceMemory.
7271  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7272  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7273  if(res < 0)
7274  {
7275  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7276  return res;
7277  }
7278 
7279  void* pMappedData = VMA_NULL;
7280  if(map)
7281  {
7282  res = (*m_VulkanFunctions.vkMapMemory)(
7283  m_hDevice,
7284  hMemory,
7285  0,
7286  VK_WHOLE_SIZE,
7287  0,
7288  &pMappedData);
7289  if(res < 0)
7290  {
7291  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7292  FreeVulkanMemory(memTypeIndex, size, hMemory);
7293  return res;
7294  }
7295  }
7296 
7297  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7298  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7299  (*pAllocation)->SetUserData(this, pUserData);
7300 
7301  // Register it in m_pDedicatedAllocations.
7302  {
7303  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7304  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7305  VMA_ASSERT(pDedicatedAllocations);
7306  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7307  }
7308 
7309  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7310 
7311  return VK_SUCCESS;
7312 }
7313 
7314 void VmaAllocator_T::GetBufferMemoryRequirements(
7315  VkBuffer hBuffer,
7316  VkMemoryRequirements& memReq,
7317  bool& requiresDedicatedAllocation,
7318  bool& prefersDedicatedAllocation) const
7319 {
7320  if(m_UseKhrDedicatedAllocation)
7321  {
7322  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7323  memReqInfo.buffer = hBuffer;
7324 
7325  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7326 
7327  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7328  memReq2.pNext = &memDedicatedReq;
7329 
7330  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7331 
7332  memReq = memReq2.memoryRequirements;
7333  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7334  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7335  }
7336  else
7337  {
7338  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7339  requiresDedicatedAllocation = false;
7340  prefersDedicatedAllocation = false;
7341  }
7342 }
7343 
7344 void VmaAllocator_T::GetImageMemoryRequirements(
7345  VkImage hImage,
7346  VkMemoryRequirements& memReq,
7347  bool& requiresDedicatedAllocation,
7348  bool& prefersDedicatedAllocation) const
7349 {
7350  if(m_UseKhrDedicatedAllocation)
7351  {
7352  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7353  memReqInfo.image = hImage;
7354 
7355  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7356 
7357  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7358  memReq2.pNext = &memDedicatedReq;
7359 
7360  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7361 
7362  memReq = memReq2.memoryRequirements;
7363  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7364  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7365  }
7366  else
7367  {
7368  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7369  requiresDedicatedAllocation = false;
7370  prefersDedicatedAllocation = false;
7371  }
7372 }
7373 
7374 VkResult VmaAllocator_T::AllocateMemory(
7375  const VkMemoryRequirements& vkMemReq,
7376  bool requiresDedicatedAllocation,
7377  bool prefersDedicatedAllocation,
7378  VkBuffer dedicatedBuffer,
7379  VkImage dedicatedImage,
7380  const VmaAllocationCreateInfo& createInfo,
7381  VmaSuballocationType suballocType,
7382  VmaAllocation* pAllocation)
7383 {
7384  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7385  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7386  {
7387  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7388  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7389  }
7390  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7391  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7392  {
7393  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7394  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7395  }
7396  if(requiresDedicatedAllocation)
7397  {
7398  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7399  {
7400  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7401  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7402  }
7403  if(createInfo.pool != VK_NULL_HANDLE)
7404  {
7405  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7406  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7407  }
7408  }
7409  if((createInfo.pool != VK_NULL_HANDLE) &&
7410  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7411  {
7412  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7414  }
7415 
7416  if(createInfo.pool != VK_NULL_HANDLE)
7417  {
7418  return createInfo.pool->m_BlockVector.Allocate(
7419  createInfo.pool,
7420  m_CurrentFrameIndex.load(),
7421  vkMemReq,
7422  createInfo,
7423  suballocType,
7424  pAllocation);
7425  }
7426  else
7427  {
7428  // Bit mask of Vulkan memory types acceptable for this allocation.
7429  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7430  uint32_t memTypeIndex = UINT32_MAX;
7431  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7432  if(res == VK_SUCCESS)
7433  {
7434  res = AllocateMemoryOfType(
7435  vkMemReq,
7436  requiresDedicatedAllocation || prefersDedicatedAllocation,
7437  dedicatedBuffer,
7438  dedicatedImage,
7439  createInfo,
7440  memTypeIndex,
7441  suballocType,
7442  pAllocation);
7443  // Succeeded on first try.
7444  if(res == VK_SUCCESS)
7445  {
7446  return res;
7447  }
7448  // Allocation from this memory type failed. Try other compatible memory types.
7449  else
7450  {
7451  for(;;)
7452  {
7453  // Remove old memTypeIndex from list of possibilities.
7454  memoryTypeBits &= ~(1u << memTypeIndex);
7455  // Find alternative memTypeIndex.
7456  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7457  if(res == VK_SUCCESS)
7458  {
7459  res = AllocateMemoryOfType(
7460  vkMemReq,
7461  requiresDedicatedAllocation || prefersDedicatedAllocation,
7462  dedicatedBuffer,
7463  dedicatedImage,
7464  createInfo,
7465  memTypeIndex,
7466  suballocType,
7467  pAllocation);
7468  // Allocation from this alternative memory type succeeded.
7469  if(res == VK_SUCCESS)
7470  {
7471  return res;
7472  }
7473  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7474  }
7475  // No other matching memory type index could be found.
7476  else
7477  {
7478  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7479  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7480  }
7481  }
7482  }
7483  }
7484  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7485  else
7486  return res;
7487  }
7488 }
7489 
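// Illustrative sketch (not part of the original source): the retry loop above is
// invisible to callers - a single call may transparently land in a different compatible
// memory type when the lowest-cost one fails, so no manual fallback is needed:
static VkResult ExampleAllocateWithAutomaticFallback(
 VmaAllocator allocator,
 const VkMemoryRequirements* pMemReq,
 VmaAllocation* pAllocation,
 VmaAllocationInfo* pInfo)
{
 VmaAllocationCreateInfo createInfo = {};
 createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; // HOST_VISIBLE required, DEVICE_LOCAL preferred
 return vmaAllocateMemory(allocator, pMemReq, &createInfo, pAllocation, pInfo);
}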
7490 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7491 {
7492  VMA_ASSERT(allocation);
7493 
7494  if(allocation->CanBecomeLost() == false ||
7495  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7496  {
7497  switch(allocation->GetType())
7498  {
7499  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7500  {
7501  VmaBlockVector* pBlockVector = VMA_NULL;
7502  VmaPool hPool = allocation->GetPool();
7503  if(hPool != VK_NULL_HANDLE)
7504  {
7505  pBlockVector = &hPool->m_BlockVector;
7506  }
7507  else
7508  {
7509  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7510  pBlockVector = m_pBlockVectors[memTypeIndex];
7511  }
7512  pBlockVector->Free(allocation);
7513  }
7514  break;
7515  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7516  FreeDedicatedMemory(allocation);
7517  break;
7518  default:
7519  VMA_ASSERT(0);
7520  }
7521  }
7522 
7523  allocation->SetUserData(this, VMA_NULL);
7524  vma_delete(this, allocation);
7525 }
7526 
7527 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7528 {
7529  // Initialize.
7530  InitStatInfo(pStats->total);
7531  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7532  InitStatInfo(pStats->memoryType[i]);
7533  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7534  InitStatInfo(pStats->memoryHeap[i]);
7535 
7536  // Process default pools.
7537  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7538  {
7539  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7540  VMA_ASSERT(pBlockVector);
7541  pBlockVector->AddStats(pStats);
7542  }
7543 
7544  // Process custom pools.
7545  {
7546  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7547  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7548  {
7549  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7550  }
7551  }
7552 
7553  // Process dedicated allocations.
7554  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7555  {
7556  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7557  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7558  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7559  VMA_ASSERT(pDedicatedAllocVector);
7560  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7561  {
7562  VmaStatInfo allocationStatInfo;
7563  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7564  VmaAddStatInfo(pStats->total, allocationStatInfo);
7565  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7566  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7567  }
7568  }
7569 
7570  // Postprocess.
7571  VmaPostprocessCalcStatInfo(pStats->total);
7572  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7573  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7574  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7575  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7576 }
7577 
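// Illustrative sketch (not part of the original source): reading the aggregate numbers
// that CalculateStats() fills in, via the public vmaCalculateStats(). Field names follow
// the VmaStats/VmaStatInfo declarations earlier in this file.
static VkDeviceSize ExampleQueryTotalUsedBytes(VmaAllocator allocator)
{
 VmaStats stats;
 vmaCalculateStats(allocator, &stats);
 // total aggregates default pools, custom pools and dedicated allocations,
 // exactly as the three passes above compute it.
 return stats.total.usedBytes;
}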
7578 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7579 
7580 VkResult VmaAllocator_T::Defragment(
7581  VmaAllocation* pAllocations,
7582  size_t allocationCount,
7583  VkBool32* pAllocationsChanged,
7584  const VmaDefragmentationInfo* pDefragmentationInfo,
7585  VmaDefragmentationStats* pDefragmentationStats)
7586 {
7587  if(pAllocationsChanged != VMA_NULL)
7588  {
7589  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7590  }
7591  if(pDefragmentationStats != VMA_NULL)
7592  {
7593  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7594  }
7595 
7596  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7597 
7598  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7599 
7600  const size_t poolCount = m_Pools.size();
7601 
7602  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7603  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7604  {
7605  VmaAllocation hAlloc = pAllocations[allocIndex];
7606  VMA_ASSERT(hAlloc);
7607  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7608  // DedicatedAlloc cannot be defragmented.
7609  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7610  // Only HOST_VISIBLE memory types can be defragmented.
7611  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7612  // Lost allocation cannot be defragmented.
7613  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7614  {
7615  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7616 
7617  const VmaPool hAllocPool = hAlloc->GetPool();
7618  // This allocation belongs to a custom pool.
7619  if(hAllocPool != VK_NULL_HANDLE)
7620  {
7621  pAllocBlockVector = &hAllocPool->GetBlockVector();
7622  }
7623  // This allocation belongs to the general (default) pool.
7624  else
7625  {
7626  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7627  }
7628 
7629  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7630 
7631  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7632  &pAllocationsChanged[allocIndex] : VMA_NULL;
7633  pDefragmentator->AddAllocation(hAlloc, pChanged);
7634  }
7635  }
7636 
7637  VkResult result = VK_SUCCESS;
7638 
7639  // ======== Main processing.
7640 
7641  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
7642  uint32_t maxAllocationsToMove = UINT32_MAX;
7643  if(pDefragmentationInfo != VMA_NULL)
7644  {
7645  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7646  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7647  }
7648 
7649  // Process standard memory.
7650  for(uint32_t memTypeIndex = 0;
7651  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7652  ++memTypeIndex)
7653  {
7654  // Only HOST_VISIBLE memory types can be defragmented.
7655  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7656  {
7657  result = m_pBlockVectors[memTypeIndex]->Defragment(
7658  pDefragmentationStats,
7659  maxBytesToMove,
7660  maxAllocationsToMove);
7661  }
7662  }
7663 
7664  // Process custom pools.
7665  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7666  {
7667  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7668  pDefragmentationStats,
7669  maxBytesToMove,
7670  maxAllocationsToMove);
7671  }
7672 
7673  // ======== Destroy defragmentators.
7674 
7675  // Process custom pools.
7676  for(size_t poolIndex = poolCount; poolIndex--; )
7677  {
7678  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7679  }
7680 
7681  // Process standard memory.
7682  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7683  {
7684  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7685  {
7686  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7687  }
7688  }
7689 
7690  return result;
7691 }
7692 
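// Illustrative sketch (not part of the original source): driving the routine above
// through the public vmaDefragment(). Only non-lost block allocations in HOST_VISIBLE
// memory participate, as filtered at the top of Defragment().
static VkResult ExampleDefragmentAll(
 VmaAllocator allocator,
 VmaAllocation* pAllocations,
 size_t allocationCount,
 VkBool32* pAllocationsChanged) // one flag per allocation, or null
{
 VmaDefragmentationStats stats = {};
 // Passing null VmaDefragmentationInfo means "no limits", per the defaults above.
 VkResult res = vmaDefragment(allocator, pAllocations, allocationCount,
 pAllocationsChanged, VMA_NULL, &stats);
 // Buffers/images bound to allocations reported as changed must afterwards be
 // recreated and rebound by the application.
 return res;
}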
7693 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7694 {
7695  if(hAllocation->CanBecomeLost())
7696  {
7697  /*
7698  Warning: This is a carefully designed algorithm.
7699  Do not modify unless you really know what you're doing :)
7700  */
7701  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7702  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7703  for(;;)
7704  {
7705  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7706  {
7707  pAllocationInfo->memoryType = UINT32_MAX;
7708  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7709  pAllocationInfo->offset = 0;
7710  pAllocationInfo->size = hAllocation->GetSize();
7711  pAllocationInfo->pMappedData = VMA_NULL;
7712  pAllocationInfo->pUserData = hAllocation->GetUserData();
7713  return;
7714  }
7715  else if(localLastUseFrameIndex == localCurrFrameIndex)
7716  {
7717  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7718  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7719  pAllocationInfo->offset = hAllocation->GetOffset();
7720  pAllocationInfo->size = hAllocation->GetSize();
7721  pAllocationInfo->pMappedData = VMA_NULL;
7722  pAllocationInfo->pUserData = hAllocation->GetUserData();
7723  return;
7724  }
7725  else // Last use time earlier than current time.
7726  {
7727  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7728  {
7729  localLastUseFrameIndex = localCurrFrameIndex;
7730  }
7731  }
7732  }
7733  }
7734  else
7735  {
7736  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7737  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7738  pAllocationInfo->offset = hAllocation->GetOffset();
7739  pAllocationInfo->size = hAllocation->GetSize();
7740  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7741  pAllocationInfo->pUserData = hAllocation->GetUserData();
7742  }
7743 }
7744 
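// Illustrative sketch (not part of the original source): how a caller recognizes a lost
// allocation from the values filled in above. Note that querying a live allocation that
// can become lost also bumps its last-use frame index, keeping it alive.
static bool ExampleAllocationIsLost(VmaAllocator allocator, VmaAllocation allocation)
{
 VmaAllocationInfo info;
 vmaGetAllocationInfo(allocator, allocation, &info);
 return info.deviceMemory == VK_NULL_HANDLE && info.memoryType == UINT32_MAX;
}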
7745 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7746 {
7747  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7748 
7749  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7750 
7751  if(newCreateInfo.maxBlockCount == 0)
7752  {
7753  newCreateInfo.maxBlockCount = SIZE_MAX;
7754  }
7755  if(newCreateInfo.blockSize == 0)
7756  {
7757  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7758  }
7759 
7760  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7761 
7762  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7763  if(res != VK_SUCCESS)
7764  {
7765  vma_delete(this, *pPool);
7766  *pPool = VMA_NULL;
7767  return res;
7768  }
7769 
7770  // Add to m_Pools.
7771  {
7772  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7773  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7774  }
7775 
7776  return VK_SUCCESS;
7777 }
7778 
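// Illustrative sketch (not part of the original source): the public path into
// CreatePool(). Zero values get the defaults substituted above: blockSize = 0 picks
// CalcPreferredBlockSize() for the memory type, maxBlockCount = 0 becomes SIZE_MAX.
static VkResult ExampleCreateCustomPool(
 VmaAllocator allocator,
 uint32_t memoryTypeIndex, // e.g. obtained from vmaFindMemoryTypeIndex()
 VmaPool* pPool)
{
 VmaPoolCreateInfo poolInfo = {};
 poolInfo.memoryTypeIndex = memoryTypeIndex;
 poolInfo.minBlockCount = 1; // keep at least one block allocated at all times
 return vmaCreatePool(allocator, &poolInfo, pPool);
}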
7779 void VmaAllocator_T::DestroyPool(VmaPool pool)
7780 {
7781  // Remove from m_Pools.
7782  {
7783  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7784  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7785  VMA_ASSERT(success && "Pool not found in Allocator.");
7786  }
7787 
7788  vma_delete(this, pool);
7789 }
7790 
7791 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7792 {
7793  pool->m_BlockVector.GetPoolStats(pPoolStats);
7794 }
7795 
7796 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7797 {
7798  m_CurrentFrameIndex.store(frameIndex);
7799 }
7800 
7801 void VmaAllocator_T::MakePoolAllocationsLost(
7802  VmaPool hPool,
7803  size_t* pLostAllocationCount)
7804 {
7805  hPool->m_BlockVector.MakePoolAllocationsLost(
7806  m_CurrentFrameIndex.load(),
7807  pLostAllocationCount);
7808 }
7809 
7810 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7811 {
7812  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7813  (*pAllocation)->InitLost();
7814 }
7815 
7816 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7817 {
7818  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7819 
7820  VkResult res;
7821  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7822  {
7823  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7824  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7825  {
7826  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7827  if(res == VK_SUCCESS)
7828  {
7829  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7830  }
7831  }
7832  else
7833  {
7834  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7835  }
7836  }
7837  else
7838  {
7839  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7840  }
7841 
7842  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7843  {
7844  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7845  }
7846 
7847  return res;
7848 }
7849 
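// Illustrative sketch (not part of the original source): the m_HeapSizeLimit budget
// enforced above comes from VmaAllocatorCreateInfo::pHeapSizeLimit, an array with one
// entry per memory heap. VK_WHOLE_SIZE leaves a heap unlimited; the array is read only
// during vmaCreateAllocator(). The 256 MiB cap below is an arbitrary example value.
static void ExampleLimitHeapZero(
 VmaAllocatorCreateInfo* pCreateInfo,
 VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS])
{
 for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
 {
 heapLimits[i] = VK_WHOLE_SIZE; // no limit for this heap
 }
 heapLimits[0] = 256ull * 1024 * 1024; // cap heap 0 at 256 MiB
 pCreateInfo->pHeapSizeLimit = heapLimits;
}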
7850 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7851 {
7852  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7853  {
7854  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7855  }
7856 
7857  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7858 
7859  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7860  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7861  {
7862  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7863  m_HeapSizeLimit[heapIndex] += size;
7864  }
7865 }
7866 
7867 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7868 {
7869  if(hAllocation->CanBecomeLost())
7870  {
7871  return VK_ERROR_MEMORY_MAP_FAILED;
7872  }
7873 
7874  switch(hAllocation->GetType())
7875  {
7876  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7877  {
7878  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7879  char *pBytes = VMA_NULL;
7880  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
7881  if(res == VK_SUCCESS)
7882  {
7883  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7884  hAllocation->BlockAllocMap();
7885  }
7886  return res;
7887  }
7888  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7889  return hAllocation->DedicatedAllocMap(this, ppData);
7890  default:
7891  VMA_ASSERT(0);
7892  return VK_ERROR_MEMORY_MAP_FAILED;
7893  }
7894 }
7895 
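// Illustrative sketch (not part of the original source): the usual pairing of the two
// functions above through the public API. The allocation must live in HOST_VISIBLE
// memory and must not use VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
static VkResult ExampleUploadToAllocation(
 VmaAllocator allocator,
 VmaAllocation allocation,
 const void* pSrcData,
 size_t size)
{
 void* pMapped = VMA_NULL;
 VkResult res = vmaMapMemory(allocator, allocation, &pMapped);
 if(res != VK_SUCCESS)
 {
 return res;
 }
 memcpy(pMapped, pSrcData, size);
 vmaUnmapMemory(allocator, allocation);
 return VK_SUCCESS;
}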
7896 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7897 {
7898  switch(hAllocation->GetType())
7899  {
7900  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7901  {
7902  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7903  hAllocation->BlockAllocUnmap();
7904  pBlock->Unmap(this, 1);
7905  }
7906  break;
7907  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7908  hAllocation->DedicatedAllocUnmap(this);
7909  break;
7910  default:
7911  VMA_ASSERT(0);
7912  }
7913 }
7914 
7915 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7916 {
7917  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7918 
7919  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7920  {
7921  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7922  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7923  VMA_ASSERT(pDedicatedAllocations);
7924  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7925  VMA_ASSERT(success);
7926  }
7927 
7928  VkDeviceMemory hMemory = allocation->GetMemory();
7929 
7930  if(allocation->GetMappedData() != VMA_NULL)
7931  {
7932  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7933  }
7934 
7935  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7936 
7937  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7938 }
7939 
7940 #if VMA_STATS_STRING_ENABLED
7941 
7942 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7943 {
7944  bool dedicatedAllocationsStarted = false;
7945  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7946  {
7947  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7948  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7949  VMA_ASSERT(pDedicatedAllocVector);
7950  if(pDedicatedAllocVector->empty() == false)
7951  {
7952  if(dedicatedAllocationsStarted == false)
7953  {
7954  dedicatedAllocationsStarted = true;
7955  json.WriteString("DedicatedAllocations");
7956  json.BeginObject();
7957  }
7958 
7959  json.BeginString("Type ");
7960  json.ContinueString(memTypeIndex);
7961  json.EndString();
7962 
7963  json.BeginArray();
7964 
7965  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7966  {
7967  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7968  json.BeginObject(true);
7969 
7970  json.WriteString("Type");
7971  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7972 
7973  json.WriteString("Size");
7974  json.WriteNumber(hAlloc->GetSize());
7975 
7976  const void* pUserData = hAlloc->GetUserData();
7977  if(pUserData != VMA_NULL)
7978  {
7979  json.WriteString("UserData");
7980  if(hAlloc->IsUserDataString())
7981  {
7982  json.WriteString((const char*)pUserData);
7983  }
7984  else
7985  {
7986  json.BeginString();
7987  json.ContinueString_Pointer(pUserData);
7988  json.EndString();
7989  }
7990  }
7991 
7992  json.EndObject();
7993  }
7994 
7995  json.EndArray();
7996  }
7997  }
7998  if(dedicatedAllocationsStarted)
7999  {
8000  json.EndObject();
8001  }
8002 
8003  {
8004  bool allocationsStarted = false;
8005  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8006  {
8007  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8008  {
8009  if(allocationsStarted == false)
8010  {
8011  allocationsStarted = true;
8012  json.WriteString("DefaultPools");
8013  json.BeginObject();
8014  }
8015 
8016  json.BeginString("Type ");
8017  json.ContinueString(memTypeIndex);
8018  json.EndString();
8019 
8020  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8021  }
8022  }
8023  if(allocationsStarted)
8024  {
8025  json.EndObject();
8026  }
8027  }
8028 
8029  {
8030  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8031  const size_t poolCount = m_Pools.size();
8032  if(poolCount > 0)
8033  {
8034  json.WriteString("Pools");
8035  json.BeginArray();
8036  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8037  {
8038  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8039  }
8040  json.EndArray();
8041  }
8042  }
8043 }
8044 
8045 #endif // #if VMA_STATS_STRING_ENABLED
8046 
8047 static VkResult AllocateMemoryForImage(
8048  VmaAllocator allocator,
8049  VkImage image,
8050  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8051  VmaSuballocationType suballocType,
8052  VmaAllocation* pAllocation)
8053 {
8054  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8055 
8056  VkMemoryRequirements vkMemReq = {};
8057  bool requiresDedicatedAllocation = false;
8058  bool prefersDedicatedAllocation = false;
8059  allocator->GetImageMemoryRequirements(image, vkMemReq,
8060  requiresDedicatedAllocation, prefersDedicatedAllocation);
8061 
8062  return allocator->AllocateMemory(
8063  vkMemReq,
8064  requiresDedicatedAllocation,
8065  prefersDedicatedAllocation,
8066  VK_NULL_HANDLE, // dedicatedBuffer
8067  image, // dedicatedImage
8068  *pAllocationCreateInfo,
8069  suballocType,
8070  pAllocation);
8071 }
8072 
8073 ////////////////////////////////////////////////////////////////////////////////
8074 // Public interface
8075 
8076 VkResult vmaCreateAllocator(
8077  const VmaAllocatorCreateInfo* pCreateInfo,
8078  VmaAllocator* pAllocator)
8079 {
8080  VMA_ASSERT(pCreateInfo && pAllocator);
8081  VMA_DEBUG_LOG("vmaCreateAllocator");
8082  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8083  return VK_SUCCESS;
8084 }
8085 
8086 void vmaDestroyAllocator(
8087  VmaAllocator allocator)
8088 {
8089  if(allocator != VK_NULL_HANDLE)
8090  {
8091  VMA_DEBUG_LOG("vmaDestroyAllocator");
8092  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8093  vma_delete(&allocationCallbacks, allocator);
8094  }
8095 }
8096 
8097 void vmaGetPhysicalDeviceProperties(
8098  VmaAllocator allocator,
8099  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8100 {
8101  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8102  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8103 }
8104 
8105 void vmaGetPhysicalDeviceMemoryProperties(
8106  VmaAllocator allocator,
8107  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8108 {
8109  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8110  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8111 }
8112 
8113 void vmaGetMemoryTypeProperties(
8114  VmaAllocator allocator,
8115  uint32_t memoryTypeIndex,
8116  VkMemoryPropertyFlags* pFlags)
8117 {
8118  VMA_ASSERT(allocator && pFlags);
8119  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8120  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8121 }
8122 
8123 void vmaSetCurrentFrameIndex(
8124  VmaAllocator allocator,
8125  uint32_t frameIndex)
8126 {
8127  VMA_ASSERT(allocator);
8128  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8129 
8130  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8131 
8132  allocator->SetCurrentFrameIndex(frameIndex);
8133 }
8134 
8135 void vmaCalculateStats(
8136  VmaAllocator allocator,
8137  VmaStats* pStats)
8138 {
8139  VMA_ASSERT(allocator && pStats);
8140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8141  allocator->CalculateStats(pStats);
8142 }
8143 
8144 #if VMA_STATS_STRING_ENABLED
8145 
8146 void vmaBuildStatsString(
8147  VmaAllocator allocator,
8148  char** ppStatsString,
8149  VkBool32 detailedMap)
8150 {
8151  VMA_ASSERT(allocator && ppStatsString);
8152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8153 
8154  VmaStringBuilder sb(allocator);
8155  {
8156  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8157  json.BeginObject();
8158 
8159  VmaStats stats;
8160  allocator->CalculateStats(&stats);
8161 
8162  json.WriteString("Total");
8163  VmaPrintStatInfo(json, stats.total);
8164 
8165  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8166  {
8167  json.BeginString("Heap ");
8168  json.ContinueString(heapIndex);
8169  json.EndString();
8170  json.BeginObject();
8171 
8172  json.WriteString("Size");
8173  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8174 
8175  json.WriteString("Flags");
8176  json.BeginArray(true);
8177  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8178  {
8179  json.WriteString("DEVICE_LOCAL");
8180  }
8181  json.EndArray();
8182 
8183  if(stats.memoryHeap[heapIndex].blockCount > 0)
8184  {
8185  json.WriteString("Stats");
8186  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8187  }
8188 
8189  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8190  {
8191  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8192  {
8193  json.BeginString("Type ");
8194  json.ContinueString(typeIndex);
8195  json.EndString();
8196 
8197  json.BeginObject();
8198 
8199  json.WriteString("Flags");
8200  json.BeginArray(true);
8201  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8202  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8203  {
8204  json.WriteString("DEVICE_LOCAL");
8205  }
8206  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8207  {
8208  json.WriteString("HOST_VISIBLE");
8209  }
8210  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8211  {
8212  json.WriteString("HOST_COHERENT");
8213  }
8214  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8215  {
8216  json.WriteString("HOST_CACHED");
8217  }
8218  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8219  {
8220  json.WriteString("LAZILY_ALLOCATED");
8221  }
8222  json.EndArray();
8223 
8224  if(stats.memoryType[typeIndex].blockCount > 0)
8225  {
8226  json.WriteString("Stats");
8227  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8228  }
8229 
8230  json.EndObject();
8231  }
8232  }
8233 
8234  json.EndObject();
8235  }
8236  if(detailedMap == VK_TRUE)
8237  {
8238  allocator->PrintDetailedMap(json);
8239  }
8240 
8241  json.EndObject();
8242  }
8243 
8244  const size_t len = sb.GetLength();
8245  char* const pChars = vma_new_array(allocator, char, len + 1);
8246  if(len > 0)
8247  {
8248  memcpy(pChars, sb.GetData(), len);
8249  }
8250  pChars[len] = '\0';
8251  *ppStatsString = pChars;
8252 }
8253 
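// Illustrative sketch (not part of the original source): a typical round trip through
// the pair of functions above. The string is allocated with the allocator's CPU
// callbacks, so it must be released with vmaFreeStatsString(), never with free().
static void ExampleDumpStatsJson(VmaAllocator allocator)
{
 char* pStatsString = VMA_NULL;
 vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE = include detailed map
 // ... hand pStatsString to a logger or write it to a file here ...
 vmaFreeStatsString(allocator, pStatsString);
}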
8254 void vmaFreeStatsString(
8255  VmaAllocator allocator,
8256  char* pStatsString)
8257 {
8258  if(pStatsString != VMA_NULL)
8259  {
8260  VMA_ASSERT(allocator);
8261  size_t len = strlen(pStatsString);
8262  vma_delete_array(allocator, pStatsString, len + 1);
8263  }
8264 }
8265 
8266 #endif // #if VMA_STATS_STRING_ENABLED
8267 
8268 /*
8269 This function is not protected by any mutex because it just reads immutable data.
8270 */
8271 VkResult vmaFindMemoryTypeIndex(
8272  VmaAllocator allocator,
8273  uint32_t memoryTypeBits,
8274  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8275  uint32_t* pMemoryTypeIndex)
8276 {
8277  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8278  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8279  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8280 
8281  if(pAllocationCreateInfo->memoryTypeBits != 0)
8282  {
8283  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8284  }
8285 
8286  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8287  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8288 
8289  // Convert usage to requiredFlags and preferredFlags.
8290  switch(pAllocationCreateInfo->usage)
8291  {
8292  case VMA_MEMORY_USAGE_UNKNOWN:
8293  break;
8294  case VMA_MEMORY_USAGE_GPU_ONLY:
8295  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8296  break;
8297  case VMA_MEMORY_USAGE_CPU_ONLY:
8298  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8299  break;
8300  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8301  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8302  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8303  break;
8304  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8305  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8306  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8307  break;
8308  default:
8309  break;
8310  }
8311 
8312  *pMemoryTypeIndex = UINT32_MAX;
8313  uint32_t minCost = UINT32_MAX;
8314  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8315  memTypeIndex < allocator->GetMemoryTypeCount();
8316  ++memTypeIndex, memTypeBit <<= 1)
8317  {
8318  // This memory type is acceptable according to memoryTypeBits bitmask.
8319  if((memTypeBit & memoryTypeBits) != 0)
8320  {
8321  const VkMemoryPropertyFlags currFlags =
8322  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8323  // This memory type contains requiredFlags.
8324  if((requiredFlags & ~currFlags) == 0)
8325  {
8326  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8327  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8328  // Remember memory type with lowest cost.
8329  if(currCost < minCost)
8330  {
8331  *pMemoryTypeIndex = memTypeIndex;
8332  if(currCost == 0)
8333  {
8334  return VK_SUCCESS;
8335  }
8336  minCost = currCost;
8337  }
8338  }
8339  }
8340  }
8341  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8342 }
8343 
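// Illustrative sketch (not part of the original source): typical use of the cost search
// above - choosing a memory type for a staging buffer. usage is translated into
// required/preferred flags exactly as in the switch at the top of the function.
static VkResult ExampleFindStagingMemoryType(
 VmaAllocator allocator,
 uint32_t memoryTypeBits, // from VkMemoryRequirements::memoryTypeBits
 uint32_t* pMemTypeIndex)
{
 VmaAllocationCreateInfo createInfo = {};
 createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT
 return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, pMemTypeIndex);
}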
8344 VkResult vmaCreatePool(
8345  VmaAllocator allocator,
8346  const VmaPoolCreateInfo* pCreateInfo,
8347  VmaPool* pPool)
8348 {
8349  VMA_ASSERT(allocator && pCreateInfo && pPool);
8350 
8351  VMA_DEBUG_LOG("vmaCreatePool");
8352 
8353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8354 
8355  return allocator->CreatePool(pCreateInfo, pPool);
8356 }
8357 
8358 void vmaDestroyPool(
8359  VmaAllocator allocator,
8360  VmaPool pool)
8361 {
8362  VMA_ASSERT(allocator);
8363 
8364  if(pool == VK_NULL_HANDLE)
8365  {
8366  return;
8367  }
8368 
8369  VMA_DEBUG_LOG("vmaDestroyPool");
8370 
8371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8372 
8373  allocator->DestroyPool(pool);
8374 }
8375 
8376 void vmaGetPoolStats(
8377  VmaAllocator allocator,
8378  VmaPool pool,
8379  VmaPoolStats* pPoolStats)
8380 {
8381  VMA_ASSERT(allocator && pool && pPoolStats);
8382 
8383  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8384 
8385  allocator->GetPoolStats(pool, pPoolStats);
8386 }
8387 
8388 void vmaMakePoolAllocationsLost(
8389  VmaAllocator allocator,
8390  VmaPool pool,
8391  size_t* pLostAllocationCount)
8392 {
8393  VMA_ASSERT(allocator && pool);
8394 
8395  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8396 
8397  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8398 }
8399 
8400 VkResult vmaAllocateMemory(
8401  VmaAllocator allocator,
8402  const VkMemoryRequirements* pVkMemoryRequirements,
8403  const VmaAllocationCreateInfo* pCreateInfo,
8404  VmaAllocation* pAllocation,
8405  VmaAllocationInfo* pAllocationInfo)
8406 {
8407  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8408 
8409  VMA_DEBUG_LOG("vmaAllocateMemory");
8410 
8411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8412 
8413  VkResult result = allocator->AllocateMemory(
8414  *pVkMemoryRequirements,
8415  false, // requiresDedicatedAllocation
8416  false, // prefersDedicatedAllocation
8417  VK_NULL_HANDLE, // dedicatedBuffer
8418  VK_NULL_HANDLE, // dedicatedImage
8419  *pCreateInfo,
8420  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8421  pAllocation);
8422 
8423  if(pAllocationInfo && result == VK_SUCCESS)
8424  {
8425  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8426  }
8427 
8428  return result;
8429 }
8430 
8431 VkResult vmaAllocateMemoryForBuffer(
8432  VmaAllocator allocator,
8433  VkBuffer buffer,
8434  const VmaAllocationCreateInfo* pCreateInfo,
8435  VmaAllocation* pAllocation,
8436  VmaAllocationInfo* pAllocationInfo)
8437 {
8438  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8439 
8440  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8441 
8442  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8443 
8444  VkMemoryRequirements vkMemReq = {};
8445  bool requiresDedicatedAllocation = false;
8446  bool prefersDedicatedAllocation = false;
8447  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8448  requiresDedicatedAllocation,
8449  prefersDedicatedAllocation);
8450 
8451  VkResult result = allocator->AllocateMemory(
8452  vkMemReq,
8453  requiresDedicatedAllocation,
8454  prefersDedicatedAllocation,
8455  buffer, // dedicatedBuffer
8456  VK_NULL_HANDLE, // dedicatedImage
8457  *pCreateInfo,
8458  VMA_SUBALLOCATION_TYPE_BUFFER,
8459  pAllocation);
8460 
8461  if(pAllocationInfo && result == VK_SUCCESS)
8462  {
8463  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8464  }
8465 
8466  return result;
8467 }
8468 
8469 VkResult vmaAllocateMemoryForImage(
8470  VmaAllocator allocator,
8471  VkImage image,
8472  const VmaAllocationCreateInfo* pCreateInfo,
8473  VmaAllocation* pAllocation,
8474  VmaAllocationInfo* pAllocationInfo)
8475 {
8476  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8477 
8478  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8479 
8480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8481 
8482  VkResult result = AllocateMemoryForImage(
8483  allocator,
8484  image,
8485  pCreateInfo,
8486  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8487  pAllocation);
8488 
8489  if(pAllocationInfo && result == VK_SUCCESS)
8490  {
8491  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8492  }
8493 
8494  return result;
8495 }
8496 
8497 void vmaFreeMemory(
8498  VmaAllocator allocator,
8499  VmaAllocation allocation)
8500 {
8501  VMA_ASSERT(allocator && allocation);
8502 
8503  VMA_DEBUG_LOG("vmaFreeMemory");
8504 
8505  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8506 
8507  allocator->FreeMemory(allocation);
8508 }
8509 
8510 void vmaGetAllocationInfo(
8511  VmaAllocator allocator,
8512  VmaAllocation allocation,
8513  VmaAllocationInfo* pAllocationInfo)
8514 {
8515  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8516 
8517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8518 
8519  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8520 }
8521 
8522 void vmaSetAllocationUserData(
8523  VmaAllocator allocator,
8524  VmaAllocation allocation,
8525  void* pUserData)
8526 {
8527  VMA_ASSERT(allocator && allocation);
8528 
8529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8530 
8531  allocation->SetUserData(allocator, pUserData);
8532 }
8533 
8534 void vmaCreateLostAllocation(
8535  VmaAllocator allocator,
8536  VmaAllocation* pAllocation)
8537 {
8538  VMA_ASSERT(allocator && pAllocation);
8539 
8540  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8541 
8542  allocator->CreateLostAllocation(pAllocation);
8543 }
8544 
8545 VkResult vmaMapMemory(
8546  VmaAllocator allocator,
8547  VmaAllocation allocation,
8548  void** ppData)
8549 {
8550  VMA_ASSERT(allocator && allocation && ppData);
8551 
8552  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8553 
8554  return allocator->Map(allocation, ppData);
8555 }
8556 
8557 void vmaUnmapMemory(
8558  VmaAllocator allocator,
8559  VmaAllocation allocation)
8560 {
8561  VMA_ASSERT(allocator && allocation);
8562 
8563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8564 
8565  allocator->Unmap(allocation);
8566 }
8567 
8568 VkResult vmaDefragment(
8569  VmaAllocator allocator,
8570  VmaAllocation* pAllocations,
8571  size_t allocationCount,
8572  VkBool32* pAllocationsChanged,
8573  const VmaDefragmentationInfo *pDefragmentationInfo,
8574  VmaDefragmentationStats* pDefragmentationStats)
8575 {
8576  VMA_ASSERT(allocator && pAllocations);
8577 
8578  VMA_DEBUG_LOG("vmaDefragment");
8579 
8580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8581 
8582  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8583 }
8584 
8585 VkResult vmaCreateBuffer(
8586  VmaAllocator allocator,
8587  const VkBufferCreateInfo* pBufferCreateInfo,
8588  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8589  VkBuffer* pBuffer,
8590  VmaAllocation* pAllocation,
8591  VmaAllocationInfo* pAllocationInfo)
8592 {
8593  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8594 
8595  VMA_DEBUG_LOG("vmaCreateBuffer");
8596 
8597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8598 
8599  *pBuffer = VK_NULL_HANDLE;
8600  *pAllocation = VK_NULL_HANDLE;
8601 
8602  // 1. Create VkBuffer.
8603  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8604  allocator->m_hDevice,
8605  pBufferCreateInfo,
8606  allocator->GetAllocationCallbacks(),
8607  pBuffer);
8608  if(res >= 0)
8609  {
8610  // 2. vkGetBufferMemoryRequirements.
8611  VkMemoryRequirements vkMemReq = {};
8612  bool requiresDedicatedAllocation = false;
8613  bool prefersDedicatedAllocation = false;
8614  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8615  requiresDedicatedAllocation, prefersDedicatedAllocation);
8616 
8617  // Make sure alignment requirements for specific buffer usages reported
8618  // in Physical Device Properties are included in alignment reported by memory requirements.
8619  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8620  {
8621  VMA_ASSERT(vkMemReq.alignment %
8622  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8623  }
8624  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8625  {
8626  VMA_ASSERT(vkMemReq.alignment %
8627  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8628  }
8629  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8630  {
8631  VMA_ASSERT(vkMemReq.alignment %
8632  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8633  }
8634 
8635  // 3. Allocate memory using allocator.
8636  res = allocator->AllocateMemory(
8637  vkMemReq,
8638  requiresDedicatedAllocation,
8639  prefersDedicatedAllocation,
8640  *pBuffer, // dedicatedBuffer
8641  VK_NULL_HANDLE, // dedicatedImage
8642  *pAllocationCreateInfo,
8643  VMA_SUBALLOCATION_TYPE_BUFFER,
8644  pAllocation);
8645  if(res >= 0)
8646  {
8647  // 4. Bind buffer with memory.
8648  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8649  allocator->m_hDevice,
8650  *pBuffer,
8651  (*pAllocation)->GetMemory(),
8652  (*pAllocation)->GetOffset());
8653  if(res >= 0)
8654  {
8655  // All steps succeeded.
8656  if(pAllocationInfo != VMA_NULL)
8657  {
8658  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8659  }
8660  return VK_SUCCESS;
8661  }
8662  allocator->FreeMemory(*pAllocation);
8663  *pAllocation = VK_NULL_HANDLE;
8664  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8665  *pBuffer = VK_NULL_HANDLE;
8666  return res;
8667  }
8668  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8669  *pBuffer = VK_NULL_HANDLE;
8670  return res;
8671  }
8672  return res;
8673 }
8674 
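// Illustrative sketch (not part of the original source): calling the function above to
// make a persistently mapped staging buffer in one step - create, allocate and bind,
// with the mapping retrieved from VmaAllocationInfo::pMappedData thanks to MAPPED_BIT.
static VkResult ExampleCreateStagingBuffer(
 VmaAllocator allocator,
 VkDeviceSize size,
 VkBuffer* pBuffer,
 VmaAllocation* pAllocation,
 void** ppMappedData)
{
 VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
 bufInfo.size = size;
 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

 VmaAllocationCreateInfo allocInfo = {};
 allocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
 allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

 VmaAllocationInfo resultInfo = {};
 VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo,
 pBuffer, pAllocation, &resultInfo);
 if(res == VK_SUCCESS)
 {
 *ppMappedData = resultInfo.pMappedData;
 }
 return res;
}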
8675 void vmaDestroyBuffer(
8676  VmaAllocator allocator,
8677  VkBuffer buffer,
8678  VmaAllocation allocation)
8679 {
8680  if(buffer != VK_NULL_HANDLE)
8681  {
8682  VMA_ASSERT(allocator);
8683 
8684  VMA_DEBUG_LOG("vmaDestroyBuffer");
8685 
8686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8687 
8688  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8689 
8690  allocator->FreeMemory(allocation);
8691  }
8692 }
8693 
8694 VkResult vmaCreateImage(
8695  VmaAllocator allocator,
8696  const VkImageCreateInfo* pImageCreateInfo,
8697  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8698  VkImage* pImage,
8699  VmaAllocation* pAllocation,
8700  VmaAllocationInfo* pAllocationInfo)
8701 {
8702  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8703 
8704  VMA_DEBUG_LOG("vmaCreateImage");
8705 
8706  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8707 
8708  *pImage = VK_NULL_HANDLE;
8709  *pAllocation = VK_NULL_HANDLE;
8710 
8711  // 1. Create VkImage.
8712  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8713  allocator->m_hDevice,
8714  pImageCreateInfo,
8715  allocator->GetAllocationCallbacks(),
8716  pImage);
8717  if(res >= 0)
8718  {
8719  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8720  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8721  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8722 
8723  // 2. Allocate memory using allocator.
8724  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8725  if(res >= 0)
8726  {
8727  // 3. Bind image with memory.
8728  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8729  allocator->m_hDevice,
8730  *pImage,
8731  (*pAllocation)->GetMemory(),
8732  (*pAllocation)->GetOffset());
8733  if(res >= 0)
8734  {
8735  // All steps succeeded.
8736  if(pAllocationInfo != VMA_NULL)
8737  {
8738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8739  }
8740  return VK_SUCCESS;
8741  }
8742  allocator->FreeMemory(*pAllocation);
8743  *pAllocation = VK_NULL_HANDLE;
8744  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8745  *pImage = VK_NULL_HANDLE;
8746  return res;
8747  }
8748  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8749  *pImage = VK_NULL_HANDLE;
8750  return res;
8751  }
8752  return res;
8753 }
8754 
8755 void vmaDestroyImage(
8756  VmaAllocator allocator,
8757  VkImage image,
8758  VmaAllocation allocation)
8759 {
8760  if(image != VK_NULL_HANDLE)
8761  {
8762  VMA_ASSERT(allocator);
8763 
8764  VMA_DEBUG_LOG("vmaDestroyImage");
8765 
8766  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8767 
8768  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8769 
8770  allocator->FreeMemory(allocation);
8771  }
8772 }
8773 
8774 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:896
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1150
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:921
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:906
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1107
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:900
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1418
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:918
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1584
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1288
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1342
Definition: vk_mem_alloc.h:1187
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:889
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1225
Definition: vk_mem_alloc.h:1134
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:930
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:983
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:915
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1138
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1048
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:903
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1047
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:911
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1588
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:947
VmaStatInfo total
Definition: vk_mem_alloc.h:1057
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1596
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1209
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1579
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:904
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:831
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:924
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1296
Definition: vk_mem_alloc.h:1290
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1428
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:901
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1246
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1312
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1348
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:887
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1299
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
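Finds the index of a memory type that satisfies the given VmaAllocationCreateInfo. A sketch for a staging-style allocation, assuming memReq holds VkMemoryRequirements obtained earlier:

    VmaAllocationCreateInfo createInfo = {0};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    createInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    createInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator,
        memReq.memoryTypeBits, &createInfo, &memTypeIndex);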
VmaMemoryUsage
Definition: vk_mem_alloc.h:1085
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1574
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a VmaPool to be created.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:1592
Definition: vk_mem_alloc.h:1124
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1233
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:902
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1053
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:837
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in the given allocation to a new value.
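A sketch of tagging an allocation with a debug name; the pointer is stored verbatim, so it must outlive the allocation (a string literal does):

    static const char* kDebugName = "TerrainVertexBuffer";  /* hypothetical tag */
    vmaSetAllocationUserData(allocator, allocation, (void*)kDebugName);

    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, allocation, &info);
    const char* retrieved = (const char*)info.pUserData;    /* same pointer back */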
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates a VmaPool object.
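A sketch of a custom pool, assuming memTypeIndex was obtained, e.g., from vmaFindMemoryTypeIndex(); the block size and counts are illustrative only:

    VmaPoolCreateInfo poolInfo = {0};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024;  /* 16 MiB per VkDeviceMemory block */
    poolInfo.minBlockCount = 1;                /* keep at least one block allocated */
    poolInfo.maxBlockCount = 8;                /* never grow beyond 8 blocks */

    VmaPool pool;
    if(vmaCreatePool(allocator, &poolInfo, &pool) == VK_SUCCESS)
    {
        /* direct allocations here via VmaAllocationCreateInfo::pool */
        vmaDestroyPool(allocator, pool);
    }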
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:858
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
Definition: vk_mem_alloc.h:863
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1594
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in the lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in the memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1220
VkDeviceSize unusedRangeSizeMax
Size of the largest contiguous free memory region.
Definition: vk_mem_alloc.h:1358
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a string in JSON format.
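A sketch; the returned string must be released with vmaFreeStatsString(), which this header declares alongside it:

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /* detailedMap */);
    /* write statsString to a log or file for offline inspection */
    vmaFreeStatsString(allocator, statsString);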
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:897
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1036
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1307
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:850
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1194
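A usage sketch, assuming a valid allocator; the size and usage bits are illustrative only. vmaCreateBuffer() creates the VkBuffer, allocates suitable memory, and binds them in one call:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;   /* prefer device-local memory */

    VkBuffer buffer;
    VmaAllocation allocation;
    if(vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        &buffer, &allocation, NULL) == VK_SUCCESS)
    {
        /* ... use the buffer ... */
        vmaDestroyBuffer(allocator, buffer, allocation);  /* destroys buffer and frees memory */
    }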
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1049
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:854
VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1302
Definition: vk_mem_alloc.h:1133
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1215
Definition: vk_mem_alloc.h:1206
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1039
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:899
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1320
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:933
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1351
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1204
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1239
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
Definition: vk_mem_alloc.h:971
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1055
Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it (see VmaAllocationInfo::pMappedData).
Definition: vk_mem_alloc.h:1174
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1048
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:908
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:852
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:907
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by the given allocation and returns a pointer to it.
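A sketch of a temporary map, assuming the allocation lives in a HOST_VISIBLE memory type; myData and myDataSize are hypothetical application variables, and memcpy requires <string.h>:

    void* pData;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, myData, myDataSize);
        vmaUnmapMemory(allocator, allocation);  /* pair every map with an unmap */
    }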
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:1334
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1442
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB.
Definition: vk_mem_alloc.h:927
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1048
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1045
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1339
VkDeviceSize offset
Offset into the deviceMemory object to the beginning of this allocation, in bytes. The (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1423
Definition: vk_mem_alloc.h:1202
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1590
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:895
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
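A minimal creation sketch (not part of the header itself), assuming physicalDevice and device are valid handles from ordinary Vulkan initialization; all other members are left zero-initialized, which selects the library defaults:

    VmaAllocatorCreateInfo allocatorInfo = {0};   /* no flags, no callbacks, default block size */
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
    {
        /* ... use the allocator for the lifetime of the device ... */
        vmaDestroyAllocator(allocator);
    }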
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:910
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1043
Definition: vk_mem_alloc.h:1090
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1292
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:1041
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:905
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:909
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:1161
Definition: vk_mem_alloc.h:1117
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1437
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:885
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:898
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1404
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General-purpose memory allocation.
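A lower-level sketch, assuming device, allocator, and an already-created buffer are valid; normally vmaCreateBuffer() or vmaAllocateMemoryForBuffer() is more convenient, since they query the requirements and bind for you:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo createInfo = {0};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocationInfo;
    if(vmaAllocateMemory(allocator, &memReq, &createInfo,
        &allocation, &allocationInfo) == VK_SUCCESS)
    {
        /* (deviceMemory, offset) identify the backing block */
        vkBindBufferMemory(device, buffer,
            allocationInfo.deviceMemory, allocationInfo.offset);
        /* ... */
        vmaFreeMemory(allocator, allocation);
    }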
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets the index of the current frame.
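A sketch of advancing the frame index once per rendered frame, which is what the frameInUseCount-based lost-allocation machinery keys off:

    uint32_t frameIndex = 0;
    while(applicationIsRunning)   /* hypothetical loop condition */
    {
        vmaSetCurrentFrameIndex(allocator, frameIndex);
        /* record and submit command buffers for this frame ... */
        ++frameIndex;
    }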
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1270
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1049
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1056
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1345
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1049
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1409