Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
688 #include <vulkan/vulkan.h>
689 
690 VK_DEFINE_HANDLE(VmaAllocator)
691 
692 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
694  VmaAllocator allocator,
695  uint32_t memoryType,
696  VkDeviceMemory memory,
697  VkDeviceSize size);
699 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
700  VmaAllocator allocator,
701  uint32_t memoryType,
702  VkDeviceMemory memory,
703  VkDeviceSize size);
704 
712 typedef struct VmaDeviceMemoryCallbacks {
714  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
716  PFN_vmaFreeDeviceMemoryFunction pfnFree;
717 } VmaDeviceMemoryCallbacks;
718 
748 
751 typedef VkFlags VmaAllocatorCreateFlags;
752 
757 typedef struct VmaVulkanFunctions {
758  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
759  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
760  PFN_vkAllocateMemory vkAllocateMemory;
761  PFN_vkFreeMemory vkFreeMemory;
762  PFN_vkMapMemory vkMapMemory;
763  PFN_vkUnmapMemory vkUnmapMemory;
764  PFN_vkBindBufferMemory vkBindBufferMemory;
765  PFN_vkBindImageMemory vkBindImageMemory;
766  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
767  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
768  PFN_vkCreateBuffer vkCreateBuffer;
769  PFN_vkDestroyBuffer vkDestroyBuffer;
770  PFN_vkCreateImage vkCreateImage;
771  PFN_vkDestroyImage vkDestroyImage;
772  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
773  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
774 } VmaVulkanFunctions;
775 
777 typedef struct VmaAllocatorCreateInfo
778 {
780  VmaAllocatorCreateFlags flags;
782 
783  VkPhysicalDevice physicalDevice;
785 
786  VkDevice device;
788 
790  VkDeviceSize preferredLargeHeapBlockSize;
791 
792  const VkAllocationCallbacks* pAllocationCallbacks;
794 
796  const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
798 
809  uint32_t frameInUseCount;
833  const VkDeviceSize* pHeapSizeLimit;
845  const VmaVulkanFunctions* pVulkanFunctions;
846 } VmaAllocatorCreateInfo;
847 
849 VkResult vmaCreateAllocator(
850  const VmaAllocatorCreateInfo* pCreateInfo,
851  VmaAllocator* pAllocator);
852 
854 void vmaDestroyAllocator(
855  VmaAllocator allocator);
856 
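/*
Example: typical allocator creation and destruction - a minimal sketch, assuming
`physicalDevice` and `device` were obtained during normal Vulkan initialization:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/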
861 void vmaGetPhysicalDeviceProperties(
862  VmaAllocator allocator,
863  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
864 
869 void vmaGetMemoryProperties(
870  VmaAllocator allocator,
871  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
872 
879 void vmaGetMemoryTypeProperties(
880  VmaAllocator allocator,
881  uint32_t memoryTypeIndex,
882  VkMemoryPropertyFlags* pFlags);
883 
892 void vmaSetCurrentFrameIndex(
893  VmaAllocator allocator,
894  uint32_t frameIndex);
895 
898 typedef struct VmaStatInfo
899 {
901  uint32_t blockCount;
903  uint32_t allocationCount;
905  uint32_t unusedRangeCount;
907  VkDeviceSize usedBytes;
909  VkDeviceSize unusedBytes;
910  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
911  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
912 } VmaStatInfo;
913 
915 typedef struct VmaStats
916 {
917  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
918  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
919  VmaStatInfo total;
920 } VmaStats;
921 
923 void vmaCalculateStats(
924  VmaAllocator allocator,
925  VmaStats* pStats);
926 
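/*
Example: gathering global statistics - a sketch assuming a valid `allocator`:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Heap 0 used bytes: %llu\n",
        (unsigned long long)stats.memoryHeap[0].usedBytes);
*/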
927 #define VMA_STATS_STRING_ENABLED 1
928 
929 #if VMA_STATS_STRING_ENABLED
930 
932 
934 void vmaBuildStatsString(
935  VmaAllocator allocator,
936  char** ppStatsString,
937  VkBool32 detailedMap);
938 
939 void vmaFreeStatsString(
940  VmaAllocator allocator,
941  char* pStatsString);
942 
943 #endif // #if VMA_STATS_STRING_ENABLED
944 
945 VK_DEFINE_HANDLE(VmaPool)
946 
947 typedef enum VmaMemoryUsage
948 {
950  VMA_MEMORY_USAGE_UNKNOWN = 0,
956  VMA_MEMORY_USAGE_GPU_ONLY = 1,
964  VMA_MEMORY_USAGE_CPU_ONLY = 2,
970  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
976  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
977  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
978 } VmaMemoryUsage;
988 
1003 typedef VkFlags VmaAllocationCreateFlags;
1053 
1058 typedef struct VmaAllocationCreateInfo
1059 {
1061  VmaAllocationCreateFlags flags;
1066  VmaMemoryUsage usage;
1072  VkMemoryPropertyFlags requiredFlags;
1077  VkMemoryPropertyFlags preferredFlags;
1085  uint32_t memoryTypeBits;
1091  VmaPool pool;
1098  void* pUserData;
1099 } VmaAllocationCreateInfo;
1100 
1115 VkResult vmaFindMemoryTypeIndex(
1116  VmaAllocator allocator,
1117  uint32_t memoryTypeBits,
1118  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1119  uint32_t* pMemoryTypeIndex);
1120 
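/*
Example: finding a memory type for a CPU-side staging buffer. A sketch;
`memReq` is assumed to come from vkGetBufferMemoryRequirements:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/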
1141 
1144 typedef VkFlags VmaPoolCreateFlags;
1145 
1148 typedef struct VmaPoolCreateInfo {
1150  uint32_t memoryTypeIndex;
1154  VmaPoolCreateFlags flags;
1159  VkDeviceSize blockSize;
1164  size_t minBlockCount;
1171  size_t maxBlockCount;
1186  uint32_t frameInUseCount;
1187 } VmaPoolCreateInfo;
1188 
1191 typedef struct VmaPoolStats {
1194  VkDeviceSize size;
1197  VkDeviceSize unusedSize;
1200  size_t allocationCount;
1203  size_t unusedRangeCount;
1210  VkDeviceSize unusedRangeSizeMax;
1211 } VmaPoolStats;
1212 
1219 VkResult vmaCreatePool(
1220  VmaAllocator allocator,
1221  const VmaPoolCreateInfo* pCreateInfo,
1222  VmaPool* pPool);
1223 
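/*
Example: creating a custom pool - a sketch where `memTypeIndex` is assumed to
come from vmaFindMemoryTypeIndex:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Route allocations here via VmaAllocationCreateInfo::pool, then:
    vmaDestroyPool(allocator, pool);
*/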
1226 void vmaDestroyPool(
1227  VmaAllocator allocator,
1228  VmaPool pool);
1229 
1236 void vmaGetPoolStats(
1237  VmaAllocator allocator,
1238  VmaPool pool,
1239  VmaPoolStats* pPoolStats);
1240 
1247 void vmaMakePoolAllocationsLost(
1248  VmaAllocator allocator,
1249  VmaPool pool,
1250  size_t* pLostAllocationCount);
1251 
1252 VK_DEFINE_HANDLE(VmaAllocation)
1253 
1254 
1256 typedef struct VmaAllocationInfo {
1261  uint32_t memoryType;
1270  VkDeviceMemory deviceMemory;
1275  VkDeviceSize offset;
1280  VkDeviceSize size;
1288  void* pMappedData;
1294  void* pUserData;
1295 } VmaAllocationInfo;
1296 
1307 VkResult vmaAllocateMemory(
1308  VmaAllocator allocator,
1309  const VkMemoryRequirements* pVkMemoryRequirements,
1310  const VmaAllocationCreateInfo* pCreateInfo,
1311  VmaAllocation* pAllocation,
1312  VmaAllocationInfo* pAllocationInfo);
1313 
1320 VkResult vmaAllocateMemoryForBuffer(
1321  VmaAllocator allocator,
1322  VkBuffer buffer,
1323  const VmaAllocationCreateInfo* pCreateInfo,
1324  VmaAllocation* pAllocation,
1325  VmaAllocationInfo* pAllocationInfo);
1326 
1328 VkResult vmaAllocateMemoryForImage(
1329  VmaAllocator allocator,
1330  VkImage image,
1331  const VmaAllocationCreateInfo* pCreateInfo,
1332  VmaAllocation* pAllocation,
1333  VmaAllocationInfo* pAllocationInfo);
1334 
1336 void vmaFreeMemory(
1337  VmaAllocator allocator,
1338  VmaAllocation allocation);
1339 
1341 void vmaGetAllocationInfo(
1342  VmaAllocator allocator,
1343  VmaAllocation allocation,
1344  VmaAllocationInfo* pAllocationInfo);
1345 
1359 void vmaSetAllocationUserData(
1360  VmaAllocator allocator,
1361  VmaAllocation allocation,
1362  void* pUserData);
1363 
1374 void vmaCreateLostAllocation(
1375  VmaAllocator allocator,
1376  VmaAllocation* pAllocation);
1377 
1412 VkResult vmaMapMemory(
1413  VmaAllocator allocator,
1414  VmaAllocation allocation,
1415  void** ppData);
1416 
1421 void vmaUnmapMemory(
1422  VmaAllocator allocator,
1423  VmaAllocation allocation);
1424 
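/*
Example: writing to an allocation through a temporary mapping. A sketch assuming
`allocation` lives in a HOST_VISIBLE memory type and `srcData`/`srcDataSize`
describe the data to upload:

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/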
1426 typedef struct VmaDefragmentationInfo {
1431  VkDeviceSize maxBytesToMove;
1436  uint32_t maxAllocationsToMove;
1437 } VmaDefragmentationInfo;
1438 
1440 typedef struct VmaDefragmentationStats {
1442  VkDeviceSize bytesMoved;
1444  VkDeviceSize bytesFreed;
1446  uint32_t allocationsMoved;
1448  uint32_t deviceMemoryBlocksFreed;
1449 } VmaDefragmentationStats;
1450 
1527 VkResult vmaDefragment(
1528  VmaAllocator allocator,
1529  VmaAllocation* pAllocations,
1530  size_t allocationCount,
1531  VkBool32* pAllocationsChanged,
1532  const VmaDefragmentationInfo *pDefragmentationInfo,
1533  VmaDefragmentationStats* pDefragmentationStats);
1534 
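/*
Example: defragmenting a set of allocations - a sketch where `allocations` is a
hypothetical array of ALLOC_COUNT VmaAllocation handles in HOST_VISIBLE memory.
Entries of `allocationsChanged` tell which allocations were moved; buffers or
images bound to those must be destroyed and recreated at the new locations:

    VkBool32 allocationsChanged[ALLOC_COUNT];
    VmaDefragmentationStats defragStats;
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOC_COUNT, allocationsChanged,
        NULL, // Optional info; null is assumed to request default limits.
        &defragStats);
*/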
1561 VkResult vmaCreateBuffer(
1562  VmaAllocator allocator,
1563  const VkBufferCreateInfo* pBufferCreateInfo,
1564  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1565  VkBuffer* pBuffer,
1566  VmaAllocation* pAllocation,
1567  VmaAllocationInfo* pAllocationInfo);
1568 
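/*
Example: the typical pattern - creating a buffer together with its own memory
in one call. A sketch with error handling omitted:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
        &buffer, &allocation, NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/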
1580 void vmaDestroyBuffer(
1581  VmaAllocator allocator,
1582  VkBuffer buffer,
1583  VmaAllocation allocation);
1584 
1586 VkResult vmaCreateImage(
1587  VmaAllocator allocator,
1588  const VkImageCreateInfo* pImageCreateInfo,
1589  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1590  VkImage* pImage,
1591  VmaAllocation* pAllocation,
1592  VmaAllocationInfo* pAllocationInfo);
1593 
1605 void vmaDestroyImage(
1606  VmaAllocator allocator,
1607  VkImage image,
1608  VmaAllocation allocation);
1609 
1610 #ifdef __cplusplus
1611 }
1612 #endif
1613 
1614 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1615 
1616 // For Visual Studio IntelliSense.
1617 #ifdef __INTELLISENSE__
1618 #define VMA_IMPLEMENTATION
1619 #endif
1620 
1621 #ifdef VMA_IMPLEMENTATION
1622 #undef VMA_IMPLEMENTATION
1623 
1624 #include <cstdint>
1625 #include <cstdlib>
1626 #include <cstring>
1627 #include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
1628 /*******************************************************************************
1629 CONFIGURATION SECTION
1630 
1631 Define some of these macros before each #include of this header or change them
1632 here if you need behavior other than the default, depending on your environment.
1633 */
1634 
1635 /*
1636 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1637 internally, like:
1638 
1639  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1640 
1641 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1642 VmaAllocatorCreateInfo::pVulkanFunctions.
1643 */
1644 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1645 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1646 #endif
1647 
1648 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1649 //#define VMA_USE_STL_CONTAINERS 1
1650 
1651 /* Set this macro to 1 to make the library include and use STL containers:
1652 std::pair, std::vector, std::list, std::unordered_map.
1653 
1654 Set it to 0 or leave it undefined to make the library use its own implementation of
1655 the containers.
1656 */
1657 #if VMA_USE_STL_CONTAINERS
1658  #define VMA_USE_STL_VECTOR 1
1659  #define VMA_USE_STL_UNORDERED_MAP 1
1660  #define VMA_USE_STL_LIST 1
1661 #endif
1662 
1663 #if VMA_USE_STL_VECTOR
1664  #include <vector>
1665 #endif
1666 
1667 #if VMA_USE_STL_UNORDERED_MAP
1668  #include <unordered_map>
1669 #endif
1670 
1671 #if VMA_USE_STL_LIST
1672  #include <list>
1673 #endif
1674 
1675 /*
1676 The following headers are used in this CONFIGURATION section only, so feel free to
1677 remove them if not needed.
1678 */
1679 #include <cassert> // for assert
1680 #include <algorithm> // for min, max
1681 #include <mutex> // for std::mutex
1682 #include <atomic> // for std::atomic
1683 
1684 #if !defined(_WIN32)
1685  #include <malloc.h> // for aligned_alloc()
1686 #endif
1687 
1688 // Normal assert to check for programmer's errors, especially in Debug configuration.
1689 #ifndef VMA_ASSERT
1690  #ifdef _DEBUG
1691  #define VMA_ASSERT(expr) assert(expr)
1692  #else
1693  #define VMA_ASSERT(expr)
1694  #endif
1695 #endif
1696 
1697 // Assert that will be called very often, e.g. inside data structures like operator[].
1698 // Making it non-empty can make the program slow.
1699 #ifndef VMA_HEAVY_ASSERT
1700  #ifdef _DEBUG
1701  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1702  #else
1703  #define VMA_HEAVY_ASSERT(expr)
1704  #endif
1705 #endif
1706 
1707 #ifndef VMA_NULL
1708  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1709  #define VMA_NULL nullptr
1710 #endif
1711 
1712 #ifndef VMA_ALIGN_OF
1713  #define VMA_ALIGN_OF(type) (__alignof(type))
1714 #endif
1715 
1716 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1717  #if defined(_WIN32)
1718  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1719  #else
1720  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1721  #endif
1722 #endif
1723 
1724 #ifndef VMA_SYSTEM_FREE
1725  #if defined(_WIN32)
1726  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1727  #else
1728  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1729  #endif
1730 #endif
1731 
1732 #ifndef VMA_MIN
1733  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1734 #endif
1735 
1736 #ifndef VMA_MAX
1737  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1738 #endif
1739 
1740 #ifndef VMA_SWAP
1741  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1742 #endif
1743 
1744 #ifndef VMA_SORT
1745  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1746 #endif
1747 
1748 #ifndef VMA_DEBUG_LOG
1749  #define VMA_DEBUG_LOG(format, ...)
1750  /*
1751  #define VMA_DEBUG_LOG(format, ...) do { \
1752  printf(format, __VA_ARGS__); \
1753  printf("\n"); \
1754  } while(false)
1755  */
1756 #endif
1757 
1758 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1759 #if VMA_STATS_STRING_ENABLED
1760  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1761  {
1762  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1763  }
1764  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1765  {
1766  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1767  }
1768  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1769  {
1770  snprintf(outStr, strLen, "%p", ptr);
1771  }
1772 #endif
1773 
1774 #ifndef VMA_MUTEX
1775  class VmaMutex
1776  {
1777  public:
1778  VmaMutex() { }
1779  ~VmaMutex() { }
1780  void Lock() { m_Mutex.lock(); }
1781  void Unlock() { m_Mutex.unlock(); }
1782  private:
1783  std::mutex m_Mutex;
1784  };
1785  #define VMA_MUTEX VmaMutex
1786 #endif
1787 
1788 /*
1789 If providing your own implementation, you need to implement a subset of std::atomic:
1790 
1791 - Constructor(uint32_t desired)
1792 - uint32_t load() const
1793 - void store(uint32_t desired)
1794 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1795 */
1796 #ifndef VMA_ATOMIC_UINT32
1797  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1798 #endif
1799 
1800 #ifndef VMA_BEST_FIT
1801 
1813  #define VMA_BEST_FIT (1)
1814 #endif
1815 
1816 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
1817 
1821  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
1822 #endif
1823 
1824 #ifndef VMA_DEBUG_ALIGNMENT
1825 
1829  #define VMA_DEBUG_ALIGNMENT (1)
1830 #endif
1831 
1832 #ifndef VMA_DEBUG_MARGIN
1833 
1837  #define VMA_DEBUG_MARGIN (0)
1838 #endif
1839 
1840 #ifndef VMA_DEBUG_GLOBAL_MUTEX
1841 
1845  #define VMA_DEBUG_GLOBAL_MUTEX (0)
1846 #endif
1847 
1848 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
1849 
1853  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
1854 #endif
1855 
1856 #ifndef VMA_SMALL_HEAP_MAX_SIZE
1857  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
1859 #endif
1860 
1861 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
1862  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
1864 #endif
1865 
1866 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
1867 
1868 /*******************************************************************************
1869 END OF CONFIGURATION
1870 */
1871 
1872 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
1873  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
1874 
1875 // Returns number of bits set to 1 in (v).
1876 static inline uint32_t VmaCountBitsSet(uint32_t v)
1877 {
1878  uint32_t c = v - ((v >> 1) & 0x55555555);
1879  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
1880  c = ((c >> 4) + c) & 0x0F0F0F0F;
1881  c = ((c >> 8) + c) & 0x00FF00FF;
1882  c = ((c >> 16) + c) & 0x0000FFFF;
1883  return c;
1884 }
1885 
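// E.g. VmaCountBitsSet(0x2C) == 3, because 0x2C is 101100 in binary.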
1886 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
1887 // Use types like uint32_t, uint64_t as T.
1888 template <typename T>
1889 static inline T VmaAlignUp(T val, T align)
1890 {
1891  return (val + align - 1) / align * align;
1892 }
1893 
1894 // Division with mathematical rounding to the nearest integer.
1895 template <typename T>
1896 inline T VmaRoundDiv(T x, T y)
1897 {
1898  return (x + (y / (T)2)) / y;
1899 }
1900 
1901 #ifndef VMA_SORT
1902 
1903 template<typename Iterator, typename Compare>
1904 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
1905 {
1906  Iterator centerValue = end; --centerValue;
1907  Iterator insertIndex = beg;
1908  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
1909  {
1910  if(cmp(*memTypeIndex, *centerValue))
1911  {
1912  if(insertIndex != memTypeIndex)
1913  {
1914  VMA_SWAP(*memTypeIndex, *insertIndex);
1915  }
1916  ++insertIndex;
1917  }
1918  }
1919  if(insertIndex != centerValue)
1920  {
1921  VMA_SWAP(*insertIndex, *centerValue);
1922  }
1923  return insertIndex;
1924 }
1925 
1926 template<typename Iterator, typename Compare>
1927 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
1928 {
1929  if(beg < end)
1930  {
1931  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
1932  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
1933  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
1934  }
1935 }
1936 
1937 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
1938 
1939 #endif // #ifndef VMA_SORT
1940 
1941 /*
1942 Returns true if two memory blocks occupy overlapping pages.
1943 ResourceA must be at a lower memory offset than ResourceB.
1944 
1945 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
1946 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
1947 */
1948 static inline bool VmaBlocksOnSamePage(
1949  VkDeviceSize resourceAOffset,
1950  VkDeviceSize resourceASize,
1951  VkDeviceSize resourceBOffset,
1952  VkDeviceSize pageSize)
1953 {
1954  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
1955  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
1956  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
1957  VkDeviceSize resourceBStart = resourceBOffset;
1958  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
1959  return resourceAEndPage == resourceBStartPage;
1960 }
1961 
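// E.g. with pageSize = 4096: VmaBlocksOnSamePage(0, 4000, 4095, 4096) == true
// (both resources touch page 0), while VmaBlocksOnSamePage(0, 4000, 4096, 4096)
// == false (the second resource starts exactly on the next page).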
1962 enum VmaSuballocationType
1963 {
1964  VMA_SUBALLOCATION_TYPE_FREE = 0,
1965  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
1966  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
1967  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
1968  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
1969  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
1970  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
1971 };
1972 
1973 /*
1974 Returns true if given suballocation types could conflict and must respect
1975 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is
1976 a buffer or a linear image and the other one is an optimal image. If the type is
1977 unknown, behave conservatively.
1978 */
1979 static inline bool VmaIsBufferImageGranularityConflict(
1980  VmaSuballocationType suballocType1,
1981  VmaSuballocationType suballocType2)
1982 {
1983  if(suballocType1 > suballocType2)
1984  {
1985  VMA_SWAP(suballocType1, suballocType2);
1986  }
1987 
1988  switch(suballocType1)
1989  {
1990  case VMA_SUBALLOCATION_TYPE_FREE:
1991  return false;
1992  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
1993  return true;
1994  case VMA_SUBALLOCATION_TYPE_BUFFER:
1995  return
1996  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
1997  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
1998  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
1999  return
2000  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2001  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2002  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2003  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2004  return
2005  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2006  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2007  return false;
2008  default:
2009  VMA_ASSERT(0);
2010  return true;
2011  }
2012 }
2013 
2014 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2015 struct VmaMutexLock
2016 {
2017 public:
2018  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2019  m_pMutex(useMutex ? &mutex : VMA_NULL)
2020  {
2021  if(m_pMutex)
2022  {
2023  m_pMutex->Lock();
2024  }
2025  }
2026 
2027  ~VmaMutexLock()
2028  {
2029  if(m_pMutex)
2030  {
2031  m_pMutex->Unlock();
2032  }
2033  }
2034 
2035 private:
2036  VMA_MUTEX* m_pMutex;
2037 };
2038 
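/*
Example: the intended usage - lock for the duration of a scope. A sketch where
`m_Mutex` and `m_UseMutex` are hypothetical members of the surrounding class:

    {
        VmaMutexLock lock(m_Mutex, m_UseMutex);
        // ... access data shared between threads ...
    } // Mutex released here automatically.
*/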
2039 #if VMA_DEBUG_GLOBAL_MUTEX
2040  static VMA_MUTEX gDebugGlobalMutex;
2041  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2042 #else
2043  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2044 #endif
2045 
2046 // Minimum size of a free suballocation to register it in the free suballocation collection.
2047 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2048 
2049 /*
2050 Performs binary search and returns iterator to the first element that is greater
2051 than or equal to (key), according to comparison (cmp).
2052 
2053 Cmp should return true if the first argument is less than the second argument.
2054 
2055 Returned value is the found element, if present in the collection, or the place
2056 where a new element with value (key) should be inserted.
2057 */
2058 template <typename IterT, typename KeyT, typename CmpT>
2059 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2060 {
2061  size_t down = 0, up = (end - beg);
2062  while(down < up)
2063  {
2064  const size_t mid = (down + up) / 2;
2065  if(cmp(*(beg+mid), key))
2066  {
2067  down = mid + 1;
2068  }
2069  else
2070  {
2071  up = mid;
2072  }
2073  }
2074  return beg + down;
2075 }
2076 
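/*
Example: a lower-bound search over a sorted array - a self-contained sketch:

    VkDeviceSize sizes[] = { 16, 64, 256, 1024 };
    VkDeviceSize* it = VmaBinaryFindFirstNotLess(
        sizes, sizes + 4, (VkDeviceSize)100,
        [](VkDeviceSize lhs, VkDeviceSize rhs) { return lhs < rhs; });
    // it now points to 256 - the first element not less than 100.
*/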
2078 // Memory allocation
2079 
2080 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2081 {
2082  if((pAllocationCallbacks != VMA_NULL) &&
2083  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2084  {
2085  return (*pAllocationCallbacks->pfnAllocation)(
2086  pAllocationCallbacks->pUserData,
2087  size,
2088  alignment,
2089  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2090  }
2091  else
2092  {
2093  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2094  }
2095 }
2096 
2097 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2098 {
2099  if((pAllocationCallbacks != VMA_NULL) &&
2100  (pAllocationCallbacks->pfnFree != VMA_NULL))
2101  {
2102  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2103  }
2104  else
2105  {
2106  VMA_SYSTEM_FREE(ptr);
2107  }
2108 }
2109 
2110 template<typename T>
2111 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2112 {
2113  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2114 }
2115 
2116 template<typename T>
2117 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2118 {
2119  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2120 }
2121 
2122 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2123 
2124 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2125 
2126 template<typename T>
2127 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2128 {
2129  ptr->~T();
2130  VmaFree(pAllocationCallbacks, ptr);
2131 }
2132 
2133 template<typename T>
2134 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2135 {
2136  if(ptr != VMA_NULL)
2137  {
2138  for(size_t i = count; i--; )
2139  {
2140  ptr[i].~T();
2141  }
2142  VmaFree(pAllocationCallbacks, ptr);
2143  }
2144 }
2145 
2146 // STL-compatible allocator.
2147 template<typename T>
2148 class VmaStlAllocator
2149 {
2150 public:
2151  const VkAllocationCallbacks* const m_pCallbacks;
2152  typedef T value_type;
2153 
2154  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2155  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2156 
2157  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2158  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2159 
2160  template<typename U>
2161  bool operator==(const VmaStlAllocator<U>& rhs) const
2162  {
2163  return m_pCallbacks == rhs.m_pCallbacks;
2164  }
2165  template<typename U>
2166  bool operator!=(const VmaStlAllocator<U>& rhs) const
2167  {
2168  return m_pCallbacks != rhs.m_pCallbacks;
2169  }
2170 
2171  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2172 };
2173 
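/*
Example: plugging VmaStlAllocator into a standard container so its storage goes
through the user-provided VkAllocationCallbacks. A sketch (requires <vector>);
`pCallbacks` may be null, in which case VMA_SYSTEM_ALIGNED_MALLOC is used:

    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    std::vector< uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
    v.push_back(42);
*/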
2174 #if VMA_USE_STL_VECTOR
2175 
2176 #define VmaVector std::vector
2177 
2178 template<typename T, typename allocatorT>
2179 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2180 {
2181  vec.insert(vec.begin() + index, item);
2182 }
2183 
2184 template<typename T, typename allocatorT>
2185 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2186 {
2187  vec.erase(vec.begin() + index);
2188 }
2189 
2190 #else // #if VMA_USE_STL_VECTOR
2191 
2192 /* Class with interface compatible with subset of std::vector.
2193 T must be POD because constructors and destructors are not called and memcpy is
2194 used for these objects. */
2195 template<typename T, typename AllocatorT>
2196 class VmaVector
2197 {
2198 public:
2199  typedef T value_type;
2200 
2201  VmaVector(const AllocatorT& allocator) :
2202  m_Allocator(allocator),
2203  m_pArray(VMA_NULL),
2204  m_Count(0),
2205  m_Capacity(0)
2206  {
2207  }
2208 
2209  VmaVector(size_t count, const AllocatorT& allocator) :
2210  m_Allocator(allocator),
2211  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2212  m_Count(count),
2213  m_Capacity(count)
2214  {
2215  }
2216 
2217  VmaVector(const VmaVector<T, AllocatorT>& src) :
2218  m_Allocator(src.m_Allocator),
2219  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2220  m_Count(src.m_Count),
2221  m_Capacity(src.m_Count)
2222  {
2223  if(m_Count != 0)
2224  {
2225  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2226  }
2227  }
2228 
2229  ~VmaVector()
2230  {
2231  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2232  }
2233 
2234  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2235  {
2236  if(&rhs != this)
2237  {
2238  resize(rhs.m_Count);
2239  if(m_Count != 0)
2240  {
2241  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2242  }
2243  }
2244  return *this;
2245  }
2246 
2247  bool empty() const { return m_Count == 0; }
2248  size_t size() const { return m_Count; }
2249  T* data() { return m_pArray; }
2250  const T* data() const { return m_pArray; }
2251 
2252  T& operator[](size_t index)
2253  {
2254  VMA_HEAVY_ASSERT(index < m_Count);
2255  return m_pArray[index];
2256  }
2257  const T& operator[](size_t index) const
2258  {
2259  VMA_HEAVY_ASSERT(index < m_Count);
2260  return m_pArray[index];
2261  }
2262 
2263  T& front()
2264  {
2265  VMA_HEAVY_ASSERT(m_Count > 0);
2266  return m_pArray[0];
2267  }
2268  const T& front() const
2269  {
2270  VMA_HEAVY_ASSERT(m_Count > 0);
2271  return m_pArray[0];
2272  }
2273  T& back()
2274  {
2275  VMA_HEAVY_ASSERT(m_Count > 0);
2276  return m_pArray[m_Count - 1];
2277  }
2278  const T& back() const
2279  {
2280  VMA_HEAVY_ASSERT(m_Count > 0);
2281  return m_pArray[m_Count - 1];
2282  }
2283 
2284  void reserve(size_t newCapacity, bool freeMemory = false)
2285  {
2286  newCapacity = VMA_MAX(newCapacity, m_Count);
2287 
2288  if((newCapacity < m_Capacity) && !freeMemory)
2289  {
2290  newCapacity = m_Capacity;
2291  }
2292 
2293  if(newCapacity != m_Capacity)
2294  {
2295  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2296  if(m_Count != 0)
2297  {
2298  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2299  }
2300  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2301  m_Capacity = newCapacity;
2302  m_pArray = newArray;
2303  }
2304  }
2305 
2306  void resize(size_t newCount, bool freeMemory = false)
2307  {
2308  size_t newCapacity = m_Capacity;
2309  if(newCount > m_Capacity)
2310  {
2311  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2312  }
2313  else if(freeMemory)
2314  {
2315  newCapacity = newCount;
2316  }
2317 
2318  if(newCapacity != m_Capacity)
2319  {
2320  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2321  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2322  if(elementsToCopy != 0)
2323  {
2324  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2325  }
2326  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2327  m_Capacity = newCapacity;
2328  m_pArray = newArray;
2329  }
2330 
2331  m_Count = newCount;
2332  }
2333 
2334  void clear(bool freeMemory = false)
2335  {
2336  resize(0, freeMemory);
2337  }
2338 
2339  void insert(size_t index, const T& src)
2340  {
2341  VMA_HEAVY_ASSERT(index <= m_Count);
2342  const size_t oldCount = size();
2343  resize(oldCount + 1);
2344  if(index < oldCount)
2345  {
2346  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2347  }
2348  m_pArray[index] = src;
2349  }
2350 
2351  void remove(size_t index)
2352  {
2353  VMA_HEAVY_ASSERT(index < m_Count);
2354  const size_t oldCount = size();
2355  if(index < oldCount - 1)
2356  {
2357  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2358  }
2359  resize(oldCount - 1);
2360  }
2361 
2362  void push_back(const T& src)
2363  {
2364  const size_t newIndex = size();
2365  resize(newIndex + 1);
2366  m_pArray[newIndex] = src;
2367  }
2368 
2369  void pop_back()
2370  {
2371  VMA_HEAVY_ASSERT(m_Count > 0);
2372  resize(size() - 1);
2373  }
2374 
2375  void push_front(const T& src)
2376  {
2377  insert(0, src);
2378  }
2379 
2380  void pop_front()
2381  {
2382  VMA_HEAVY_ASSERT(m_Count > 0);
2383  remove(0);
2384  }
2385 
2386  typedef T* iterator;
2387 
2388  iterator begin() { return m_pArray; }
2389  iterator end() { return m_pArray + m_Count; }
2390 
2391 private:
2392  AllocatorT m_Allocator;
2393  T* m_pArray;
2394  size_t m_Count;
2395  size_t m_Capacity;
2396 };
2397 
2398 template<typename T, typename allocatorT>
2399 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2400 {
2401  vec.insert(index, item);
2402 }
2403 
2404 template<typename T, typename allocatorT>
2405 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2406 {
2407  vec.remove(index);
2408 }
2409 
2410 #endif // #if VMA_USE_STL_VECTOR
2411 
2412 template<typename CmpLess, typename VectorT>
2413 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2414 {
2415  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2416  vector.data(),
2417  vector.data() + vector.size(),
2418  value,
2419  CmpLess()) - vector.data();
2420  VmaVectorInsert(vector, indexToInsert, value);
2421  return indexToInsert;
2422 }
2423 
2424 template<typename CmpLess, typename VectorT>
2425 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2426 {
2427  CmpLess comparator;
2428  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2429  vector.begin(),
2430  vector.end(),
2431  value,
2432  comparator);
2433  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2434  {
2435  size_t indexToRemove = it - vector.begin();
2436  VmaVectorRemove(vector, indexToRemove);
2437  return true;
2438  }
2439  return false;
2440 }
2441 
2442 template<typename CmpLess, typename VectorT>
2443 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2444 {
2445  CmpLess comparator;
2446  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2447  vector.data(),
2448  vector.data() + vector.size(),
2449  value,
2450  comparator);
2451  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
2452  {
2453  return it - vector.data();
2454  }
2455  else
2456  {
2457  return vector.size();
2458  }
2459 }
2460 
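/*
Example: maintaining a sorted VmaVector with the helpers above - a sketch using
a hypothetical less-than comparator:

    struct CmpSize
    {
        bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const { return lhs < rhs; }
    };

    VmaStlAllocator<VkDeviceSize> alloc(pCallbacks);
    VmaVector< VkDeviceSize, VmaStlAllocator<VkDeviceSize> > sizes(alloc);
    VmaVectorInsertSorted<CmpSize>(sizes, (VkDeviceSize)128); // Keeps order.
    bool removed = VmaVectorRemoveSorted<CmpSize>(sizes, (VkDeviceSize)128);
*/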
2462 // class VmaPoolAllocator
2463 
2464 /*
2465 Allocator for objects of type T using a list of arrays (pools) to speed up
2466 allocation. Number of elements that can be allocated is not bounded because
2467 allocator can create multiple blocks.
2468 */
2469 template<typename T>
2470 class VmaPoolAllocator
2471 {
2472 public:
2473  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2474  ~VmaPoolAllocator();
2475  void Clear();
2476  T* Alloc();
2477  void Free(T* ptr);
2478 
2479 private:
2480  union Item
2481  {
2482  uint32_t NextFreeIndex;
2483  T Value;
2484  };
2485 
2486  struct ItemBlock
2487  {
2488  Item* pItems;
2489  uint32_t FirstFreeIndex;
2490  };
2491 
2492  const VkAllocationCallbacks* m_pAllocationCallbacks;
2493  size_t m_ItemsPerBlock;
2494  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2495 
2496  ItemBlock& CreateNewBlock();
2497 };
2498 
2499 template<typename T>
2500 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2501  m_pAllocationCallbacks(pAllocationCallbacks),
2502  m_ItemsPerBlock(itemsPerBlock),
2503  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2504 {
2505  VMA_ASSERT(itemsPerBlock > 0);
2506 }
2507 
2508 template<typename T>
2509 VmaPoolAllocator<T>::~VmaPoolAllocator()
2510 {
2511  Clear();
2512 }
2513 
2514 template<typename T>
2515 void VmaPoolAllocator<T>::Clear()
2516 {
2517  for(size_t i = m_ItemBlocks.size(); i--; )
2518  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2519  m_ItemBlocks.clear();
2520 }
2521 
2522 template<typename T>
2523 T* VmaPoolAllocator<T>::Alloc()
2524 {
2525  for(size_t i = m_ItemBlocks.size(); i--; )
2526  {
2527  ItemBlock& block = m_ItemBlocks[i];
2528  // This block has some free items: Use first one.
2529  if(block.FirstFreeIndex != UINT32_MAX)
2530  {
2531  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2532  block.FirstFreeIndex = pItem->NextFreeIndex;
2533  return &pItem->Value;
2534  }
2535  }
2536 
2537  // No block has free item: Create new one and use it.
2538  ItemBlock& newBlock = CreateNewBlock();
2539  Item* const pItem = &newBlock.pItems[0];
2540  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2541  return &pItem->Value;
2542 }
2543 
2544 template<typename T>
2545 void VmaPoolAllocator<T>::Free(T* ptr)
2546 {
2547  // Search all memory blocks to find ptr.
2548  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2549  {
2550  ItemBlock& block = m_ItemBlocks[i];
2551 
2552  // Casting to union.
2553  Item* pItemPtr;
2554  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2555 
2556  // Check if pItemPtr is in address range of this block.
2557  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2558  {
2559  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2560  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2561  block.FirstFreeIndex = index;
2562  return;
2563  }
2564  }
2565  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2566 }
2567 
2568 template<typename T>
2569 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2570 {
2571  ItemBlock newBlock = {
2572  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2573 
2574  m_ItemBlocks.push_back(newBlock);
2575 
2576  // Setup singly-linked list of all free items in this block.
2577  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2578  newBlock.pItems[i].NextFreeIndex = i + 1;
2579  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2580  return m_ItemBlocks.back();
2581 }
2582 
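/*
Example: allocating and freeing objects from the pool allocator - a sketch with
a hypothetical POD type Node. Note that Alloc() does not run a constructor and
Free() does not run a destructor; items are raw storage from the blocks:

    VmaPoolAllocator<Node> nodeAllocator(pAllocationCallbacks, 64); // 64 items per block.
    Node* node = nodeAllocator.Alloc();
    // ... initialize and use *node ...
    nodeAllocator.Free(node);
*/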
2584 // class VmaRawList, VmaList
2585 
2586 #if VMA_USE_STL_LIST
2587 
2588 #define VmaList std::list
2589 
2590 #else // #if VMA_USE_STL_LIST
2591 
2592 template<typename T>
2593 struct VmaListItem
2594 {
2595  VmaListItem* pPrev;
2596  VmaListItem* pNext;
2597  T Value;
2598 };
2599 
2600 // Doubly linked list.
2601 template<typename T>
2602 class VmaRawList
2603 {
2604 public:
2605  typedef VmaListItem<T> ItemType;
2606 
2607  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2608  ~VmaRawList();
2609  void Clear();
2610 
2611  size_t GetCount() const { return m_Count; }
2612  bool IsEmpty() const { return m_Count == 0; }
2613 
2614  ItemType* Front() { return m_pFront; }
2615  const ItemType* Front() const { return m_pFront; }
2616  ItemType* Back() { return m_pBack; }
2617  const ItemType* Back() const { return m_pBack; }
2618 
2619  ItemType* PushBack();
2620  ItemType* PushFront();
2621  ItemType* PushBack(const T& value);
2622  ItemType* PushFront(const T& value);
2623  void PopBack();
2624  void PopFront();
2625 
2626  // Item can be null - it means PushBack.
2627  ItemType* InsertBefore(ItemType* pItem);
2628  // Item can be null - it means PushFront.
2629  ItemType* InsertAfter(ItemType* pItem);
2630 
2631  ItemType* InsertBefore(ItemType* pItem, const T& value);
2632  ItemType* InsertAfter(ItemType* pItem, const T& value);
2633 
2634  void Remove(ItemType* pItem);
2635 
2636 private:
2637  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2638  VmaPoolAllocator<ItemType> m_ItemAllocator;
2639  ItemType* m_pFront;
2640  ItemType* m_pBack;
2641  size_t m_Count;
2642 
2643  // Declared but not defined, to block the copy constructor and assignment operator.
2644  VmaRawList(const VmaRawList<T>& src);
2645  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2646 };
2647 
2648 template<typename T>
2649 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2650  m_pAllocationCallbacks(pAllocationCallbacks),
2651  m_ItemAllocator(pAllocationCallbacks, 128),
2652  m_pFront(VMA_NULL),
2653  m_pBack(VMA_NULL),
2654  m_Count(0)
2655 {
2656 }
2657 
2658 template<typename T>
2659 VmaRawList<T>::~VmaRawList()
2660 {
2661  // Intentionally not calling Clear, because that would be unnecessary
2662  // computation spent returning all items to m_ItemAllocator as free.
2663 }
2664 
2665 template<typename T>
2666 void VmaRawList<T>::Clear()
2667 {
2668  if(IsEmpty() == false)
2669  {
2670  ItemType* pItem = m_pBack;
2671  while(pItem != VMA_NULL)
2672  {
2673  ItemType* const pPrevItem = pItem->pPrev;
2674  m_ItemAllocator.Free(pItem);
2675  pItem = pPrevItem;
2676  }
2677  m_pFront = VMA_NULL;
2678  m_pBack = VMA_NULL;
2679  m_Count = 0;
2680  }
2681 }
2682 
2683 template<typename T>
2684 VmaListItem<T>* VmaRawList<T>::PushBack()
2685 {
2686  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2687  pNewItem->pNext = VMA_NULL;
2688  if(IsEmpty())
2689  {
2690  pNewItem->pPrev = VMA_NULL;
2691  m_pFront = pNewItem;
2692  m_pBack = pNewItem;
2693  m_Count = 1;
2694  }
2695  else
2696  {
2697  pNewItem->pPrev = m_pBack;
2698  m_pBack->pNext = pNewItem;
2699  m_pBack = pNewItem;
2700  ++m_Count;
2701  }
2702  return pNewItem;
2703 }
2704 
2705 template<typename T>
2706 VmaListItem<T>* VmaRawList<T>::PushFront()
2707 {
2708  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2709  pNewItem->pPrev = VMA_NULL;
2710  if(IsEmpty())
2711  {
2712  pNewItem->pNext = VMA_NULL;
2713  m_pFront = pNewItem;
2714  m_pBack = pNewItem;
2715  m_Count = 1;
2716  }
2717  else
2718  {
2719  pNewItem->pNext = m_pFront;
2720  m_pFront->pPrev = pNewItem;
2721  m_pFront = pNewItem;
2722  ++m_Count;
2723  }
2724  return pNewItem;
2725 }
2726 
2727 template<typename T>
2728 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2729 {
2730  ItemType* const pNewItem = PushBack();
2731  pNewItem->Value = value;
2732  return pNewItem;
2733 }
2734 
2735 template<typename T>
2736 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2737 {
2738  ItemType* const pNewItem = PushFront();
2739  pNewItem->Value = value;
2740  return pNewItem;
2741 }
2742 
2743 template<typename T>
2744 void VmaRawList<T>::PopBack()
2745 {
2746  VMA_HEAVY_ASSERT(m_Count > 0);
2747  ItemType* const pBackItem = m_pBack;
2748  ItemType* const pPrevItem = pBackItem->pPrev;
2749  if(pPrevItem != VMA_NULL)
2750  {
2751  pPrevItem->pNext = VMA_NULL;
2752  }
2753  m_pBack = pPrevItem;
2754  m_ItemAllocator.Free(pBackItem);
2755  --m_Count;
2756 }
2757 
2758 template<typename T>
2759 void VmaRawList<T>::PopFront()
2760 {
2761  VMA_HEAVY_ASSERT(m_Count > 0);
2762  ItemType* const pFrontItem = m_pFront;
2763  ItemType* const pNextItem = pFrontItem->pNext;
2764  if(pNextItem != VMA_NULL)
2765  {
2766  pNextItem->pPrev = VMA_NULL;
2767  }
2768  m_pFront = pNextItem;
2769  m_ItemAllocator.Free(pFrontItem);
2770  --m_Count;
2771 }
2772 
2773 template<typename T>
2774 void VmaRawList<T>::Remove(ItemType* pItem)
2775 {
2776  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2777  VMA_HEAVY_ASSERT(m_Count > 0);
2778 
2779  if(pItem->pPrev != VMA_NULL)
2780  {
2781  pItem->pPrev->pNext = pItem->pNext;
2782  }
2783  else
2784  {
2785  VMA_HEAVY_ASSERT(m_pFront == pItem);
2786  m_pFront = pItem->pNext;
2787  }
2788 
2789  if(pItem->pNext != VMA_NULL)
2790  {
2791  pItem->pNext->pPrev = pItem->pPrev;
2792  }
2793  else
2794  {
2795  VMA_HEAVY_ASSERT(m_pBack == pItem);
2796  m_pBack = pItem->pPrev;
2797  }
2798 
2799  m_ItemAllocator.Free(pItem);
2800  --m_Count;
2801 }
2802 
2803 template<typename T>
2804 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2805 {
2806  if(pItem != VMA_NULL)
2807  {
2808  ItemType* const prevItem = pItem->pPrev;
2809  ItemType* const newItem = m_ItemAllocator.Alloc();
2810  newItem->pPrev = prevItem;
2811  newItem->pNext = pItem;
2812  pItem->pPrev = newItem;
2813  if(prevItem != VMA_NULL)
2814  {
2815  prevItem->pNext = newItem;
2816  }
2817  else
2818  {
2819  VMA_HEAVY_ASSERT(m_pFront == pItem);
2820  m_pFront = newItem;
2821  }
2822  ++m_Count;
2823  return newItem;
2824  }
2825  else
2826  return PushBack();
2827 }
2828 
2829 template<typename T>
2830 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
2831 {
2832  if(pItem != VMA_NULL)
2833  {
2834  ItemType* const nextItem = pItem->pNext;
2835  ItemType* const newItem = m_ItemAllocator.Alloc();
2836  newItem->pNext = nextItem;
2837  newItem->pPrev = pItem;
2838  pItem->pNext = newItem;
2839  if(nextItem != VMA_NULL)
2840  {
2841  nextItem->pPrev = newItem;
2842  }
2843  else
2844  {
2845  VMA_HEAVY_ASSERT(m_pBack == pItem);
2846  m_pBack = newItem;
2847  }
2848  ++m_Count;
2849  return newItem;
2850  }
2851  else
2852  return PushFront();
2853 }
2854 
2855 template<typename T>
2856 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
2857 {
2858  ItemType* const newItem = InsertBefore(pItem);
2859  newItem->Value = value;
2860  return newItem;
2861 }
2862 
2863 template<typename T>
2864 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
2865 {
2866  ItemType* const newItem = InsertAfter(pItem);
2867  newItem->Value = value;
2868  return newItem;
2869 }
2870 
2871 template<typename T, typename AllocatorT>
2872 class VmaList
2873 {
2874 public:
2875  class iterator
2876  {
2877  public:
2878  iterator() :
2879  m_pList(VMA_NULL),
2880  m_pItem(VMA_NULL)
2881  {
2882  }
2883 
2884  T& operator*() const
2885  {
2886  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2887  return m_pItem->Value;
2888  }
2889  T* operator->() const
2890  {
2891  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2892  return &m_pItem->Value;
2893  }
2894 
2895  iterator& operator++()
2896  {
2897  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2898  m_pItem = m_pItem->pNext;
2899  return *this;
2900  }
2901  iterator& operator--()
2902  {
2903  if(m_pItem != VMA_NULL)
2904  {
2905  m_pItem = m_pItem->pPrev;
2906  }
2907  else
2908  {
2909  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2910  m_pItem = m_pList->Back();
2911  }
2912  return *this;
2913  }
2914 
2915  iterator operator++(int)
2916  {
2917  iterator result = *this;
2918  ++*this;
2919  return result;
2920  }
2921  iterator operator--(int)
2922  {
2923  iterator result = *this;
2924  --*this;
2925  return result;
2926  }
2927 
2928  bool operator==(const iterator& rhs) const
2929  {
2930  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2931  return m_pItem == rhs.m_pItem;
2932  }
2933  bool operator!=(const iterator& rhs) const
2934  {
2935  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
2936  return m_pItem != rhs.m_pItem;
2937  }
2938 
2939  private:
2940  VmaRawList<T>* m_pList;
2941  VmaListItem<T>* m_pItem;
2942 
2943  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
2944  m_pList(pList),
2945  m_pItem(pItem)
2946  {
2947  }
2948 
2949  friend class VmaList<T, AllocatorT>;
2950  };
2951 
2952  class const_iterator
2953  {
2954  public:
2955  const_iterator() :
2956  m_pList(VMA_NULL),
2957  m_pItem(VMA_NULL)
2958  {
2959  }
2960 
2961  const_iterator(const iterator& src) :
2962  m_pList(src.m_pList),
2963  m_pItem(src.m_pItem)
2964  {
2965  }
2966 
2967  const T& operator*() const
2968  {
2969  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2970  return m_pItem->Value;
2971  }
2972  const T* operator->() const
2973  {
2974  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2975  return &m_pItem->Value;
2976  }
2977 
2978  const_iterator& operator++()
2979  {
2980  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
2981  m_pItem = m_pItem->pNext;
2982  return *this;
2983  }
2984  const_iterator& operator--()
2985  {
2986  if(m_pItem != VMA_NULL)
2987  {
2988  m_pItem = m_pItem->pPrev;
2989  }
2990  else
2991  {
2992  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
2993  m_pItem = m_pList->Back();
2994  }
2995  return *this;
2996  }
2997 
2998  const_iterator operator++(int)
2999  {
3000  const_iterator result = *this;
3001  ++*this;
3002  return result;
3003  }
3004  const_iterator operator--(int)
3005  {
3006  const_iterator result = *this;
3007  --*this;
3008  return result;
3009  }
3010 
3011  bool operator==(const const_iterator& rhs) const
3012  {
3013  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3014  return m_pItem == rhs.m_pItem;
3015  }
3016  bool operator!=(const const_iterator& rhs) const
3017  {
3018  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3019  return m_pItem != rhs.m_pItem;
3020  }
3021 
3022  private:
3023  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3024  m_pList(pList),
3025  m_pItem(pItem)
3026  {
3027  }
3028 
3029  const VmaRawList<T>* m_pList;
3030  const VmaListItem<T>* m_pItem;
3031 
3032  friend class VmaList<T, AllocatorT>;
3033  };
3034 
3035  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3036 
3037  bool empty() const { return m_RawList.IsEmpty(); }
3038  size_t size() const { return m_RawList.GetCount(); }
3039 
3040  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3041  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3042 
3043  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3044  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3045 
3046  void clear() { m_RawList.Clear(); }
3047  void push_back(const T& value) { m_RawList.PushBack(value); }
3048  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3049  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3050 
3051 private:
3052  VmaRawList<T> m_RawList;
3053 };
3054 
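/*
Example: VmaList mirrors the std::list subset used by the library. A sketch:

    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > MyList;
    VmaStlAllocator<uint32_t> alloc(pCallbacks);
    MyList list(alloc);
    list.push_back(1);
    for(MyList::iterator it = list.begin(); it != list.end(); ++it)
    {
        // ... use *it ...
    }
*/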
3055 #endif // #if VMA_USE_STL_LIST
3056 
3058 // class VmaMap
3059 
3060 // Unused in this version.
3061 #if 0
3062 
3063 #if VMA_USE_STL_UNORDERED_MAP
3064 
3065 #define VmaPair std::pair
3066 
3067 #define VMA_MAP_TYPE(KeyT, ValueT) \
3068  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3069 
3070 #else // #if VMA_USE_STL_UNORDERED_MAP
3071 
3072 template<typename T1, typename T2>
3073 struct VmaPair
3074 {
3075  T1 first;
3076  T2 second;
3077 
3078  VmaPair() : first(), second() { }
3079  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3080 };
3081 
3082 /* Class compatible with subset of interface of std::unordered_map.
3083 KeyT, ValueT must be POD because they will be stored in VmaVector.
3084 */
3085 template<typename KeyT, typename ValueT>
3086 class VmaMap
3087 {
3088 public:
3089  typedef VmaPair<KeyT, ValueT> PairType;
3090  typedef PairType* iterator;
3091 
3092  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3093 
3094  iterator begin() { return m_Vector.begin(); }
3095  iterator end() { return m_Vector.end(); }
3096 
3097  void insert(const PairType& pair);
3098  iterator find(const KeyT& key);
3099  void erase(iterator it);
3100 
3101 private:
3102  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3103 };
3104 
3105 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3106 
3107 template<typename FirstT, typename SecondT>
3108 struct VmaPairFirstLess
3109 {
3110  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3111  {
3112  return lhs.first < rhs.first;
3113  }
3114  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3115  {
3116  return lhs.first < rhsFirst;
3117  }
3118 };
3119 
3120 template<typename KeyT, typename ValueT>
3121 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3122 {
3123  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3124  m_Vector.data(),
3125  m_Vector.data() + m_Vector.size(),
3126  pair,
3127  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3128  VmaVectorInsert(m_Vector, indexToInsert, pair);
3129 }
3130 
3131 template<typename KeyT, typename ValueT>
3132 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3133 {
3134  PairType* it = VmaBinaryFindFirstNotLess(
3135  m_Vector.data(),
3136  m_Vector.data() + m_Vector.size(),
3137  key,
3138  VmaPairFirstLess<KeyT, ValueT>());
3139  if((it != m_Vector.end()) && (it->first == key))
3140  {
3141  return it;
3142  }
3143  else
3144  {
3145  return m_Vector.end();
3146  }
3147 }
3148 
3149 template<typename KeyT, typename ValueT>
3150 void VmaMap<KeyT, ValueT>::erase(iterator it)
3151 {
3152  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3153 }
3154 
3155 #endif // #if VMA_USE_STL_UNORDERED_MAP
3156 
3157 #endif // #if 0
3158 
3160 
3161 class VmaDeviceMemoryBlock;
3162 
3163 struct VmaAllocation_T
3164 {
3165 private:
3166  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3167 
3168  enum FLAGS
3169  {
3170  FLAG_USER_DATA_STRING = 0x01,
3171  };
3172 
3173 public:
3174  enum ALLOCATION_TYPE
3175  {
3176  ALLOCATION_TYPE_NONE,
3177  ALLOCATION_TYPE_BLOCK,
3178  ALLOCATION_TYPE_DEDICATED,
3179  };
3180 
3181  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3182  m_Alignment(1),
3183  m_Size(0),
3184  m_pUserData(VMA_NULL),
3185  m_LastUseFrameIndex(currentFrameIndex),
3186  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3187  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3188  m_MapCount(0),
3189  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3190  {
3191  }
3192 
3193  ~VmaAllocation_T()
3194  {
3195  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3196 
3197  // Check if owned string was freed.
3198  VMA_ASSERT(m_pUserData == VMA_NULL);
3199  }
3200 
3201  void InitBlockAllocation(
3202  VmaPool hPool,
3203  VmaDeviceMemoryBlock* block,
3204  VkDeviceSize offset,
3205  VkDeviceSize alignment,
3206  VkDeviceSize size,
3207  VmaSuballocationType suballocationType,
3208  bool mapped,
3209  bool canBecomeLost)
3210  {
3211  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3212  VMA_ASSERT(block != VMA_NULL);
3213  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3214  m_Alignment = alignment;
3215  m_Size = size;
3216  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3217  m_SuballocationType = (uint8_t)suballocationType;
3218  m_BlockAllocation.m_hPool = hPool;
3219  m_BlockAllocation.m_Block = block;
3220  m_BlockAllocation.m_Offset = offset;
3221  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3222  }
3223 
3224  void InitLost()
3225  {
3226  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3227  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3228  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3229  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3230  m_BlockAllocation.m_Block = VMA_NULL;
3231  m_BlockAllocation.m_Offset = 0;
3232  m_BlockAllocation.m_CanBecomeLost = true;
3233  }
3234 
3235  void ChangeBlockAllocation(
3236  VmaDeviceMemoryBlock* block,
3237  VkDeviceSize offset)
3238  {
3239  VMA_ASSERT(block != VMA_NULL);
3240  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3241  m_BlockAllocation.m_Block = block;
3242  m_BlockAllocation.m_Offset = offset;
3243  }
3244 
3245  // pMappedData not null means allocation is created with MAPPED flag.
3246  void InitDedicatedAllocation(
3247  uint32_t memoryTypeIndex,
3248  VkDeviceMemory hMemory,
3249  VmaSuballocationType suballocationType,
3250  void* pMappedData,
3251  VkDeviceSize size)
3252  {
3253  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3254  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3255  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3256  m_Alignment = 0;
3257  m_Size = size;
3258  m_SuballocationType = (uint8_t)suballocationType;
3259  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3260  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3261  m_DedicatedAllocation.m_hMemory = hMemory;
3262  m_DedicatedAllocation.m_pMappedData = pMappedData;
3263  }
3264 
3265  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3266  VkDeviceSize GetAlignment() const { return m_Alignment; }
3267  VkDeviceSize GetSize() const { return m_Size; }
3268  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3269  void* GetUserData() const { return m_pUserData; }
3270  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3271  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3272 
3273  VmaDeviceMemoryBlock* GetBlock() const
3274  {
3275  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3276  return m_BlockAllocation.m_Block;
3277  }
3278  VkDeviceSize GetOffset() const;
3279  VkDeviceMemory GetMemory() const;
3280  uint32_t GetMemoryTypeIndex() const;
3281  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3282  void* GetMappedData() const;
3283  bool CanBecomeLost() const;
3284  VmaPool GetPool() const;
3285 
3286  uint32_t GetLastUseFrameIndex() const
3287  {
3288  return m_LastUseFrameIndex.load();
3289  }
3290  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3291  {
3292  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3293  }
3294  /*
3295  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3296  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3297  - Else, returns false.
3298 
3299  If hAllocation is already lost, assert - you should not call it then.
3300  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3301  */
3302  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3303 
3304  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3305  {
3306  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3307  outInfo.blockCount = 1;
3308  outInfo.allocationCount = 1;
3309  outInfo.unusedRangeCount = 0;
3310  outInfo.usedBytes = m_Size;
3311  outInfo.unusedBytes = 0;
3312  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3313  outInfo.unusedRangeSizeMin = UINT64_MAX;
3314  outInfo.unusedRangeSizeMax = 0;
3315  }
3316 
3317  void BlockAllocMap();
3318  void BlockAllocUnmap();
3319  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3320  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3321 
3322 private:
3323  VkDeviceSize m_Alignment;
3324  VkDeviceSize m_Size;
3325  void* m_pUserData;
3326  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3327  uint8_t m_Type; // ALLOCATION_TYPE
3328  uint8_t m_SuballocationType; // VmaSuballocationType
3329  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3330  // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are reference counter for vmaMapMemory()/vmaUnmapMemory().
3331  uint8_t m_MapCount;
3332  uint8_t m_Flags; // enum FLAGS
3333 
3334  // Allocation out of VmaDeviceMemoryBlock.
3335  struct BlockAllocation
3336  {
3337  VmaPool m_hPool; // Null if belongs to general memory.
3338  VmaDeviceMemoryBlock* m_Block;
3339  VkDeviceSize m_Offset;
3340  bool m_CanBecomeLost;
3341  };
3342 
3343  // Allocation for an object that has its own private VkDeviceMemory.
3344  struct DedicatedAllocation
3345  {
3346  uint32_t m_MemoryTypeIndex;
3347  VkDeviceMemory m_hMemory;
3348  void* m_pMappedData; // Not null means memory is mapped.
3349  };
3350 
3351  union
3352  {
3353  // Allocation out of VmaDeviceMemoryBlock.
3354  BlockAllocation m_BlockAllocation;
3355  // Allocation for an object that has its own private VkDeviceMemory.
3356  DedicatedAllocation m_DedicatedAllocation;
3357  };
3358 
3359  void FreeUserDataString(VmaAllocator hAllocator);
3360 };
3361 
3362 /*
3363 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3364 an allocated memory block, or free.
3365 */
3366 struct VmaSuballocation
3367 {
3368  VkDeviceSize offset;
3369  VkDeviceSize size;
3370  VmaAllocation hAllocation;
3371  VmaSuballocationType type;
3372 };
3373 
3374 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3375 
3376 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3377 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3378 
3379 /*
3380 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3381 
3382 If canMakeOtherLost was false:
3383 - item points to a FREE suballocation.
3384 - itemsToMakeLostCount is 0.
3385 
3386 If canMakeOtherLost was true:
3387 - item points to the first of a sequence of suballocations, each of which is either
3388  FREE or points to a VmaAllocation that can become lost.
3389 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3390  the requested allocation to succeed.
3391 */
3392 struct VmaAllocationRequest
3393 {
3394  VkDeviceSize offset;
3395  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3396  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3397  VmaSuballocationList::iterator item;
3398  size_t itemsToMakeLostCount;
3399 
3400  VkDeviceSize CalcCost() const
3401  {
3402  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3403  }
3404 };
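
// Illustrative example (not part of the library): the brute-force search in
// VmaBlockMetadata::CreateAllocationRequest keeps the cheaper of two candidate
// requests by comparing CalcCost(). With VMA_LOST_ALLOCATION_COST = 1048576:
//
//   Candidate A: sumItemSize = 262144,  itemsToMakeLostCount = 1
//     -> CalcCost() = 262144 + 1 * 1048576 = 1310720
//   Candidate B: sumItemSize = 2097152, itemsToMakeLostCount = 2
//     -> CalcCost() = 2097152 + 2 * 1048576 = 4194304
//
// Candidate A is chosen: it sacrifices fewer and smaller lost allocations.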
3405 
3406 /*
3407 Data structure used for bookkeeping of allocations and unused ranges of memory
3408 in a single VkDeviceMemory block.
3409 */
3410 class VmaBlockMetadata
3411 {
3412 public:
3413  VmaBlockMetadata(VmaAllocator hAllocator);
3414  ~VmaBlockMetadata();
3415  void Init(VkDeviceSize size);
3416 
3417  // Validates all data structures inside this object. If not valid, returns false.
3418  bool Validate() const;
3419  VkDeviceSize GetSize() const { return m_Size; }
3420  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3421  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3422  VkDeviceSize GetUnusedRangeSizeMax() const;
3423  // Returns true if this block is empty - contains only a single free suballocation.
3424  bool IsEmpty() const;
3425 
3426  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3427  void AddPoolStats(VmaPoolStats& inoutStats) const;
3428 
3429 #if VMA_STATS_STRING_ENABLED
3430  void PrintDetailedMap(class VmaJsonWriter& json) const;
3431 #endif
3432 
3433  // Creates a trivial request for the case when the block is empty.
3434  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3435 
3436  // Tries to find a place for suballocation with given parameters inside this block.
3437  // If succeeded, fills pAllocationRequest and returns true.
3438  // If failed, returns false.
3439  bool CreateAllocationRequest(
3440  uint32_t currentFrameIndex,
3441  uint32_t frameInUseCount,
3442  VkDeviceSize bufferImageGranularity,
3443  VkDeviceSize allocSize,
3444  VkDeviceSize allocAlignment,
3445  VmaSuballocationType allocType,
3446  bool canMakeOtherLost,
3447  VmaAllocationRequest* pAllocationRequest);
3448 
3449  bool MakeRequestedAllocationsLost(
3450  uint32_t currentFrameIndex,
3451  uint32_t frameInUseCount,
3452  VmaAllocationRequest* pAllocationRequest);
3453 
3454  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3455 
3456  // Makes actual allocation based on request. Request must already be checked and valid.
3457  void Alloc(
3458  const VmaAllocationRequest& request,
3459  VmaSuballocationType type,
3460  VkDeviceSize allocSize,
3461  VmaAllocation hAllocation);
3462 
3463  // Frees suballocation assigned to given memory region.
3464  void Free(const VmaAllocation allocation);
3465 
3466 private:
3467  VkDeviceSize m_Size;
3468  uint32_t m_FreeCount;
3469  VkDeviceSize m_SumFreeSize;
3470  VmaSuballocationList m_Suballocations;
3471  // Suballocations that are free and have size greater than certain threshold.
3472  // Sorted by size, ascending.
3473  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3474 
3475  bool ValidateFreeSuballocationList() const;
3476 
3477  // Checks if a requested suballocation with the given parameters can be placed at given suballocItem.
3478  // If yes, fills pOffset and returns true; if no, returns false.
3479  bool CheckAllocation(
3480  uint32_t currentFrameIndex,
3481  uint32_t frameInUseCount,
3482  VkDeviceSize bufferImageGranularity,
3483  VkDeviceSize allocSize,
3484  VkDeviceSize allocAlignment,
3485  VmaSuballocationType allocType,
3486  VmaSuballocationList::const_iterator suballocItem,
3487  bool canMakeOtherLost,
3488  VkDeviceSize* pOffset,
3489  size_t* itemsToMakeLostCount,
3490  VkDeviceSize* pSumFreeSize,
3491  VkDeviceSize* pSumItemSize) const;
3492  // Given a free suballocation, merges it with the following one, which must also be free.
3493  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3494  // Releases given suballocation, making it free.
3495  // Merges it with adjacent free suballocations if applicable.
3496  // Returns iterator to new free suballocation at this place.
3497  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3498  // Given a free suballocation, inserts it into the sorted list
3499  // m_FreeSuballocationsBySize if it is large enough to be registered.
3500  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3501  // Given a free suballocation, removes it from the sorted list
3502  // m_FreeSuballocationsBySize if it is registered there.
3503  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3504 };
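
#if 0
// Illustrative sketch (not compiled, not part of the library): the typical
// flow for suballocating from VmaBlockMetadata. The wrapper name MySuballocate
// and the parameter values are hypothetical.
static bool MySuballocate(
    VmaBlockMetadata& metadata,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocation hAllocation)
{
    VmaAllocationRequest request = {};
    if(metadata.CreateAllocationRequest(
        currentFrameIndex,
        0, // frameInUseCount: allocations unused this many frames may become lost.
        1, // bufferImageGranularity: 1 means no buffer/image conflict checks.
        size,
        alignment,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        false, // canMakeOtherLost
        &request))
    {
        // Request is valid - turn it into an actual suballocation.
        metadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, size, hAllocation);
        return true;
    }
    return false; // Block has no suitable free range.
}
#endif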
3505 
3506 // Helper class that represents mapped memory. Synchronized internally.
3507 class VmaDeviceMemoryMapping
3508 {
3509 public:
3510  VmaDeviceMemoryMapping();
3511  ~VmaDeviceMemoryMapping();
3512 
3513  void* GetMappedData() const { return m_pMappedData; }
3514 
3515  // ppData can be null.
3516  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3517  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3518 
3519 private:
3520  VMA_MUTEX m_Mutex;
3521  uint32_t m_MapCount;
3522  void* m_pMappedData;
3523 };
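
#if 0
// Illustrative sketch (not compiled, not part of the library): m_MapCount makes
// Map()/Unmap() pairs reference-counted, so several allocations in one block
// can be mapped at once while vkMapMemory()/vkUnmapMemory() are each called only once.
void* pData = VMA_NULL;
mapping.Map(hAllocator, hMemory, &pData);   // Count 0 -> 1: calls vkMapMemory().
mapping.Map(hAllocator, hMemory, VMA_NULL); // Count 1 -> 2: reuses the mapping (ppData can be null).
mapping.Unmap(hAllocator, hMemory);         // Count 2 -> 1: memory stays mapped.
mapping.Unmap(hAllocator, hMemory);         // Count 1 -> 0: calls vkUnmapMemory().
#endif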
3524 
3525 /*
3526 Represents a single block of device memory (`VkDeviceMemory`) with all the
3527 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3528 
3529 Thread-safety: This class must be externally synchronized.
3530 */
3531 class VmaDeviceMemoryBlock
3532 {
3533 public:
3534  uint32_t m_MemoryTypeIndex;
3535  VkDeviceMemory m_hMemory;
3536  VmaDeviceMemoryMapping m_Mapping;
3537  VmaBlockMetadata m_Metadata;
3538 
3539  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3540 
3541  ~VmaDeviceMemoryBlock()
3542  {
3543  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3544  }
3545 
3546  // Always call after construction.
3547  void Init(
3548  uint32_t newMemoryTypeIndex,
3549  VkDeviceMemory newMemory,
3550  VkDeviceSize newSize);
3551  // Always call before destruction.
3552  void Destroy(VmaAllocator allocator);
3553 
3554  // Validates all data structures inside this object. If not valid, returns false.
3555  bool Validate() const;
3556 
3557  // ppData can be null.
3558  VkResult Map(VmaAllocator hAllocator, void** ppData);
3559  void Unmap(VmaAllocator hAllocator);
3560 };
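
#if 0
// Illustrative sketch (not compiled, not part of the library): the lifecycle
// contract of VmaDeviceMemoryBlock - the destructor asserts that Destroy() has
// already returned the VkDeviceMemory to the allocator.
VmaDeviceMemoryBlock* pBlock = new(VmaAllocate<VmaDeviceMemoryBlock>(hAllocator))
    VmaDeviceMemoryBlock(hAllocator);
pBlock->Init(memTypeIndex, hMemory, blockSize); // Always call after construction.
// ... suballocate via pBlock->m_Metadata, map via pBlock->Map()/Unmap() ...
pBlock->Destroy(hAllocator); // Always call before destruction.
vma_delete(hAllocator, pBlock);
#endif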
3561 
3562 struct VmaPointerLess
3563 {
3564  bool operator()(const void* lhs, const void* rhs) const
3565  {
3566  return lhs < rhs;
3567  }
3568 };
3569 
3570 class VmaDefragmentator;
3571 
3572 /*
3573 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3574 Vulkan memory type.
3575 
3576 Synchronized internally with a mutex.
3577 */
3578 struct VmaBlockVector
3579 {
3580  VmaBlockVector(
3581  VmaAllocator hAllocator,
3582  uint32_t memoryTypeIndex,
3583  VkDeviceSize preferredBlockSize,
3584  size_t minBlockCount,
3585  size_t maxBlockCount,
3586  VkDeviceSize bufferImageGranularity,
3587  uint32_t frameInUseCount,
3588  bool isCustomPool);
3589  ~VmaBlockVector();
3590 
3591  VkResult CreateMinBlocks();
3592 
3593  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3594  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3595  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3596  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3597 
3598  void GetPoolStats(VmaPoolStats* pStats);
3599 
3600  bool IsEmpty() const { return m_Blocks.empty(); }
3601 
3602  VkResult Allocate(
3603  VmaPool hCurrentPool,
3604  uint32_t currentFrameIndex,
3605  const VkMemoryRequirements& vkMemReq,
3606  const VmaAllocationCreateInfo& createInfo,
3607  VmaSuballocationType suballocType,
3608  VmaAllocation* pAllocation);
3609 
3610  void Free(
3611  VmaAllocation hAllocation);
3612 
3613  // Adds statistics of this BlockVector to pStats.
3614  void AddStats(VmaStats* pStats);
3615 
3616 #if VMA_STATS_STRING_ENABLED
3617  void PrintDetailedMap(class VmaJsonWriter& json);
3618 #endif
3619 
3620  void MakePoolAllocationsLost(
3621  uint32_t currentFrameIndex,
3622  size_t* pLostAllocationCount);
3623 
3624  VmaDefragmentator* EnsureDefragmentator(
3625  VmaAllocator hAllocator,
3626  uint32_t currentFrameIndex);
3627 
3628  VkResult Defragment(
3629  VmaDefragmentationStats* pDefragmentationStats,
3630  VkDeviceSize& maxBytesToMove,
3631  uint32_t& maxAllocationsToMove);
3632 
3633  void DestroyDefragmentator();
3634 
3635 private:
3636  friend class VmaDefragmentator;
3637 
3638  const VmaAllocator m_hAllocator;
3639  const uint32_t m_MemoryTypeIndex;
3640  const VkDeviceSize m_PreferredBlockSize;
3641  const size_t m_MinBlockCount;
3642  const size_t m_MaxBlockCount;
3643  const VkDeviceSize m_BufferImageGranularity;
3644  const uint32_t m_FrameInUseCount;
3645  const bool m_IsCustomPool;
3646  VMA_MUTEX m_Mutex;
3647  // Incrementally sorted by sumFreeSize, ascending.
3648  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3649  /* There can be at most one block that is completely empty - a
3650  hysteresis to avoid the pessimistic case of alternating creation and destruction
3651  of a VkDeviceMemory. */
3652  bool m_HasEmptyBlock;
3653  VmaDefragmentator* m_pDefragmentator;
3654 
3655  size_t CalcMaxBlockSize() const;
3656 
3657  // Finds and removes given block from vector.
3658  void Remove(VmaDeviceMemoryBlock* pBlock);
3659 
3660  // Performs single step in sorting m_Blocks. They may not be fully sorted
3661  // after this call.
3662  void IncrementallySortBlocks();
3663 
3664  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3665 };
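
#if 0
// Illustrative sketch (not compiled, not part of the library): how the
// allocator drives a default (non-pool) block vector. Allocate() is
// synchronized internally via m_Mutex.
VkMemoryRequirements memReq = {}; // size/alignment/memoryTypeBits as returned by Vulkan.
VmaAllocationCreateInfo createInfo = {};
VmaAllocation hAlloc = VK_NULL_HANDLE;
VkResult res = pBlockVector->Allocate(
    VK_NULL_HANDLE, // hCurrentPool: null when serving the default pools.
    currentFrameIndex,
    memReq,
    createInfo,
    VMA_SUBALLOCATION_TYPE_BUFFER,
    &hAlloc);
if(res == VK_SUCCESS)
{
    // ... use hAlloc ...
    pBlockVector->Free(hAlloc);
}
#endif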
3666 
3667 struct VmaPool_T
3668 {
3669 public:
3670  VmaBlockVector m_BlockVector;
3671 
3672  // Takes ownership.
3673  VmaPool_T(
3674  VmaAllocator hAllocator,
3675  const VmaPoolCreateInfo& createInfo);
3676  ~VmaPool_T();
3677 
3678  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3679 
3680 #if VMA_STATS_STRING_ENABLED
3681  //void PrintDetailedMap(class VmaStringBuilder& sb);
3682 #endif
3683 };
3684 
3685 class VmaDefragmentator
3686 {
3687  const VmaAllocator m_hAllocator;
3688  VmaBlockVector* const m_pBlockVector;
3689  uint32_t m_CurrentFrameIndex;
3690  VkDeviceSize m_BytesMoved;
3691  uint32_t m_AllocationsMoved;
3692 
3693  struct AllocationInfo
3694  {
3695  VmaAllocation m_hAllocation;
3696  VkBool32* m_pChanged;
3697 
3698  AllocationInfo() :
3699  m_hAllocation(VK_NULL_HANDLE),
3700  m_pChanged(VMA_NULL)
3701  {
3702  }
3703  };
3704 
3705  struct AllocationInfoSizeGreater
3706  {
3707  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3708  {
3709  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3710  }
3711  };
3712 
3713  // Used between AddAllocation and Defragment.
3714  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3715 
3716  struct BlockInfo
3717  {
3718  VmaDeviceMemoryBlock* m_pBlock;
3719  bool m_HasNonMovableAllocations;
3720  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3721 
3722  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3723  m_pBlock(VMA_NULL),
3724  m_HasNonMovableAllocations(true),
3725  m_Allocations(pAllocationCallbacks),
3726  m_pMappedDataForDefragmentation(VMA_NULL)
3727  {
3728  }
3729 
3730  void CalcHasNonMovableAllocations()
3731  {
3732  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3733  const size_t defragmentAllocCount = m_Allocations.size();
3734  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3735  }
3736 
3737  void SortAllocationsBySizeDescecnding()
3738  {
3739  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3740  }
3741 
3742  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3743  void Unmap(VmaAllocator hAllocator);
3744 
3745  private:
3746  // Not null if mapped for defragmentation only, not originally mapped.
3747  void* m_pMappedDataForDefragmentation;
3748  };
3749 
3750  struct BlockPointerLess
3751  {
3752  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3753  {
3754  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3755  }
3756  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3757  {
3758  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3759  }
3760  };
3761 
3762  // 1. Blocks with some non-movable allocations go first.
3763  // 2. Blocks with smaller sumFreeSize go first.
3764  struct BlockInfoCompareMoveDestination
3765  {
3766  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3767  {
3768  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3769  {
3770  return true;
3771  }
3772  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3773  {
3774  return false;
3775  }
3776  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3777  {
3778  return true;
3779  }
3780  return false;
3781  }
3782  };
3783 
3784  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3785  BlockInfoVector m_Blocks;
3786 
3787  VkResult DefragmentRound(
3788  VkDeviceSize maxBytesToMove,
3789  uint32_t maxAllocationsToMove);
3790 
3791  static bool MoveMakesSense(
3792  size_t dstBlockIndex, VkDeviceSize dstOffset,
3793  size_t srcBlockIndex, VkDeviceSize srcOffset);
3794 
3795 public:
3796  VmaDefragmentator(
3797  VmaAllocator hAllocator,
3798  VmaBlockVector* pBlockVector,
3799  uint32_t currentFrameIndex);
3800 
3801  ~VmaDefragmentator();
3802 
3803  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3804  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3805 
3806  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3807 
3808  VkResult Defragment(
3809  VkDeviceSize maxBytesToMove,
3810  uint32_t maxAllocationsToMove);
3811 };
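
#if 0
// Illustrative sketch (not compiled, not part of the library): how
// VmaBlockVector::Defragment drives this class - register candidate
// allocations, run, then read the statistics.
VmaDefragmentator* pDefrag = pBlockVector->EnsureDefragmentator(hAllocator, currentFrameIndex);
VkBool32 changed = VK_FALSE;
pDefrag->AddAllocation(hAlloc, &changed); // Must precede Defragment().
VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
if(res >= 0)
{
    VkDeviceSize bytesMoved = pDefrag->GetBytesMoved();
    uint32_t allocationsMoved = pDefrag->GetAllocationsMoved();
    // changed is now VK_TRUE if hAlloc was moved to a new place.
}
#endif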
3812 
3813 // Main allocator object.
3814 struct VmaAllocator_T
3815 {
3816  bool m_UseMutex;
3817  bool m_UseKhrDedicatedAllocation;
3818  VkDevice m_hDevice;
3819  bool m_AllocationCallbacksSpecified;
3820  VkAllocationCallbacks m_AllocationCallbacks;
3821  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3822 
3823  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3824  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3825  VMA_MUTEX m_HeapSizeLimitMutex;
3826 
3827  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3828  VkPhysicalDeviceMemoryProperties m_MemProps;
3829 
3830  // Default pools.
3831  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3832 
3833  // Each vector is sorted by memory (handle value).
3834  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3835  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3836  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3837 
3838  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3839  ~VmaAllocator_T();
3840 
3841  const VkAllocationCallbacks* GetAllocationCallbacks() const
3842  {
3843  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3844  }
3845  const VmaVulkanFunctions& GetVulkanFunctions() const
3846  {
3847  return m_VulkanFunctions;
3848  }
3849 
3850  VkDeviceSize GetBufferImageGranularity() const
3851  {
3852  return VMA_MAX(
3853  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3854  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3855  }
3856 
3857  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3858  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3859 
3860  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3861  {
3862  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3863  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3864  }
3865 
3866  void GetBufferMemoryRequirements(
3867  VkBuffer hBuffer,
3868  VkMemoryRequirements& memReq,
3869  bool& requiresDedicatedAllocation,
3870  bool& prefersDedicatedAllocation) const;
3871  void GetImageMemoryRequirements(
3872  VkImage hImage,
3873  VkMemoryRequirements& memReq,
3874  bool& requiresDedicatedAllocation,
3875  bool& prefersDedicatedAllocation) const;
3876 
3877  // Main allocation function.
3878  VkResult AllocateMemory(
3879  const VkMemoryRequirements& vkMemReq,
3880  bool requiresDedicatedAllocation,
3881  bool prefersDedicatedAllocation,
3882  VkBuffer dedicatedBuffer,
3883  VkImage dedicatedImage,
3884  const VmaAllocationCreateInfo& createInfo,
3885  VmaSuballocationType suballocType,
3886  VmaAllocation* pAllocation);
3887 
3888  // Main deallocation function.
3889  void FreeMemory(const VmaAllocation allocation);
3890 
3891  void CalculateStats(VmaStats* pStats);
3892 
3893 #if VMA_STATS_STRING_ENABLED
3894  void PrintDetailedMap(class VmaJsonWriter& json);
3895 #endif
3896 
3897  VkResult Defragment(
3898  VmaAllocation* pAllocations,
3899  size_t allocationCount,
3900  VkBool32* pAllocationsChanged,
3901  const VmaDefragmentationInfo* pDefragmentationInfo,
3902  VmaDefragmentationStats* pDefragmentationStats);
3903 
3904  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3905 
3906  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3907  void DestroyPool(VmaPool pool);
3908  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3909 
3910  void SetCurrentFrameIndex(uint32_t frameIndex);
3911 
3912  void MakePoolAllocationsLost(
3913  VmaPool hPool,
3914  size_t* pLostAllocationCount);
3915 
3916  void CreateLostAllocation(VmaAllocation* pAllocation);
3917 
3918  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3919  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3920 
3921  VkResult Map(VmaAllocation hAllocation, void** ppData);
3922  void Unmap(VmaAllocation hAllocation);
3923 
3924 private:
3925  VkDeviceSize m_PreferredLargeHeapBlockSize;
3926 
3927  VkPhysicalDevice m_PhysicalDevice;
3928  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3929 
3930  VMA_MUTEX m_PoolsMutex;
3931  // Protected by m_PoolsMutex. Sorted by pointer value.
3932  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3933 
3934  VmaVulkanFunctions m_VulkanFunctions;
3935 
3936  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3937 
3938  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3939 
3940  VkResult AllocateMemoryOfType(
3941  const VkMemoryRequirements& vkMemReq,
3942  bool dedicatedAllocation,
3943  VkBuffer dedicatedBuffer,
3944  VkImage dedicatedImage,
3945  const VmaAllocationCreateInfo& createInfo,
3946  uint32_t memTypeIndex,
3947  VmaSuballocationType suballocType,
3948  VmaAllocation* pAllocation);
3949 
3950  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
3951  VkResult AllocateDedicatedMemory(
3952  VkDeviceSize size,
3953  VmaSuballocationType suballocType,
3954  uint32_t memTypeIndex,
3955  bool map,
3956  bool isUserDataString,
3957  void* pUserData,
3958  VkBuffer dedicatedBuffer,
3959  VkImage dedicatedImage,
3960  VmaAllocation* pAllocation);
3961 
3962  // Frees the given allocation as dedicated memory: unregisters it and releases its VkDeviceMemory.
3963  void FreeDedicatedMemory(VmaAllocation allocation);
3964 };
3965 
3967 // Memory allocation #2 after VmaAllocator_T definition
3968 
3969 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3970 {
3971  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3972 }
3973 
3974 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3975 {
3976  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3977 }
3978 
3979 template<typename T>
3980 static T* VmaAllocate(VmaAllocator hAllocator)
3981 {
3982  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3983 }
3984 
3985 template<typename T>
3986 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3987 {
3988  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3989 }
3990 
3991 template<typename T>
3992 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3993 {
3994  if(ptr != VMA_NULL)
3995  {
3996  ptr->~T();
3997  VmaFree(hAllocator, ptr);
3998  }
3999 }
4000 
4001 template<typename T>
4002 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4003 {
4004  if(ptr != VMA_NULL)
4005  {
4006  for(size_t i = count; i--; )
4007  ptr[i].~T();
4008  VmaFree(hAllocator, ptr);
4009  }
4010 }
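
#if 0
// Illustrative sketch (not compiled, not part of the library): the typed
// helpers route everything through the user-provided VkAllocationCallbacks.
// VmaAllocate/VmaAllocateArray return raw storage (no constructors run), while
// vma_delete/vma_delete_array destroy elements (in reverse order for arrays)
// before freeing.
char* pScratch = VmaAllocateArray<char>(hAllocator, 256);
// ... use pScratch as temporary storage ...
vma_delete_array(hAllocator, pScratch, 256);
#endif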
4011 
4013 // VmaStringBuilder
4014 
4015 #if VMA_STATS_STRING_ENABLED
4016 
4017 class VmaStringBuilder
4018 {
4019 public:
4020  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4021  size_t GetLength() const { return m_Data.size(); }
4022  const char* GetData() const { return m_Data.data(); }
4023 
4024  void Add(char ch) { m_Data.push_back(ch); }
4025  void Add(const char* pStr);
4026  void AddNewLine() { Add('\n'); }
4027  void AddNumber(uint32_t num);
4028  void AddNumber(uint64_t num);
4029  void AddPointer(const void* ptr);
4030 
4031 private:
4032  VmaVector< char, VmaStlAllocator<char> > m_Data;
4033 };
4034 
4035 void VmaStringBuilder::Add(const char* pStr)
4036 {
4037  const size_t strLen = strlen(pStr);
4038  if(strLen > 0)
4039  {
4040  const size_t oldCount = m_Data.size();
4041  m_Data.resize(oldCount + strLen);
4042  memcpy(m_Data.data() + oldCount, pStr, strLen);
4043  }
4044 }
4045 
4046 void VmaStringBuilder::AddNumber(uint32_t num)
4047 {
4048  char buf[11];
4049  VmaUint32ToStr(buf, sizeof(buf), num);
4050  Add(buf);
4051 }
4052 
4053 void VmaStringBuilder::AddNumber(uint64_t num)
4054 {
4055  char buf[21];
4056  VmaUint64ToStr(buf, sizeof(buf), num);
4057  Add(buf);
4058 }
4059 
4060 void VmaStringBuilder::AddPointer(const void* ptr)
4061 {
4062  char buf[21];
4063  VmaPtrToStr(buf, sizeof(buf), ptr);
4064  Add(buf);
4065 }
4066 
4067 #endif // #if VMA_STATS_STRING_ENABLED
4068 
4070 // VmaJsonWriter
4071 
4072 #if VMA_STATS_STRING_ENABLED
4073 
4074 class VmaJsonWriter
4075 {
4076 public:
4077  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4078  ~VmaJsonWriter();
4079 
4080  void BeginObject(bool singleLine = false);
4081  void EndObject();
4082 
4083  void BeginArray(bool singleLine = false);
4084  void EndArray();
4085 
4086  void WriteString(const char* pStr);
4087  void BeginString(const char* pStr = VMA_NULL);
4088  void ContinueString(const char* pStr);
4089  void ContinueString(uint32_t n);
4090  void ContinueString(uint64_t n);
4091  void ContinueString_Pointer(const void* ptr);
4092  void EndString(const char* pStr = VMA_NULL);
4093 
4094  void WriteNumber(uint32_t n);
4095  void WriteNumber(uint64_t n);
4096  void WriteBool(bool b);
4097  void WriteNull();
4098 
4099 private:
4100  static const char* const INDENT;
4101 
4102  enum COLLECTION_TYPE
4103  {
4104  COLLECTION_TYPE_OBJECT,
4105  COLLECTION_TYPE_ARRAY,
4106  };
4107  struct StackItem
4108  {
4109  COLLECTION_TYPE type;
4110  uint32_t valueCount;
4111  bool singleLineMode;
4112  };
4113 
4114  VmaStringBuilder& m_SB;
4115  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4116  bool m_InsideString;
4117 
4118  void BeginValue(bool isString);
4119  void WriteIndent(bool oneLess = false);
4120 };
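
#if 0
// Illustrative sketch (not compiled, not part of the library): inside an
// object, values alternate key (string) / value - BeginValue() asserts this invariant.
VmaStringBuilder sb(hAllocator);
VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
json.BeginObject(true); // singleLine = true: no newlines or indentation.
json.WriteString("Name");
json.WriteString("block");
json.WriteString("Size");
json.WriteNumber(42u);
json.EndObject();
// sb.GetData() now holds: {"Name": "block", "Size": 42}
#endif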
4121 
4122 const char* const VmaJsonWriter::INDENT = "  ";
4123 
4124 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4125  m_SB(sb),
4126  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4127  m_InsideString(false)
4128 {
4129 }
4130 
4131 VmaJsonWriter::~VmaJsonWriter()
4132 {
4133  VMA_ASSERT(!m_InsideString);
4134  VMA_ASSERT(m_Stack.empty());
4135 }
4136 
4137 void VmaJsonWriter::BeginObject(bool singleLine)
4138 {
4139  VMA_ASSERT(!m_InsideString);
4140 
4141  BeginValue(false);
4142  m_SB.Add('{');
4143 
4144  StackItem item;
4145  item.type = COLLECTION_TYPE_OBJECT;
4146  item.valueCount = 0;
4147  item.singleLineMode = singleLine;
4148  m_Stack.push_back(item);
4149 }
4150 
4151 void VmaJsonWriter::EndObject()
4152 {
4153  VMA_ASSERT(!m_InsideString);
4154 
4155  WriteIndent(true);
4156  m_SB.Add('}');
4157 
4158  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4159  m_Stack.pop_back();
4160 }
4161 
4162 void VmaJsonWriter::BeginArray(bool singleLine)
4163 {
4164  VMA_ASSERT(!m_InsideString);
4165 
4166  BeginValue(false);
4167  m_SB.Add('[');
4168 
4169  StackItem item;
4170  item.type = COLLECTION_TYPE_ARRAY;
4171  item.valueCount = 0;
4172  item.singleLineMode = singleLine;
4173  m_Stack.push_back(item);
4174 }
4175 
4176 void VmaJsonWriter::EndArray()
4177 {
4178  VMA_ASSERT(!m_InsideString);
4179 
4180  WriteIndent(true);
4181  m_SB.Add(']');
4182 
4183  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4184  m_Stack.pop_back();
4185 }
4186 
4187 void VmaJsonWriter::WriteString(const char* pStr)
4188 {
4189  BeginString(pStr);
4190  EndString();
4191 }
4192 
4193 void VmaJsonWriter::BeginString(const char* pStr)
4194 {
4195  VMA_ASSERT(!m_InsideString);
4196 
4197  BeginValue(true);
4198  m_SB.Add('"');
4199  m_InsideString = true;
4200  if(pStr != VMA_NULL && pStr[0] != '\0')
4201  {
4202  ContinueString(pStr);
4203  }
4204 }
4205 
4206 void VmaJsonWriter::ContinueString(const char* pStr)
4207 {
4208  VMA_ASSERT(m_InsideString);
4209 
4210  const size_t strLen = strlen(pStr);
4211  for(size_t i = 0; i < strLen; ++i)
4212  {
4213  char ch = pStr[i];
4214  if(ch == '\\')
4215  {
4216  m_SB.Add("\\\\");
4217  }
4218  else if(ch == '"')
4219  {
4220  m_SB.Add("\\\"");
4221  }
4222  else if(ch >= 32)
4223  {
4224  m_SB.Add(ch);
4225  }
4226  else switch(ch)
4227  {
4228  case '\b':
4229  m_SB.Add("\\b");
4230  break;
4231  case '\f':
4232  m_SB.Add("\\f");
4233  break;
4234  case '\n':
4235  m_SB.Add("\\n");
4236  break;
4237  case '\r':
4238  m_SB.Add("\\r");
4239  break;
4240  case '\t':
4241  m_SB.Add("\\t");
4242  break;
4243  default:
4244  VMA_ASSERT(0 && "Character not currently supported.");
4245  break;
4246  }
4247  }
4248 }
4249 
4250 void VmaJsonWriter::ContinueString(uint32_t n)
4251 {
4252  VMA_ASSERT(m_InsideString);
4253  m_SB.AddNumber(n);
4254 }
4255 
4256 void VmaJsonWriter::ContinueString(uint64_t n)
4257 {
4258  VMA_ASSERT(m_InsideString);
4259  m_SB.AddNumber(n);
4260 }
4261 
4262 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4263 {
4264  VMA_ASSERT(m_InsideString);
4265  m_SB.AddPointer(ptr);
4266 }
4267 
4268 void VmaJsonWriter::EndString(const char* pStr)
4269 {
4270  VMA_ASSERT(m_InsideString);
4271  if(pStr != VMA_NULL && pStr[0] != '\0')
4272  {
4273  ContinueString(pStr);
4274  }
4275  m_SB.Add('"');
4276  m_InsideString = false;
4277 }
4278 
4279 void VmaJsonWriter::WriteNumber(uint32_t n)
4280 {
4281  VMA_ASSERT(!m_InsideString);
4282  BeginValue(false);
4283  m_SB.AddNumber(n);
4284 }
4285 
4286 void VmaJsonWriter::WriteNumber(uint64_t n)
4287 {
4288  VMA_ASSERT(!m_InsideString);
4289  BeginValue(false);
4290  m_SB.AddNumber(n);
4291 }
4292 
4293 void VmaJsonWriter::WriteBool(bool b)
4294 {
4295  VMA_ASSERT(!m_InsideString);
4296  BeginValue(false);
4297  m_SB.Add(b ? "true" : "false");
4298 }
4299 
4300 void VmaJsonWriter::WriteNull()
4301 {
4302  VMA_ASSERT(!m_InsideString);
4303  BeginValue(false);
4304  m_SB.Add("null");
4305 }
4306 
4307 void VmaJsonWriter::BeginValue(bool isString)
4308 {
4309  if(!m_Stack.empty())
4310  {
4311  StackItem& currItem = m_Stack.back();
4312  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4313  currItem.valueCount % 2 == 0)
4314  {
4315  VMA_ASSERT(isString);
4316  }
4317 
4318  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4319  currItem.valueCount % 2 != 0)
4320  {
4321  m_SB.Add(": ");
4322  }
4323  else if(currItem.valueCount > 0)
4324  {
4325  m_SB.Add(", ");
4326  WriteIndent();
4327  }
4328  else
4329  {
4330  WriteIndent();
4331  }
4332  ++currItem.valueCount;
4333  }
4334 }
4335 
4336 void VmaJsonWriter::WriteIndent(bool oneLess)
4337 {
4338  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4339  {
4340  m_SB.AddNewLine();
4341 
4342  size_t count = m_Stack.size();
4343  if(count > 0 && oneLess)
4344  {
4345  --count;
4346  }
4347  for(size_t i = 0; i < count; ++i)
4348  {
4349  m_SB.Add(INDENT);
4350  }
4351  }
4352 }
4353 
4354 #endif // #if VMA_STATS_STRING_ENABLED
4355 
4357 
4358 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4359 {
4360  if(IsUserDataString())
4361  {
4362  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4363 
4364  FreeUserDataString(hAllocator);
4365 
4366  if(pUserData != VMA_NULL)
4367  {
4368  const char* const newStrSrc = (char*)pUserData;
4369  const size_t newStrLen = strlen(newStrSrc);
4370  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4371  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4372  m_pUserData = newStrDst;
4373  }
4374  }
4375  else
4376  {
4377  m_pUserData = pUserData;
4378  }
4379 }
4380 
4381 VkDeviceSize VmaAllocation_T::GetOffset() const
4382 {
4383  switch(m_Type)
4384  {
4385  case ALLOCATION_TYPE_BLOCK:
4386  return m_BlockAllocation.m_Offset;
4387  case ALLOCATION_TYPE_DEDICATED:
4388  return 0;
4389  default:
4390  VMA_ASSERT(0);
4391  return 0;
4392  }
4393 }
4394 
4395 VkDeviceMemory VmaAllocation_T::GetMemory() const
4396 {
4397  switch(m_Type)
4398  {
4399  case ALLOCATION_TYPE_BLOCK:
4400  return m_BlockAllocation.m_Block->m_hMemory;
4401  case ALLOCATION_TYPE_DEDICATED:
4402  return m_DedicatedAllocation.m_hMemory;
4403  default:
4404  VMA_ASSERT(0);
4405  return VK_NULL_HANDLE;
4406  }
4407 }
4408 
4409 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4410 {
4411  switch(m_Type)
4412  {
4413  case ALLOCATION_TYPE_BLOCK:
4414  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4415  case ALLOCATION_TYPE_DEDICATED:
4416  return m_DedicatedAllocation.m_MemoryTypeIndex;
4417  default:
4418  VMA_ASSERT(0);
4419  return UINT32_MAX;
4420  }
4421 }
4422 
4423 void* VmaAllocation_T::GetMappedData() const
4424 {
4425  switch(m_Type)
4426  {
4427  case ALLOCATION_TYPE_BLOCK:
4428  if(m_MapCount != 0)
4429  {
4430  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4431  VMA_ASSERT(pBlockData != VMA_NULL);
4432  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4433  }
4434  else
4435  {
4436  return VMA_NULL;
4437  }
4438  break;
4439  case ALLOCATION_TYPE_DEDICATED:
4440  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4441  return m_DedicatedAllocation.m_pMappedData;
4442  default:
4443  VMA_ASSERT(0);
4444  return VMA_NULL;
4445  }
4446 }
4447 
4448 bool VmaAllocation_T::CanBecomeLost() const
4449 {
4450  switch(m_Type)
4451  {
4452  case ALLOCATION_TYPE_BLOCK:
4453  return m_BlockAllocation.m_CanBecomeLost;
4454  case ALLOCATION_TYPE_DEDICATED:
4455  return false;
4456  default:
4457  VMA_ASSERT(0);
4458  return false;
4459  }
4460 }
4461 
4462 VmaPool VmaAllocation_T::GetPool() const
4463 {
4464  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4465  return m_BlockAllocation.m_hPool;
4466 }
4467 
4468 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4469 {
4470  VMA_ASSERT(CanBecomeLost());
4471 
4472  /*
4473  Warning: This is a carefully designed algorithm.
4474  Do not modify unless you really know what you're doing :)
4475  */
4476  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4477  for(;;)
4478  {
4479  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4480  {
4481  VMA_ASSERT(0);
4482  return false;
4483  }
4484  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4485  {
4486  return false;
4487  }
4488  else // Last use time earlier than current time.
4489  {
4490  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4491  {
4492  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4493  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4494  return true;
4495  }
4496  }
4497  }
4498 }
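
// Illustrative example (not part of the library): with frameInUseCount = 2 and
// currentFrameIndex = 10, an allocation last used in frame 7 satisfies
// 7 + 2 < 10, so the loop above retires it by storing VMA_FRAME_INDEX_LOST;
// one last used in frame 8 does not (8 + 2 >= 10) and stays usable.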
4499 
4500 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4501 {
4502  VMA_ASSERT(IsUserDataString());
4503  if(m_pUserData != VMA_NULL)
4504  {
4505  char* const oldStr = (char*)m_pUserData;
4506  const size_t oldStrLen = strlen(oldStr);
4507  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4508  m_pUserData = VMA_NULL;
4509  }
4510 }
4511 
4512 void VmaAllocation_T::BlockAllocMap()
4513 {
4514  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4515 
4516  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4517  {
4518  ++m_MapCount;
4519  }
4520  else
4521  {
4522  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4523  }
4524 }
4525 
4526 void VmaAllocation_T::BlockAllocUnmap()
4527 {
4528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4529 
4530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4531  {
4532  --m_MapCount;
4533  }
4534  else
4535  {
4536  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4537  }
4538 }
4539 
4540 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4541 {
4542  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4543 
4544  if(m_MapCount != 0)
4545  {
4546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4547  {
4548  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4549  *ppData = m_DedicatedAllocation.m_pMappedData;
4550  ++m_MapCount;
4551  return VK_SUCCESS;
4552  }
4553  else
4554  {
4555  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4556  return VK_ERROR_MEMORY_MAP_FAILED;
4557  }
4558  }
4559  else
4560  {
4561  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4562  hAllocator->m_hDevice,
4563  m_DedicatedAllocation.m_hMemory,
4564  0, // offset
4565  VK_WHOLE_SIZE,
4566  0, // flags
4567  ppData);
4568  if(result == VK_SUCCESS)
4569  {
4570  m_DedicatedAllocation.m_pMappedData = *ppData;
4571  m_MapCount = 1;
4572  }
4573  return result;
4574  }
4575 }
4576 
4577 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4578 {
4579  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4580 
4581  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4582  {
4583  --m_MapCount;
4584  if(m_MapCount == 0)
4585  {
4586  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4587  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4588  hAllocator->m_hDevice,
4589  m_DedicatedAllocation.m_hMemory);
4590  }
4591  }
4592  else
4593  {
4594  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4595  }
4596 }
4597 
4598 #if VMA_STATS_STRING_ENABLED
4599 
4600 // These names correspond to the values of enum VmaSuballocationType.
4601 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4602  "FREE",
4603  "UNKNOWN",
4604  "BUFFER",
4605  "IMAGE_UNKNOWN",
4606  "IMAGE_LINEAR",
4607  "IMAGE_OPTIMAL",
4608 };
4609 
4610 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4611 {
4612  json.BeginObject();
4613 
4614  json.WriteString("Blocks");
4615  json.WriteNumber(stat.blockCount);
4616 
4617  json.WriteString("Allocations");
4618  json.WriteNumber(stat.allocationCount);
4619 
4620  json.WriteString("UnusedRanges");
4621  json.WriteNumber(stat.unusedRangeCount);
4622 
4623  json.WriteString("UsedBytes");
4624  json.WriteNumber(stat.usedBytes);
4625 
4626  json.WriteString("UnusedBytes");
4627  json.WriteNumber(stat.unusedBytes);
4628 
4629  if(stat.allocationCount > 1)
4630  {
4631  json.WriteString("AllocationSize");
4632  json.BeginObject(true);
4633  json.WriteString("Min");
4634  json.WriteNumber(stat.allocationSizeMin);
4635  json.WriteString("Avg");
4636  json.WriteNumber(stat.allocationSizeAvg);
4637  json.WriteString("Max");
4638  json.WriteNumber(stat.allocationSizeMax);
4639  json.EndObject();
4640  }
4641 
4642  if(stat.unusedRangeCount > 1)
4643  {
4644  json.WriteString("UnusedRangeSize");
4645  json.BeginObject(true);
4646  json.WriteString("Min");
4647  json.WriteNumber(stat.unusedRangeSizeMin);
4648  json.WriteString("Avg");
4649  json.WriteNumber(stat.unusedRangeSizeAvg);
4650  json.WriteString("Max");
4651  json.WriteNumber(stat.unusedRangeSizeMax);
4652  json.EndObject();
4653  }
4654 
4655  json.EndObject();
4656 }
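
// Illustrative example (not part of the library): for a block with 2
// allocations of 512 and 1024 bytes and one 512-byte unused range, the JSON
// produced above is shaped like:
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
//     "UsedBytes": 1536, "UnusedBytes": 512,
//     "AllocationSize": { "Min": 512, "Avg": 768, "Max": 1024 } }
//
// The "AllocationSize"/"UnusedRangeSize" sub-objects are written only when the
// respective count is greater than 1.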
4657 
4658 #endif // #if VMA_STATS_STRING_ENABLED
4659 
4660 struct VmaSuballocationItemSizeLess
4661 {
4662  bool operator()(
4663  const VmaSuballocationList::iterator lhs,
4664  const VmaSuballocationList::iterator rhs) const
4665  {
4666  return lhs->size < rhs->size;
4667  }
4668  bool operator()(
4669  const VmaSuballocationList::iterator lhs,
4670  VkDeviceSize rhsSize) const
4671  {
4672  return lhs->size < rhsSize;
4673  }
4674 };
4675 
4677 // class VmaBlockMetadata
4678 
4679 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4680  m_Size(0),
4681  m_FreeCount(0),
4682  m_SumFreeSize(0),
4683  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4684  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4685 {
4686 }
4687 
4688 VmaBlockMetadata::~VmaBlockMetadata()
4689 {
4690 }
4691 
4692 void VmaBlockMetadata::Init(VkDeviceSize size)
4693 {
4694  m_Size = size;
4695  m_FreeCount = 1;
4696  m_SumFreeSize = size;
4697 
4698  VmaSuballocation suballoc = {};
4699  suballoc.offset = 0;
4700  suballoc.size = size;
4701  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4702  suballoc.hAllocation = VK_NULL_HANDLE;
4703 
4704  m_Suballocations.push_back(suballoc);
4705  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4706  --suballocItem;
4707  m_FreeSuballocationsBySize.push_back(suballocItem);
4708 }
4709 
4710 bool VmaBlockMetadata::Validate() const
4711 {
4712  if(m_Suballocations.empty())
4713  {
4714  return false;
4715  }
4716 
4717  // Expected offset of the new suballocation, as calculated from the previous ones.
4718  VkDeviceSize calculatedOffset = 0;
4719  // Expected number of free suballocations as calculated from traversing their list.
4720  uint32_t calculatedFreeCount = 0;
4721  // Expected sum size of free suballocations as calculated from traversing their list.
4722  VkDeviceSize calculatedSumFreeSize = 0;
4723  // Expected number of free suballocations that should be registered in
4724  // m_FreeSuballocationsBySize calculated from traversing their list.
4725  size_t freeSuballocationsToRegister = 0;
4726  // True if the previously visited suballocation was free.
4727  bool prevFree = false;
4728 
4729  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4730  suballocItem != m_Suballocations.cend();
4731  ++suballocItem)
4732  {
4733  const VmaSuballocation& subAlloc = *suballocItem;
4734 
4735  // Actual offset of this suballocation doesn't match the expected one.
4736  if(subAlloc.offset != calculatedOffset)
4737  {
4738  return false;
4739  }
4740 
4741  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4742  // Two adjacent free suballocations are invalid. They should be merged.
4743  if(prevFree && currFree)
4744  {
4745  return false;
4746  }
4747  prevFree = currFree;
4748 
4749  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4750  {
4751  return false;
4752  }
4753 
4754  if(currFree)
4755  {
4756  calculatedSumFreeSize += subAlloc.size;
4757  ++calculatedFreeCount;
4758  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4759  {
4760  ++freeSuballocationsToRegister;
4761  }
4762  }
4763 
4764  calculatedOffset += subAlloc.size;
4765  }
4766 
4767  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4768  // match the expected number.
4769  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4770  {
4771  return false;
4772  }
4773 
4774  VkDeviceSize lastSize = 0;
4775  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4776  {
4777  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4778 
4779  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4780  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4781  {
4782  return false;
4783  }
4784  // They must be sorted by size ascending.
4785  if(suballocItem->size < lastSize)
4786  {
4787  return false;
4788  }
4789 
4790  lastSize = suballocItem->size;
4791  }
4792 
4793  // Check if totals match the calculated values.
4794  return
4795  ValidateFreeSuballocationList() &&
4796  (calculatedOffset == m_Size) &&
4797  (calculatedSumFreeSize == m_SumFreeSize) &&
4798  (calculatedFreeCount == m_FreeCount);
4799 }
4800 
4801 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4802 {
4803  if(!m_FreeSuballocationsBySize.empty())
4804  {
4805  return m_FreeSuballocationsBySize.back()->size;
4806  }
4807  else
4808  {
4809  return 0;
4810  }
4811 }
4812 
4813 bool VmaBlockMetadata::IsEmpty() const
4814 {
4815  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4816 }
4817 
4818 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4819 {
4820  outInfo.blockCount = 1;
4821 
4822  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4823  outInfo.allocationCount = rangeCount - m_FreeCount;
4824  outInfo.unusedRangeCount = m_FreeCount;
4825 
4826  outInfo.unusedBytes = m_SumFreeSize;
4827  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4828 
4829  outInfo.allocationSizeMin = UINT64_MAX;
4830  outInfo.allocationSizeMax = 0;
4831  outInfo.unusedRangeSizeMin = UINT64_MAX;
4832  outInfo.unusedRangeSizeMax = 0;
4833 
4834  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4835  suballocItem != m_Suballocations.cend();
4836  ++suballocItem)
4837  {
4838  const VmaSuballocation& suballoc = *suballocItem;
4839  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4840  {
4841  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4842  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4843  }
4844  else
4845  {
4846  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4847  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4848  }
4849  }
4850 }
4851 
4852 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4853 {
4854  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4855 
4856  inoutStats.size += m_Size;
4857  inoutStats.unusedSize += m_SumFreeSize;
4858  inoutStats.allocationCount += rangeCount - m_FreeCount;
4859  inoutStats.unusedRangeCount += m_FreeCount;
4860  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4861 }
4862 
4863 #if VMA_STATS_STRING_ENABLED
4864 
4865 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4866 {
4867  json.BeginObject();
4868 
4869  json.WriteString("TotalBytes");
4870  json.WriteNumber(m_Size);
4871 
4872  json.WriteString("UnusedBytes");
4873  json.WriteNumber(m_SumFreeSize);
4874 
4875  json.WriteString("Allocations");
4876  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4877 
4878  json.WriteString("UnusedRanges");
4879  json.WriteNumber(m_FreeCount);
4880 
4881  json.WriteString("Suballocations");
4882  json.BeginArray();
4883  size_t i = 0;
4884  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4885  suballocItem != m_Suballocations.cend();
4886  ++suballocItem, ++i)
4887  {
4888  json.BeginObject(true);
4889 
4890  json.WriteString("Type");
4891  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4892 
4893  json.WriteString("Size");
4894  json.WriteNumber(suballocItem->size);
4895 
4896  json.WriteString("Offset");
4897  json.WriteNumber(suballocItem->offset);
4898 
4899  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4900  {
4901  const void* pUserData = suballocItem->hAllocation->GetUserData();
4902  if(pUserData != VMA_NULL)
4903  {
4904  json.WriteString("UserData");
4905  if(suballocItem->hAllocation->IsUserDataString())
4906  {
4907  json.WriteString((const char*)pUserData);
4908  }
4909  else
4910  {
4911  json.BeginString();
4912  json.ContinueString_Pointer(pUserData);
4913  json.EndString();
4914  }
4915  }
4916  }
4917 
4918  json.EndObject();
4919  }
4920  json.EndArray();
4921 
4922  json.EndObject();
4923 }
4924 
4925 #endif // #if VMA_STATS_STRING_ENABLED
4926 
4927 /*
4928 How many suitable free suballocations to analyze before choosing the best one.
4929 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
4930  will be chosen.
4931 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4932  suballocations will be analyzed and the best one will be chosen.
4933 - Any other value is also acceptable.
4934 */
4935 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
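
// Illustrative example (not part of the library): with registered free sizes
// [64, 256, 1024] (sorted ascending) and allocSize = 200, the VMA_BEST_FIT
// path below binary-searches to the first size not less than 200 and tries
// the 256-byte range first; with VMA_BEST_FIT disabled, the fallback loop
// starts from the biggest range (1024) instead.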
4936 
4937 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4938 {
4939  VMA_ASSERT(IsEmpty());
4940  pAllocationRequest->offset = 0;
4941  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4942  pAllocationRequest->sumItemSize = 0;
4943  pAllocationRequest->item = m_Suballocations.begin();
4944  pAllocationRequest->itemsToMakeLostCount = 0;
4945 }
4946 
4947 bool VmaBlockMetadata::CreateAllocationRequest(
4948  uint32_t currentFrameIndex,
4949  uint32_t frameInUseCount,
4950  VkDeviceSize bufferImageGranularity,
4951  VkDeviceSize allocSize,
4952  VkDeviceSize allocAlignment,
4953  VmaSuballocationType allocType,
4954  bool canMakeOtherLost,
4955  VmaAllocationRequest* pAllocationRequest)
4956 {
4957  VMA_ASSERT(allocSize > 0);
4958  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4959  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4960  VMA_HEAVY_ASSERT(Validate());
4961 
4962  // There is not enough total free space in this block to fulfill the request: early return.
4963  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4964  {
4965  return false;
4966  }
4967 
4968  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4969  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4970  if(freeSuballocCount > 0)
4971  {
4972  if(VMA_BEST_FIT)
4973  {
4974  // Find first free suballocation with size not less than allocSize.
4975  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4976  m_FreeSuballocationsBySize.data(),
4977  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4978  allocSize,
4979  VmaSuballocationItemSizeLess());
4980  size_t index = it - m_FreeSuballocationsBySize.data();
4981  for(; index < freeSuballocCount; ++index)
4982  {
4983  if(CheckAllocation(
4984  currentFrameIndex,
4985  frameInUseCount,
4986  bufferImageGranularity,
4987  allocSize,
4988  allocAlignment,
4989  allocType,
4990  m_FreeSuballocationsBySize[index],
4991  false, // canMakeOtherLost
4992  &pAllocationRequest->offset,
4993  &pAllocationRequest->itemsToMakeLostCount,
4994  &pAllocationRequest->sumFreeSize,
4995  &pAllocationRequest->sumItemSize))
4996  {
4997  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4998  return true;
4999  }
5000  }
5001  }
5002  else
5003  {
5004  // Search starting from the biggest suballocations.
5005  for(size_t index = freeSuballocCount; index--; )
5006  {
5007  if(CheckAllocation(
5008  currentFrameIndex,
5009  frameInUseCount,
5010  bufferImageGranularity,
5011  allocSize,
5012  allocAlignment,
5013  allocType,
5014  m_FreeSuballocationsBySize[index],
5015  false, // canMakeOtherLost
5016  &pAllocationRequest->offset,
5017  &pAllocationRequest->itemsToMakeLostCount,
5018  &pAllocationRequest->sumFreeSize,
5019  &pAllocationRequest->sumItemSize))
5020  {
5021  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5022  return true;
5023  }
5024  }
5025  }
5026  }
5027 
5028  if(canMakeOtherLost)
5029  {
5030  // Brute-force algorithm. TODO: Come up with something better.
5031 
5032  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5033  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5034 
5035  VmaAllocationRequest tmpAllocRequest = {};
5036  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5037  suballocIt != m_Suballocations.end();
5038  ++suballocIt)
5039  {
5040  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5041  suballocIt->hAllocation->CanBecomeLost())
5042  {
5043  if(CheckAllocation(
5044  currentFrameIndex,
5045  frameInUseCount,
5046  bufferImageGranularity,
5047  allocSize,
5048  allocAlignment,
5049  allocType,
5050  suballocIt,
5051  canMakeOtherLost,
5052  &tmpAllocRequest.offset,
5053  &tmpAllocRequest.itemsToMakeLostCount,
5054  &tmpAllocRequest.sumFreeSize,
5055  &tmpAllocRequest.sumItemSize))
5056  {
5057  tmpAllocRequest.item = suballocIt;
5058 
5059  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5060  {
5061  *pAllocationRequest = tmpAllocRequest;
5062  }
5063  }
5064  }
5065  }
5066 
5067  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5068  {
5069  return true;
5070  }
5071  }
5072 
5073  return false;
5074 }
5075 
5076 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5077  uint32_t currentFrameIndex,
5078  uint32_t frameInUseCount,
5079  VmaAllocationRequest* pAllocationRequest)
5080 {
5081  while(pAllocationRequest->itemsToMakeLostCount > 0)
5082  {
5083  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5084  {
5085  ++pAllocationRequest->item;
5086  }
5087  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5088  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5089  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5090  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5091  {
5092  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5093  --pAllocationRequest->itemsToMakeLostCount;
5094  }
5095  else
5096  {
5097  return false;
5098  }
5099  }
5100 
5101  VMA_HEAVY_ASSERT(Validate());
5102  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5103  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5104 
5105  return true;
5106 }
5107 
5108 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5109 {
5110  uint32_t lostAllocationCount = 0;
5111  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5112  it != m_Suballocations.end();
5113  ++it)
5114  {
5115  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5116  it->hAllocation->CanBecomeLost() &&
5117  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5118  {
5119  it = FreeSuballocation(it);
5120  ++lostAllocationCount;
5121  }
5122  }
5123  return lostAllocationCount;
5124 }
5125 
5126 void VmaBlockMetadata::Alloc(
5127  const VmaAllocationRequest& request,
5128  VmaSuballocationType type,
5129  VkDeviceSize allocSize,
5130  VmaAllocation hAllocation)
5131 {
5132  VMA_ASSERT(request.item != m_Suballocations.end());
5133  VmaSuballocation& suballoc = *request.item;
5134  // Given suballocation is a free block.
5135  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5136  // Given offset is inside this suballocation.
5137  VMA_ASSERT(request.offset >= suballoc.offset);
5138  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5139  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5140  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5141 
5142  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5143  // it to become used.
5144  UnregisterFreeSuballocation(request.item);
5145 
5146  suballoc.offset = request.offset;
5147  suballoc.size = allocSize;
5148  suballoc.type = type;
5149  suballoc.hAllocation = hAllocation;
5150 
5151  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5152  if(paddingEnd)
5153  {
5154  VmaSuballocation paddingSuballoc = {};
5155  paddingSuballoc.offset = request.offset + allocSize;
5156  paddingSuballoc.size = paddingEnd;
5157  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5158  VmaSuballocationList::iterator next = request.item;
5159  ++next;
5160  const VmaSuballocationList::iterator paddingEndItem =
5161  m_Suballocations.insert(next, paddingSuballoc);
5162  RegisterFreeSuballocation(paddingEndItem);
5163  }
5164 
5165  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5166  if(paddingBegin)
5167  {
5168  VmaSuballocation paddingSuballoc = {};
5169  paddingSuballoc.offset = request.offset - paddingBegin;
5170  paddingSuballoc.size = paddingBegin;
5171  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5172  const VmaSuballocationList::iterator paddingBeginItem =
5173  m_Suballocations.insert(request.item, paddingSuballoc);
5174  RegisterFreeSuballocation(paddingBeginItem);
5175  }
5176 
5177  // Update totals.
5178  m_FreeCount = m_FreeCount - 1;
5179  if(paddingBegin > 0)
5180  {
5181  ++m_FreeCount;
5182  }
5183  if(paddingEnd > 0)
5184  {
5185  ++m_FreeCount;
5186  }
5187  m_SumFreeSize -= allocSize;
5188 }
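
// Illustrative example (not part of the library): allocating allocSize = 192
// at request.offset = 320 out of a free suballocation [256, 768) of size 512
// gives paddingBegin = 64 and paddingEnd = 256. The free range is split into
// [256, 320) free, [320, 512) used, [512, 768) free, so m_FreeCount grows by
// one (one free range consumed, two inserted) and m_SumFreeSize drops by 192.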
5189 
5190 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5191 {
5192  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5193  suballocItem != m_Suballocations.end();
5194  ++suballocItem)
5195  {
5196  VmaSuballocation& suballoc = *suballocItem;
5197  if(suballoc.hAllocation == allocation)
5198  {
5199  FreeSuballocation(suballocItem);
5200  VMA_HEAVY_ASSERT(Validate());
5201  return;
5202  }
5203  }
5204  VMA_ASSERT(0 && "Not found!");
5205 }
5206 
5207 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5208 {
5209  VkDeviceSize lastSize = 0;
5210  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5211  {
5212  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5213 
5214  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5215  {
5216  VMA_ASSERT(0);
5217  return false;
5218  }
5219  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5220  {
5221  VMA_ASSERT(0);
5222  return false;
5223  }
5224  if(it->size < lastSize)
5225  {
5226  VMA_ASSERT(0);
5227  return false;
5228  }
5229 
5230  lastSize = it->size;
5231  }
5232  return true;
5233 }
5234 
5235 bool VmaBlockMetadata::CheckAllocation(
5236  uint32_t currentFrameIndex,
5237  uint32_t frameInUseCount,
5238  VkDeviceSize bufferImageGranularity,
5239  VkDeviceSize allocSize,
5240  VkDeviceSize allocAlignment,
5241  VmaSuballocationType allocType,
5242  VmaSuballocationList::const_iterator suballocItem,
5243  bool canMakeOtherLost,
5244  VkDeviceSize* pOffset,
5245  size_t* itemsToMakeLostCount,
5246  VkDeviceSize* pSumFreeSize,
5247  VkDeviceSize* pSumItemSize) const
5248 {
5249  VMA_ASSERT(allocSize > 0);
5250  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5251  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5252  VMA_ASSERT(pOffset != VMA_NULL);
5253 
5254  *itemsToMakeLostCount = 0;
5255  *pSumFreeSize = 0;
5256  *pSumItemSize = 0;
5257 
5258  if(canMakeOtherLost)
5259  {
5260  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5261  {
5262  *pSumFreeSize = suballocItem->size;
5263  }
5264  else
5265  {
5266  if(suballocItem->hAllocation->CanBecomeLost() &&
5267  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5268  {
5269  ++*itemsToMakeLostCount;
5270  *pSumItemSize = suballocItem->size;
5271  }
5272  else
5273  {
5274  return false;
5275  }
5276  }
5277 
5278  // Remaining size is too small for this request: Early return.
5279  if(m_Size - suballocItem->offset < allocSize)
5280  {
5281  return false;
5282  }
5283 
5284  // Start from offset equal to beginning of this suballocation.
5285  *pOffset = suballocItem->offset;
5286 
5287  // Apply VMA_DEBUG_MARGIN at the beginning.
5288  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5289  {
5290  *pOffset += VMA_DEBUG_MARGIN;
5291  }
5292 
5293  // Apply alignment.
5294  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5295  *pOffset = VmaAlignUp(*pOffset, alignment);
5296 
5297  // Check previous suballocations for BufferImageGranularity conflicts.
5298  // Make bigger alignment if necessary.
5299  if(bufferImageGranularity > 1)
5300  {
5301  bool bufferImageGranularityConflict = false;
5302  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5303  while(prevSuballocItem != m_Suballocations.cbegin())
5304  {
5305  --prevSuballocItem;
5306  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5307  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5308  {
5309  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5310  {
5311  bufferImageGranularityConflict = true;
5312  break;
5313  }
5314  }
5315  else
5316  // Already on previous page.
5317  break;
5318  }
5319  if(bufferImageGranularityConflict)
5320  {
5321  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5322  }
5323  }
5324 
5325  // Now that we have final *pOffset, check if we are past suballocItem.
5326  // If yes, return false - this function should be called for another suballocItem as starting point.
5327  if(*pOffset >= suballocItem->offset + suballocItem->size)
5328  {
5329  return false;
5330  }
5331 
5332  // Calculate padding at the beginning based on current offset.
5333  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5334 
5335  // Calculate required margin at the end if this is not last suballocation.
5336  VmaSuballocationList::const_iterator next = suballocItem;
5337  ++next;
5338  const VkDeviceSize requiredEndMargin =
5339  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5340 
5341  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5342  // Another early return check.
5343  if(suballocItem->offset + totalSize > m_Size)
5344  {
5345  return false;
5346  }
5347 
5348  // Advance lastSuballocItem until desired size is reached.
5349  // Update itemsToMakeLostCount.
5350  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5351  if(totalSize > suballocItem->size)
5352  {
5353  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5354  while(remainingSize > 0)
5355  {
5356  ++lastSuballocItem;
5357  if(lastSuballocItem == m_Suballocations.cend())
5358  {
5359  return false;
5360  }
5361  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5362  {
5363  *pSumFreeSize += lastSuballocItem->size;
5364  }
5365  else
5366  {
5367  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5368  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5369  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5370  {
5371  ++*itemsToMakeLostCount;
5372  *pSumItemSize += lastSuballocItem->size;
5373  }
5374  else
5375  {
5376  return false;
5377  }
5378  }
5379  remainingSize = (lastSuballocItem->size < remainingSize) ?
5380  remainingSize - lastSuballocItem->size : 0;
5381  }
5382  }
5383 
5384  // Check next suballocations for BufferImageGranularity conflicts.
5385  // If conflict exists, we must mark more allocations lost or fail.
5386  if(bufferImageGranularity > 1)
5387  {
5388  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5389  ++nextSuballocItem;
5390  while(nextSuballocItem != m_Suballocations.cend())
5391  {
5392  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5393  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5394  {
5395  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5396  {
5397  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5398  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5399  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5400  {
5401  ++*itemsToMakeLostCount;
5402  }
5403  else
5404  {
5405  return false;
5406  }
5407  }
5408  }
5409  else
5410  {
5411  // Already on next page.
5412  break;
5413  }
5414  ++nextSuballocItem;
5415  }
5416  }
5417  }
5418  else
5419  {
5420  const VmaSuballocation& suballoc = *suballocItem;
5421  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5422 
5423  *pSumFreeSize = suballoc.size;
5424 
5425  // Size of this suballocation is too small for this request: Early return.
5426  if(suballoc.size < allocSize)
5427  {
5428  return false;
5429  }
5430 
5431  // Start from offset equal to beginning of this suballocation.
5432  *pOffset = suballoc.offset;
5433 
5434  // Apply VMA_DEBUG_MARGIN at the beginning.
5435  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5436  {
5437  *pOffset += VMA_DEBUG_MARGIN;
5438  }
5439 
5440  // Apply alignment.
5441  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5442  *pOffset = VmaAlignUp(*pOffset, alignment);
5443 
5444  // Check previous suballocations for BufferImageGranularity conflicts.
5445  // Make bigger alignment if necessary.
5446  if(bufferImageGranularity > 1)
5447  {
5448  bool bufferImageGranularityConflict = false;
5449  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5450  while(prevSuballocItem != m_Suballocations.cbegin())
5451  {
5452  --prevSuballocItem;
5453  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5454  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5455  {
5456  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5457  {
5458  bufferImageGranularityConflict = true;
5459  break;
5460  }
5461  }
5462  else
5463  // Already on previous page.
5464  break;
5465  }
5466  if(bufferImageGranularityConflict)
5467  {
5468  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5469  }
5470  }
5471 
5472  // Calculate padding at the beginning based on current offset.
5473  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5474 
5475  // Calculate required margin at the end if this is not last suballocation.
5476  VmaSuballocationList::const_iterator next = suballocItem;
5477  ++next;
5478  const VkDeviceSize requiredEndMargin =
5479  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5480 
5481  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5482  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5483  {
5484  return false;
5485  }
5486 
5487  // Check next suballocations for BufferImageGranularity conflicts.
5488  // If conflict exists, allocation cannot be made here.
5489  if(bufferImageGranularity > 1)
5490  {
5491  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5492  ++nextSuballocItem;
5493  while(nextSuballocItem != m_Suballocations.cend())
5494  {
5495  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5496  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5497  {
5498  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5499  {
5500  return false;
5501  }
5502  }
5503  else
5504  {
5505  // Already on next page.
5506  break;
5507  }
5508  ++nextSuballocItem;
5509  }
5510  }
5511  }
5512 
5513  // All tests passed: Success. pOffset is already filled.
5514  return true;
5515 }
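/*
Worked example of the offset computation in CheckAllocation(), for the simple
path (canMakeOtherLost == false), with illustrative numbers: suballoc.offset =
1000, allocAlignment = 16, bufferImageGranularity = 1024, VMA_DEBUG_MARGIN = 0
(its default), and the previous suballocation is an optimal-tiling image
ending on the same 1024-byte page while this request is for a buffer:

    *pOffset = 1000;                    // start of the free range
    *pOffset = VmaAlignUp(1000, 16);    // -> 1008 (requested alignment)
    *pOffset = VmaAlignUp(1008, 1024);  // -> 1024 (granularity conflict)

paddingBegin is then 1024 - 1000 = 24, which must fit, together with allocSize
and the end margin, inside suballoc.size for the function to return true.
*/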
5516 
5517 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5518 {
5519  VMA_ASSERT(item != m_Suballocations.end());
5520  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5521 
5522  VmaSuballocationList::iterator nextItem = item;
5523  ++nextItem;
5524  VMA_ASSERT(nextItem != m_Suballocations.end());
5525  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5526 
5527  item->size += nextItem->size;
5528  --m_FreeCount;
5529  m_Suballocations.erase(nextItem);
5530 }
5531 
5532 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5533 {
5534  // Change this suballocation to be marked as free.
5535  VmaSuballocation& suballoc = *suballocItem;
5536  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5537  suballoc.hAllocation = VK_NULL_HANDLE;
5538 
5539  // Update totals.
5540  ++m_FreeCount;
5541  m_SumFreeSize += suballoc.size;
5542 
5543  // Merge with previous and/or next suballocation if it's also free.
5544  bool mergeWithNext = false;
5545  bool mergeWithPrev = false;
5546 
5547  VmaSuballocationList::iterator nextItem = suballocItem;
5548  ++nextItem;
5549  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5550  {
5551  mergeWithNext = true;
5552  }
5553 
5554  VmaSuballocationList::iterator prevItem = suballocItem;
5555  if(suballocItem != m_Suballocations.begin())
5556  {
5557  --prevItem;
5558  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5559  {
5560  mergeWithPrev = true;
5561  }
5562  }
5563 
5564  if(mergeWithNext)
5565  {
5566  UnregisterFreeSuballocation(nextItem);
5567  MergeFreeWithNext(suballocItem);
5568  }
5569 
5570  if(mergeWithPrev)
5571  {
5572  UnregisterFreeSuballocation(prevItem);
5573  MergeFreeWithNext(prevItem);
5574  RegisterFreeSuballocation(prevItem);
5575  return prevItem;
5576  }
5577  else
5578  {
5579  RegisterFreeSuballocation(suballocItem);
5580  return suballocItem;
5581  }
5582 }
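/*
FreeSuballocation() handles three neighbor layouts (F = free, U = used):

    ... U [item] F ...  -> next is unregistered, then merged into item.
    ... F [item] U ...  -> item is merged into prev; prev is re-registered
                           because its size changed.
    ... F [item] F ...  -> both merges happen; the surviving item is prev.

The freed item itself needs no unregistration: used suballocations are never
present in m_FreeSuballocationsBySize. Re-registration after a merge is
required because that vector is sorted by size.
*/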
5583 
5584 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5585 {
5586  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5587  VMA_ASSERT(item->size > 0);
5588 
5589  // You may want to enable this validation at the beginning or at the end of
5590  // this function, depending on what you want to check.
5591  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5592 
5593  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5594  {
5595  if(m_FreeSuballocationsBySize.empty())
5596  {
5597  m_FreeSuballocationsBySize.push_back(item);
5598  }
5599  else
5600  {
5601  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5602  }
5603  }
5604 
5605  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5606 }
5607 
5608 
5609 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5610 {
5611  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5612  VMA_ASSERT(item->size > 0);
5613 
5614  // You may want to enable this validation at the beginning or at the end of
5615  // this function, depending on what you want to check.
5616  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5617 
5618  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5619  {
5620  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5621  m_FreeSuballocationsBySize.data(),
5622  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5623  item,
5624  VmaSuballocationItemSizeLess());
5625  for(size_t index = it - m_FreeSuballocationsBySize.data();
5626  index < m_FreeSuballocationsBySize.size();
5627  ++index)
5628  {
5629  if(m_FreeSuballocationsBySize[index] == item)
5630  {
5631  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5632  return;
5633  }
5634  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5635  }
5636  VMA_ASSERT(0 && "Not found.");
5637  }
5638 
5639  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5640 }
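/*
Sketch of the lookup idiom above: because m_FreeSuballocationsBySize is sorted
by size only, VmaBinaryFindFirstNotLess() lands on the first element whose
size is not less than item->size, and a short linear scan over the run of
equal-sized elements finds the exact iterator. E.g. with registered sizes

    64, 128, 128, 128, 512

and item->size == 128, the binary search returns the first 128 and the loop
walks at most the three 128-entries; the assert guarantees it never leaves
that run of equal sizes without finding the item.
*/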
5641 
5643 // class VmaDeviceMemoryMapping
5644 
5645 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5646  m_MapCount(0),
5647  m_pMappedData(VMA_NULL)
5648 {
5649 }
5650 
5651 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5652 {
5653  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5654 }
5655 
5656 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5657 {
5658  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5659  if(m_MapCount != 0)
5660  {
5661  ++m_MapCount;
5662  VMA_ASSERT(m_pMappedData != VMA_NULL);
5663  if(ppData != VMA_NULL)
5664  {
5665  *ppData = m_pMappedData;
5666  }
5667  return VK_SUCCESS;
5668  }
5669  else
5670  {
5671  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5672  hAllocator->m_hDevice,
5673  hMemory,
5674  0, // offset
5675  VK_WHOLE_SIZE,
5676  0, // flags
5677  &m_pMappedData);
5678  if(result == VK_SUCCESS)
5679  {
5680  if(ppData != VMA_NULL)
5681  {
5682  *ppData = m_pMappedData;
5683  }
5684  m_MapCount = 1;
5685  }
5686  return result;
5687  }
5688 }
5689 
5690 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5691 {
5692  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5693  if(m_MapCount != 0)
5694  {
5695  if(--m_MapCount == 0)
5696  {
5697  m_pMappedData = VMA_NULL;
5698  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5699  }
5700  }
5701  else
5702  {
5703  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5704  }
5705 }
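/*
Map()/Unmap() form a reference-counted pair: only the first Map() calls
vkMapMemory and only the last Unmap() calls vkUnmapMemory. A usage sketch
(variables are hypothetical, for illustration only):

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    mapping.Map(hAllocator, hMemory, &p1);  // m_MapCount 0 -> 1, vkMapMemory
    mapping.Map(hAllocator, hMemory, &p2);  // m_MapCount 1 -> 2, p2 == p1
    mapping.Unmap(hAllocator, hMemory);     // m_MapCount 2 -> 1, still mapped
    mapping.Unmap(hAllocator, hMemory);     // m_MapCount 1 -> 0, vkUnmapMemory
*/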
5706 
5708 // class VmaDeviceMemoryBlock
5709 
5710 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5711  m_MemoryTypeIndex(UINT32_MAX),
5712  m_hMemory(VK_NULL_HANDLE),
5713  m_Metadata(hAllocator)
5714 {
5715 }
5716 
5717 void VmaDeviceMemoryBlock::Init(
5718  uint32_t newMemoryTypeIndex,
5719  VkDeviceMemory newMemory,
5720  VkDeviceSize newSize)
5721 {
5722  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5723 
5724  m_MemoryTypeIndex = newMemoryTypeIndex;
5725  m_hMemory = newMemory;
5726 
5727  m_Metadata.Init(newSize);
5728 }
5729 
5730 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5731 {
5732  // This is the most important assert in the entire library.
5733  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5734  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5735 
5736  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5737  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5738  m_hMemory = VK_NULL_HANDLE;
5739 }
5740 
5741 bool VmaDeviceMemoryBlock::Validate() const
5742 {
5743  if((m_hMemory == VK_NULL_HANDLE) ||
5744  (m_Metadata.GetSize() == 0))
5745  {
5746  return false;
5747  }
5748 
5749  return m_Metadata.Validate();
5750 }
5751 
5752 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5753 {
5754  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5755 }
5756 
5757 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5758 {
5759  m_Mapping.Unmap(hAllocator, m_hMemory);
5760 }
5761 
5762 static void InitStatInfo(VmaStatInfo& outInfo)
5763 {
5764  memset(&outInfo, 0, sizeof(outInfo));
5765  outInfo.allocationSizeMin = UINT64_MAX;
5766  outInfo.unusedRangeSizeMin = UINT64_MAX;
5767 }
5768 
5769 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5770 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5771 {
5772  inoutInfo.blockCount += srcInfo.blockCount;
5773  inoutInfo.allocationCount += srcInfo.allocationCount;
5774  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5775  inoutInfo.usedBytes += srcInfo.usedBytes;
5776  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5777  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5778  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5779  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5780  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5781 }
5782 
5783 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5784 {
5785  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5786  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5787  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5788  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5789 }
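/*
Example of the aggregation pipeline formed by these three helpers:
InitStatInfo() seeds the minimums with UINT64_MAX so the first
VmaAddStatInfo() overwrites them, and VmaPostprocessCalcStatInfo() derives
averages at the end. With two hypothetical per-block inputs:

    VmaStatInfo total;
    InitStatInfo(total);
    VmaAddStatInfo(total, blockInfo1);  // say 3 allocations, 3000 bytes used
    VmaAddStatInfo(total, blockInfo2);  // say 1 allocation,  1000 bytes used
    VmaPostprocessCalcStatInfo(total);  // allocationSizeAvg == 4000/4 == 1000

blockInfo1/blockInfo2 stand in for the per-block statistics normally produced
by VmaBlockMetadata::CalcAllocationStatInfo().
*/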
5790 
5791 VmaPool_T::VmaPool_T(
5792  VmaAllocator hAllocator,
5793  const VmaPoolCreateInfo& createInfo) :
5794  m_BlockVector(
5795  hAllocator,
5796  createInfo.memoryTypeIndex,
5797  createInfo.blockSize,
5798  createInfo.minBlockCount,
5799  createInfo.maxBlockCount,
5800  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5801  createInfo.frameInUseCount,
5802  true) // isCustomPool
5803 {
5804 }
5805 
5806 VmaPool_T::~VmaPool_T()
5807 {
5808 }
5809 
5810 #if VMA_STATS_STRING_ENABLED
5811 
5812 #endif // #if VMA_STATS_STRING_ENABLED
5813 
5814 VmaBlockVector::VmaBlockVector(
5815  VmaAllocator hAllocator,
5816  uint32_t memoryTypeIndex,
5817  VkDeviceSize preferredBlockSize,
5818  size_t minBlockCount,
5819  size_t maxBlockCount,
5820  VkDeviceSize bufferImageGranularity,
5821  uint32_t frameInUseCount,
5822  bool isCustomPool) :
5823  m_hAllocator(hAllocator),
5824  m_MemoryTypeIndex(memoryTypeIndex),
5825  m_PreferredBlockSize(preferredBlockSize),
5826  m_MinBlockCount(minBlockCount),
5827  m_MaxBlockCount(maxBlockCount),
5828  m_BufferImageGranularity(bufferImageGranularity),
5829  m_FrameInUseCount(frameInUseCount),
5830  m_IsCustomPool(isCustomPool),
5831  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5832  m_HasEmptyBlock(false),
5833  m_pDefragmentator(VMA_NULL)
5834 {
5835 }
5836 
5837 VmaBlockVector::~VmaBlockVector()
5838 {
5839  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5840 
5841  for(size_t i = m_Blocks.size(); i--; )
5842  {
5843  m_Blocks[i]->Destroy(m_hAllocator);
5844  vma_delete(m_hAllocator, m_Blocks[i]);
5845  }
5846 }
5847 
5848 VkResult VmaBlockVector::CreateMinBlocks()
5849 {
5850  for(size_t i = 0; i < m_MinBlockCount; ++i)
5851  {
5852  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5853  if(res != VK_SUCCESS)
5854  {
5855  return res;
5856  }
5857  }
5858  return VK_SUCCESS;
5859 }
5860 
5861 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5862 {
5863  pStats->size = 0;
5864  pStats->unusedSize = 0;
5865  pStats->allocationCount = 0;
5866  pStats->unusedRangeCount = 0;
5867  pStats->unusedRangeSizeMax = 0;
5868 
5869  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5870 
5871  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5872  {
5873  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5874  VMA_ASSERT(pBlock);
5875  VMA_HEAVY_ASSERT(pBlock->Validate());
5876  pBlock->m_Metadata.AddPoolStats(*pStats);
5877  }
5878 }
5879 
5880 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5881 
5882 VkResult VmaBlockVector::Allocate(
5883  VmaPool hCurrentPool,
5884  uint32_t currentFrameIndex,
5885  const VkMemoryRequirements& vkMemReq,
5886  const VmaAllocationCreateInfo& createInfo,
5887  VmaSuballocationType suballocType,
5888  VmaAllocation* pAllocation)
5889 {
5890  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5891  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5892 
5893  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5894 
5895  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5896  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5897  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5898  {
5899  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5900  VMA_ASSERT(pCurrBlock);
5901  VmaAllocationRequest currRequest = {};
5902  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5903  currentFrameIndex,
5904  m_FrameInUseCount,
5905  m_BufferImageGranularity,
5906  vkMemReq.size,
5907  vkMemReq.alignment,
5908  suballocType,
5909  false, // canMakeOtherLost
5910  &currRequest))
5911  {
5912  // Allocate from pCurrBlock.
5913  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5914 
5915  if(mapped)
5916  {
5917  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5918  if(res != VK_SUCCESS)
5919  {
5920  return res;
5921  }
5922  }
5923 
5924  // We no longer have an empty block.
5925  if(pCurrBlock->m_Metadata.IsEmpty())
5926  {
5927  m_HasEmptyBlock = false;
5928  }
5929 
5930  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5931  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5932  (*pAllocation)->InitBlockAllocation(
5933  hCurrentPool,
5934  pCurrBlock,
5935  currRequest.offset,
5936  vkMemReq.alignment,
5937  vkMemReq.size,
5938  suballocType,
5939  mapped,
5940  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5941  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5942  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5943  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5944  return VK_SUCCESS;
5945  }
5946  }
5947 
5948  const bool canCreateNewBlock =
5949  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5950  (m_Blocks.size() < m_MaxBlockCount);
5951 
5952  // 2. Try to create new block.
5953  if(canCreateNewBlock)
5954  {
5955  // Calculate optimal size for new block.
5956  VkDeviceSize newBlockSize = m_PreferredBlockSize;
5957  uint32_t newBlockSizeShift = 0;
5958  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
5959 
5960  // Allocating blocks of other sizes is allowed only in default pools.
5961  // In custom pools block size is fixed.
5962  if(m_IsCustomPool == false)
5963  {
5964  // Allocate 1/8, 1/4, 1/2 as first blocks.
5965  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
5966  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
5967  {
5968  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
5969  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
5970  {
5971  newBlockSize = smallerNewBlockSize;
5972  ++newBlockSizeShift;
5973  }
5974  }
5975  }
5976 
5977  size_t newBlockIndex = 0;
5978  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
5979  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
5980  if(m_IsCustomPool == false)
5981  {
5982  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
5983  {
5984  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
5985  if(smallerNewBlockSize >= vkMemReq.size)
5986  {
5987  newBlockSize = smallerNewBlockSize;
5988  ++newBlockSizeShift;
5989  res = CreateBlock(newBlockSize, &newBlockIndex);
5990  }
5991  else
5992  {
5993  break;
5994  }
5995  }
5996  }
5997 
5998  if(res == VK_SUCCESS)
5999  {
6000  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6001  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6002 
6003  if(mapped)
6004  {
6005  res = pBlock->Map(m_hAllocator, nullptr);
6006  if(res != VK_SUCCESS)
6007  {
6008  return res;
6009  }
6010  }
6011 
6012  // Allocate from pBlock. Because it is empty, allocRequest can be filled trivially.
6013  VmaAllocationRequest allocRequest;
6014  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6015  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6016  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6017  (*pAllocation)->InitBlockAllocation(
6018  hCurrentPool,
6019  pBlock,
6020  allocRequest.offset,
6021  vkMemReq.alignment,
6022  vkMemReq.size,
6023  suballocType,
6024  mapped,
6025  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6026  VMA_HEAVY_ASSERT(pBlock->Validate());
6027  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
6028  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6029  return VK_SUCCESS;
6030  }
6031  }
6032 
6033  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6034 
6035  // 3. Try to allocate from existing blocks with making other allocations lost.
6036  if(canMakeOtherLost)
6037  {
6038  uint32_t tryIndex = 0;
6039  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6040  {
6041  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6042  VmaAllocationRequest bestRequest = {};
6043  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6044 
6045  // 1. Search existing allocations.
6046  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6047  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6048  {
6049  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6050  VMA_ASSERT(pCurrBlock);
6051  VmaAllocationRequest currRequest = {};
6052  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6053  currentFrameIndex,
6054  m_FrameInUseCount,
6055  m_BufferImageGranularity,
6056  vkMemReq.size,
6057  vkMemReq.alignment,
6058  suballocType,
6059  canMakeOtherLost,
6060  &currRequest))
6061  {
6062  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6063  if(pBestRequestBlock == VMA_NULL ||
6064  currRequestCost < bestRequestCost)
6065  {
6066  pBestRequestBlock = pCurrBlock;
6067  bestRequest = currRequest;
6068  bestRequestCost = currRequestCost;
6069 
6070  if(bestRequestCost == 0)
6071  {
6072  break;
6073  }
6074  }
6075  }
6076  }
6077 
6078  if(pBestRequestBlock != VMA_NULL)
6079  {
6080  if(mapped)
6081  {
6082  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
6083  if(res != VK_SUCCESS)
6084  {
6085  return res;
6086  }
6087  }
6088 
6089  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6090  currentFrameIndex,
6091  m_FrameInUseCount,
6092  &bestRequest))
6093  {
6094  // We no longer have an empty block.
6095  if(pBestRequestBlock->m_Metadata.IsEmpty())
6096  {
6097  m_HasEmptyBlock = false;
6098  }
6099  // Allocate from pBestRequestBlock.
6100  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6101  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6102  (*pAllocation)->InitBlockAllocation(
6103  hCurrentPool,
6104  pBestRequestBlock,
6105  bestRequest.offset,
6106  vkMemReq.alignment,
6107  vkMemReq.size,
6108  suballocType,
6109  mapped,
6110  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6111  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6112  VMA_DEBUG_LOG(" Returned from existing block");
6113  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6114  return VK_SUCCESS;
6115  }
6116  // else: Some allocations must have been touched while we are here. Next try.
6117  }
6118  else
6119  {
6120  // Could not find place in any of the blocks - break outer loop.
6121  break;
6122  }
6123  }
6124  /* Maximum number of tries exceeded - a very unlikely event, which can happen
6125  when many other threads are simultaneously touching allocations, making it
6126  impossible to mark them as lost while we try to allocate. */
6127  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6128  {
6129  return VK_ERROR_TOO_MANY_OBJECTS;
6130  }
6131  }
6132 
6133  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6134 }
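/*
Worked example of the new-block sizing heuristic in step 2 (illustrative
numbers): in a default pool with m_PreferredBlockSize = 256 MiB, no existing
blocks and a 4 MiB request, the pre-allocation loop halves the size up to
NEW_BLOCK_SIZE_SHIFT_MAX = 3 times while the halved size still holds twice the
request: 256 -> 128 -> 64 -> 32, so the first block is 32 MiB. If
vkAllocateMemory then fails, the post-allocation loop continues halving within
the remaining shift budget, this time only requiring that the block holds the
request at all. Custom pools skip both loops - their block size is fixed.
*/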
6135 
6136 void VmaBlockVector::Free(
6137  VmaAllocation hAllocation)
6138 {
6139  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6140 
6141  // Scope for lock.
6142  {
6143  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6144 
6145  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6146 
6147  if(hAllocation->IsPersistentMap())
6148  {
6149  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
6150  }
6151 
6152  pBlock->m_Metadata.Free(hAllocation);
6153  VMA_HEAVY_ASSERT(pBlock->Validate());
6154 
6155  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6156 
6157  // pBlock became empty after this deallocation.
6158  if(pBlock->m_Metadata.IsEmpty())
6159  {
6160  // We already have an empty block - we don't want two, so delete this one.
6161  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6162  {
6163  pBlockToDelete = pBlock;
6164  Remove(pBlock);
6165  }
6166  // We now have our first empty block.
6167  else
6168  {
6169  m_HasEmptyBlock = true;
6170  }
6171  }
6172  // pBlock didn't become empty, but we have another empty block - find and free that one.
6173  // (This is optional, heuristics.)
6174  else if(m_HasEmptyBlock)
6175  {
6176  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6177  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6178  {
6179  pBlockToDelete = pLastBlock;
6180  m_Blocks.pop_back();
6181  m_HasEmptyBlock = false;
6182  }
6183  }
6184 
6185  IncrementallySortBlocks();
6186  }
6187 
6188  // Destruction of an empty block. Deferred until this point, outside of the
6189  // mutex lock, for performance reasons.
6190  if(pBlockToDelete != VMA_NULL)
6191  {
6192  VMA_DEBUG_LOG(" Deleted empty block");
6193  pBlockToDelete->Destroy(m_hAllocator);
6194  vma_delete(m_hAllocator, pBlockToDelete);
6195  }
6196 }
6197 
6198 size_t VmaBlockVector::CalcMaxBlockSize() const
6199 {
6200  size_t result = 0;
6201  for(size_t i = m_Blocks.size(); i--; )
6202  {
6203  // Note: block sizes are VkDeviceSize; size_t is assumed to be wide enough here.
6203  result = VMA_MAX(result, static_cast<size_t>(m_Blocks[i]->m_Metadata.GetSize()));
6204  if(result >= m_PreferredBlockSize)
6205  {
6206  break;
6207  }
6208  }
6209  return result;
6210 }
6211 
6212 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6213 {
6214  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6215  {
6216  if(m_Blocks[blockIndex] == pBlock)
6217  {
6218  VmaVectorRemove(m_Blocks, blockIndex);
6219  return;
6220  }
6221  }
6222  VMA_ASSERT(0);
6223 }
6224 
6225 void VmaBlockVector::IncrementallySortBlocks()
6226 {
6227  // Bubble sort only until first swap.
6228  for(size_t i = 1; i < m_Blocks.size(); ++i)
6229  {
6230  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6231  {
6232  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6233  return;
6234  }
6235  }
6236 }
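/*
IncrementallySortBlocks() performs at most one swap per call - a single step
of a bubble sort. Called after every Free(), it keeps m_Blocks approximately
sorted by ascending free space, so the forward scan in Allocate() prefers
nearly-full blocks. E.g. blocks with free sizes

    10, 50, 30, 40   ->   10, 30, 50, 40   (one call: one swap, then return)

A full sort on every free would cost O(n log n) with little practical benefit.
*/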
6237 
6238 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6239 {
6240  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6241  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6242  allocInfo.allocationSize = blockSize;
6243  VkDeviceMemory mem = VK_NULL_HANDLE;
6244  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6245  if(res < 0)
6246  {
6247  return res;
6248  }
6249 
6250  // New VkDeviceMemory successfully created.
6251 
6252  // Create a new block object for it.
6253  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6254  pBlock->Init(
6255  m_MemoryTypeIndex,
6256  mem,
6257  allocInfo.allocationSize);
6258 
6259  m_Blocks.push_back(pBlock);
6260  if(pNewBlockIndex != VMA_NULL)
6261  {
6262  *pNewBlockIndex = m_Blocks.size() - 1;
6263  }
6264 
6265  return VK_SUCCESS;
6266 }
6267 
6268 #if VMA_STATS_STRING_ENABLED
6269 
6270 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6271 {
6272  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6273 
6274  json.BeginObject();
6275 
6276  if(m_IsCustomPool)
6277  {
6278  json.WriteString("MemoryTypeIndex");
6279  json.WriteNumber(m_MemoryTypeIndex);
6280 
6281  json.WriteString("BlockSize");
6282  json.WriteNumber(m_PreferredBlockSize);
6283 
6284  json.WriteString("BlockCount");
6285  json.BeginObject(true);
6286  if(m_MinBlockCount > 0)
6287  {
6288  json.WriteString("Min");
6289  json.WriteNumber(m_MinBlockCount);
6290  }
6291  if(m_MaxBlockCount < SIZE_MAX)
6292  {
6293  json.WriteString("Max");
6294  json.WriteNumber(m_MaxBlockCount);
6295  }
6296  json.WriteString("Cur");
6297  json.WriteNumber(m_Blocks.size());
6298  json.EndObject();
6299 
6300  if(m_FrameInUseCount > 0)
6301  {
6302  json.WriteString("FrameInUseCount");
6303  json.WriteNumber(m_FrameInUseCount);
6304  }
6305  }
6306  else
6307  {
6308  json.WriteString("PreferredBlockSize");
6309  json.WriteNumber(m_PreferredBlockSize);
6310  }
6311 
6312  json.WriteString("Blocks");
6313  json.BeginArray();
6314  for(size_t i = 0; i < m_Blocks.size(); ++i)
6315  {
6316  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6317  }
6318  json.EndArray();
6319 
6320  json.EndObject();
6321 }
6322 
6323 #endif // #if VMA_STATS_STRING_ENABLED
6324 
6325 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6326  VmaAllocator hAllocator,
6327  uint32_t currentFrameIndex)
6328 {
6329  if(m_pDefragmentator == VMA_NULL)
6330  {
6331  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6332  hAllocator,
6333  this,
6334  currentFrameIndex);
6335  }
6336 
6337  return m_pDefragmentator;
6338 }
6339 
6340 VkResult VmaBlockVector::Defragment(
6341  VmaDefragmentationStats* pDefragmentationStats,
6342  VkDeviceSize& maxBytesToMove,
6343  uint32_t& maxAllocationsToMove)
6344 {
6345  if(m_pDefragmentator == VMA_NULL)
6346  {
6347  return VK_SUCCESS;
6348  }
6349 
6350  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6351 
6352  // Defragment.
6353  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6354 
6355  // Accumulate statistics.
6356  if(pDefragmentationStats != VMA_NULL)
6357  {
6358  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6359  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6360  pDefragmentationStats->bytesMoved += bytesMoved;
6361  pDefragmentationStats->allocationsMoved += allocationsMoved;
6362  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6363  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6364  maxBytesToMove -= bytesMoved;
6365  maxAllocationsToMove -= allocationsMoved;
6366  }
6367 
6368  // Free empty blocks.
6369  m_HasEmptyBlock = false;
6370  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6371  {
6372  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6373  if(pBlock->m_Metadata.IsEmpty())
6374  {
6375  if(m_Blocks.size() > m_MinBlockCount)
6376  {
6377  if(pDefragmentationStats != VMA_NULL)
6378  {
6379  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6380  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6381  }
6382 
6383  VmaVectorRemove(m_Blocks, blockIndex);
6384  pBlock->Destroy(m_hAllocator);
6385  vma_delete(m_hAllocator, pBlock);
6386  }
6387  else
6388  {
6389  m_HasEmptyBlock = true;
6390  }
6391  }
6392  }
6393 
6394  return result;
6395 }
6396 
6397 void VmaBlockVector::DestroyDefragmentator()
6398 {
6399  if(m_pDefragmentator != VMA_NULL)
6400  {
6401  vma_delete(m_hAllocator, m_pDefragmentator);
6402  m_pDefragmentator = VMA_NULL;
6403  }
6404 }
6405 
6406 void VmaBlockVector::MakePoolAllocationsLost(
6407  uint32_t currentFrameIndex,
6408  size_t* pLostAllocationCount)
6409 {
6410  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6411  size_t lostAllocationCount = 0;
6412  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6413  {
6414  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6415  VMA_ASSERT(pBlock);
6416  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6417  }
6418  if(pLostAllocationCount != VMA_NULL)
6419  {
6420  *pLostAllocationCount = lostAllocationCount;
6421  }
6422 }
6423 
6424 void VmaBlockVector::AddStats(VmaStats* pStats)
6425 {
6426  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6427  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6428 
6429  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6430 
6431  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6432  {
6433  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6434  VMA_ASSERT(pBlock);
6435  VMA_HEAVY_ASSERT(pBlock->Validate());
6436  VmaStatInfo allocationStatInfo;
6437  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6438  VmaAddStatInfo(pStats->total, allocationStatInfo);
6439  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6440  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6441  }
6442 }
6443 
6445 // VmaDefragmentator members definition
6446 
6447 VmaDefragmentator::VmaDefragmentator(
6448  VmaAllocator hAllocator,
6449  VmaBlockVector* pBlockVector,
6450  uint32_t currentFrameIndex) :
6451  m_hAllocator(hAllocator),
6452  m_pBlockVector(pBlockVector),
6453  m_CurrentFrameIndex(currentFrameIndex),
6454  m_BytesMoved(0),
6455  m_AllocationsMoved(0),
6456  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6457  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6458 {
6459 }
6460 
6461 VmaDefragmentator::~VmaDefragmentator()
6462 {
6463  for(size_t i = m_Blocks.size(); i--; )
6464  {
6465  vma_delete(m_hAllocator, m_Blocks[i]);
6466  }
6467 }
6468 
6469 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6470 {
6471  AllocationInfo allocInfo;
6472  allocInfo.m_hAllocation = hAlloc;
6473  allocInfo.m_pChanged = pChanged;
6474  m_Allocations.push_back(allocInfo);
6475 }
6476 
6477 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6478 {
6479  // It has already been mapped for defragmentation.
6480  if(m_pMappedDataForDefragmentation)
6481  {
6482  *ppMappedData = m_pMappedDataForDefragmentation;
6483  return VK_SUCCESS;
6484  }
6485 
6486  // The block is already mapped persistently (outside of defragmentation).
6487  if(m_pBlock->m_Mapping.GetMappedData())
6488  {
6489  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6490  return VK_SUCCESS;
6491  }
6492 
6493  // Map on first usage.
6494  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6495  *ppMappedData = m_pMappedDataForDefragmentation;
6496  return res;
6497 }
6498 
6499 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6500 {
6501  if(m_pMappedDataForDefragmentation != VMA_NULL)
6502  {
6503  m_pBlock->Unmap(hAllocator);
6504  }
6505 }
6506 
6507 VkResult VmaDefragmentator::DefragmentRound(
6508  VkDeviceSize maxBytesToMove,
6509  uint32_t maxAllocationsToMove)
6510 {
6511  if(m_Blocks.empty())
6512  {
6513  return VK_SUCCESS;
6514  }
6515 
6516  size_t srcBlockIndex = m_Blocks.size() - 1;
6517  size_t srcAllocIndex = SIZE_MAX;
6518  for(;;)
6519  {
6520  // 1. Find next allocation to move.
6521  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6522  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6523  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6524  {
6525  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6526  {
6527  // Finished: no more allocations to process.
6528  if(srcBlockIndex == 0)
6529  {
6530  return VK_SUCCESS;
6531  }
6532  else
6533  {
6534  --srcBlockIndex;
6535  srcAllocIndex = SIZE_MAX;
6536  }
6537  }
6538  else
6539  {
6540  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6541  }
6542  }
6543 
6544  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6545  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6546 
6547  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6548  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6549  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6550  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6551 
6552  // 2. Try to find new place for this allocation in preceding or current block.
6553  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6554  {
6555  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6556  VmaAllocationRequest dstAllocRequest;
6557  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6558  m_CurrentFrameIndex,
6559  m_pBlockVector->GetFrameInUseCount(),
6560  m_pBlockVector->GetBufferImageGranularity(),
6561  size,
6562  alignment,
6563  suballocType,
6564  false, // canMakeOtherLost
6565  &dstAllocRequest) &&
6566  MoveMakesSense(
6567  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6568  {
6569  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6570 
6571  // Reached limit on number of allocations or bytes to move.
6572  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6573  (m_BytesMoved + size > maxBytesToMove))
6574  {
6575  return VK_INCOMPLETE;
6576  }
6577 
6578  void* pDstMappedData = VMA_NULL;
6579  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6580  if(res != VK_SUCCESS)
6581  {
6582  return res;
6583  }
6584 
6585  void* pSrcMappedData = VMA_NULL;
6586  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6587  if(res != VK_SUCCESS)
6588  {
6589  return res;
6590  }
6591 
6592  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6593  memcpy(
6594  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6595  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6596  static_cast<size_t>(size));
6597 
6598  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6599  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6600 
6601  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6602 
6603  if(allocInfo.m_pChanged != VMA_NULL)
6604  {
6605  *allocInfo.m_pChanged = VK_TRUE;
6606  }
6607 
6608  ++m_AllocationsMoved;
6609  m_BytesMoved += size;
6610 
6611  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6612 
6613  break;
6614  }
6615  }
6616 
6617  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6618 
6619  if(srcAllocIndex > 0)
6620  {
6621  --srcAllocIndex;
6622  }
6623  else
6624  {
6625  if(srcBlockIndex > 0)
6626  {
6627  --srcBlockIndex;
6628  srcAllocIndex = SIZE_MAX;
6629  }
6630  else
6631  {
6632  return VK_SUCCESS;
6633  }
6634  }
6635  }
6636 }
6637 
6638 VkResult VmaDefragmentator::Defragment(
6639  VkDeviceSize maxBytesToMove,
6640  uint32_t maxAllocationsToMove)
6641 {
6642  if(m_Allocations.empty())
6643  {
6644  return VK_SUCCESS;
6645  }
6646 
6647  // Create block info for each block.
6648  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6649  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6650  {
6651  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6652  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6653  m_Blocks.push_back(pBlockInfo);
6654  }
6655 
6656  // Sort them by m_pBlock pointer value.
6657  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6658 
6659  // Move allocation infos from m_Allocations to the m_Allocations of their owning m_Blocks entry.
6660  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6661  {
6662  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6663  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check whether this allocation was lost.
6664  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6665  {
6666  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6667  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6668  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6669  {
6670  (*it)->m_Allocations.push_back(allocInfo);
6671  }
6672  else
6673  {
6674  VMA_ASSERT(0);
6675  }
6676  }
6677  }
6678  m_Allocations.clear();
6679 
6680  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6681  {
6682  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6683  pBlockInfo->CalcHasNonMovableAllocations();
6684  pBlockInfo->SortAllocationsBySizeDescecnding();
6685  }
6686 
6687  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6688  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6689 
6690  // Execute defragmentation rounds (the main part).
6691  VkResult result = VK_SUCCESS;
6692  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6693  {
6694  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6695  }
6696 
6697  // Unmap blocks that were mapped for defragmentation.
6698  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6699  {
6700  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6701  }
6702 
6703  return result;
6704 }
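/*
Putting the pieces together - a sketch of how a defragmentation pass is
driven (hypothetical variables; inside the library these calls are issued by
VmaBlockVector::Defragment(), see above):

    VmaDefragmentator* pDefrag =
        blockVector->EnsureDefragmentator(hAllocator, currentFrameIndex);
    pDefrag->AddAllocation(hAlloc, &changed);     // register each candidate
    VkDeviceSize maxBytes = 64ull * 1024 * 1024;  // per-call byte budget
    uint32_t maxAllocs = 128;                     // per-call move budget
    VkResult res = pDefrag->Defragment(maxBytes, maxAllocs);
    // VK_SUCCESS: finished within budget. VK_INCOMPLETE: a budget was hit.
*/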
6705 
6706 bool VmaDefragmentator::MoveMakesSense(
6707  size_t dstBlockIndex, VkDeviceSize dstOffset,
6708  size_t srcBlockIndex, VkDeviceSize srcOffset)
6709 {
6710  if(dstBlockIndex < srcBlockIndex)
6711  {
6712  return true;
6713  }
6714  if(dstBlockIndex > srcBlockIndex)
6715  {
6716  return false;
6717  }
6718  if(dstOffset < srcOffset)
6719  {
6720  return true;
6721  }
6722  return false;
6723 }
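/*
MoveMakesSense() is a strict lexicographic "less than" on (blockIndex,
offset): data only ever moves to a lower block index, or to a lower offset
within the same block. Examples:

    (dst=0, off=4096) vs (src=2, off=0)    -> true  (earlier block)
    (dst=1, off=0)    vs (src=1, off=4096) -> true  (same block, lower offset)
    (dst=1, off=4096) vs (src=1, off=0)    -> false (would move data backward)

This monotonicity is what makes the defragmentation rounds terminate.
*/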
6724 
6726 // VmaAllocator_T
6727 
6728 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6729  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6730  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6731  m_hDevice(pCreateInfo->device),
6732  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6733  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6734  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6735  m_PreferredLargeHeapBlockSize(0),
6736  m_PhysicalDevice(pCreateInfo->physicalDevice),
6737  m_CurrentFrameIndex(0),
6738  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6739 {
6740  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6741 
6742  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
6743  memset(&m_MemProps, 0, sizeof(m_MemProps));
6744  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6745 
6746  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6747  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6748 
6749  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6750  {
6751  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6752  }
6753 
6754  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6755  {
6756  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6757  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6758  }
6759 
6760  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6761 
6762  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6763  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6764 
6765  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6766  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6767 
6768  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6769  {
6770  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6771  {
6772  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6773  if(limit != VK_WHOLE_SIZE)
6774  {
6775  m_HeapSizeLimit[heapIndex] = limit;
6776  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6777  {
6778  m_MemProps.memoryHeaps[heapIndex].size = limit;
6779  }
6780  }
6781  }
6782  }
6783 
6784  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6785  {
6786  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6787 
6788  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6789  this,
6790  memTypeIndex,
6791  preferredBlockSize,
6792  0,
6793  SIZE_MAX,
6794  GetBufferImageGranularity(),
6795  pCreateInfo->frameInUseCount,
6796  false); // isCustomPool
6797  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6798  // because minBlockCount is 0.
6799  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6800  }
6801 }
6802 
6803 VmaAllocator_T::~VmaAllocator_T()
6804 {
6805  VMA_ASSERT(m_Pools.empty());
6806 
6807  for(size_t i = GetMemoryTypeCount(); i--; )
6808  {
6809  vma_delete(this, m_pDedicatedAllocations[i]);
6810  vma_delete(this, m_pBlockVectors[i]);
6811  }
6812 }
6813 
6814 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6815 {
6816 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6817  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6818  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6819  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6820  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6821  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6822  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6823  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6824  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6825  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6826  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6827  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6828  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6829  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6830  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6831  if(m_UseKhrDedicatedAllocation)
6832  {
6833  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
6834  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
6835  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
6836  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
6837  }
6838 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6839 
6840 #define VMA_COPY_IF_NOT_NULL(funcName) \
6841  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6842 
6843  if(pVulkanFunctions != VMA_NULL)
6844  {
6845  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6846  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6847  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6848  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6849  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6850  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6851  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6852  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6853  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6854  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6855  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6856  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6857  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6858  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6859  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6860  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6861  }
6862 
6863 #undef VMA_COPY_IF_NOT_NULL
6864 
6865  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6866  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6867  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6868  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6869  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6870  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6871  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6872  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6873  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6874  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6875  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6876  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6877  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6878  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6879  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6880  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6881  if(m_UseKhrDedicatedAllocation)
6882  {
6883  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6884  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6885  }
6886 }
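/*
When VMA_STATIC_VULKAN_FUNCTIONS is not 1 (e.g. when Vulkan is loaded
dynamically), every pointer must be supplied through
VmaAllocatorCreateInfo::pVulkanFunctions. A sketch (assuming `instance`,
`physicalDevice` and `device` are valid handles; all remaining members must be
filled likewise):

    VmaVulkanFunctions funcs = {};
    funcs.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
    funcs.vkAllocateMemory = (PFN_vkAllocateMemory)
        vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ... etc. for all members of VmaVulkanFunctions ...

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pVulkanFunctions = &funcs;
    VmaAllocator allocator;
    vmaCreateAllocator(&createInfo, &allocator);
*/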
6887 
6888 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6889 {
6890  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6891  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6892  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
6893  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
6894 }
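/*
Example with the library defaults (VMA_SMALL_HEAP_MAX_SIZE = 512 MiB,
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB - both configurable): a 256 MiB
heap counts as "small" and gets 256/8 = 32 MiB blocks, while an 8 GiB heap
gets the full 256 MiB preferred block size.
*/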
6895 
6896 VkResult VmaAllocator_T::AllocateMemoryOfType(
6897  const VkMemoryRequirements& vkMemReq,
6898  bool dedicatedAllocation,
6899  VkBuffer dedicatedBuffer,
6900  VkImage dedicatedImage,
6901  const VmaAllocationCreateInfo& createInfo,
6902  uint32_t memTypeIndex,
6903  VmaSuballocationType suballocType,
6904  VmaAllocation* pAllocation)
6905 {
6906  VMA_ASSERT(pAllocation != VMA_NULL);
6907  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6908 
6909  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6910 
6911  // If memory type is not HOST_VISIBLE, disable MAPPED.
6912  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6913  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6914  {
6915  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6916  }
6917 
6918  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6919  VMA_ASSERT(blockVector);
6920 
6921  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6922  bool preferDedicatedMemory =
6923  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6924  dedicatedAllocation ||
6925  // Heuristic: Allocate dedicated memory if requested size is greater than half of the preferred block size.
6926  vkMemReq.size > preferredBlockSize / 2;
6927 
6928  if(preferDedicatedMemory &&
6929  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6930  finalCreateInfo.pool == VK_NULL_HANDLE)
6931  {
6932  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6933  }
6934 
6935  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6936  {
6937  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6938  {
6939  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6940  }
6941  else
6942  {
6943  return AllocateDedicatedMemory(
6944  vkMemReq.size,
6945  suballocType,
6946  memTypeIndex,
6947  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6948  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6949  finalCreateInfo.pUserData,
6950  dedicatedBuffer,
6951  dedicatedImage,
6952  pAllocation);
6953  }
6954  }
6955  else
6956  {
6957  VkResult res = blockVector->Allocate(
6958  VK_NULL_HANDLE, // hCurrentPool
6959  m_CurrentFrameIndex.load(),
6960  vkMemReq,
6961  finalCreateInfo,
6962  suballocType,
6963  pAllocation);
6964  if(res == VK_SUCCESS)
6965  {
6966  return res;
6967  }
6968 
6969  // Block vector allocation failed. Try dedicated memory.
6970  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6971  {
6972  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6973  }
6974  else
6975  {
6976  res = AllocateDedicatedMemory(
6977  vkMemReq.size,
6978  suballocType,
6979  memTypeIndex,
6980  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6981  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6982  finalCreateInfo.pUserData,
6983  dedicatedBuffer,
6984  dedicatedImage,
6985  pAllocation);
6986  if(res == VK_SUCCESS)
6987  {
6988  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
6989  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6990  return VK_SUCCESS;
6991  }
6992  else
6993  {
6994  // Everything failed: Return error code.
6995  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6996  return res;
6997  }
6998  }
6999  }
7000 }
7001 
7002 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7003  VkDeviceSize size,
7004  VmaSuballocationType suballocType,
7005  uint32_t memTypeIndex,
7006  bool map,
7007  bool isUserDataString,
7008  void* pUserData,
7009  VkBuffer dedicatedBuffer,
7010  VkImage dedicatedImage,
7011  VmaAllocation* pAllocation)
7012 {
7013  VMA_ASSERT(pAllocation);
7014 
7015  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7016  allocInfo.memoryTypeIndex = memTypeIndex;
7017  allocInfo.allocationSize = size;
7018 
7019  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7020  if(m_UseKhrDedicatedAllocation)
7021  {
7022  if(dedicatedBuffer != VK_NULL_HANDLE)
7023  {
7024  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7025  dedicatedAllocInfo.buffer = dedicatedBuffer;
7026  allocInfo.pNext = &dedicatedAllocInfo;
7027  }
7028  else if(dedicatedImage != VK_NULL_HANDLE)
7029  {
7030  dedicatedAllocInfo.image = dedicatedImage;
7031  allocInfo.pNext = &dedicatedAllocInfo;
7032  }
7033  }
7034 
7035  // Allocate VkDeviceMemory.
7036  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7037  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7038  if(res < 0)
7039  {
7040  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7041  return res;
7042  }
7043 
7044  void* pMappedData = nullptr;
7045  if(map)
7046  {
7047  res = (*m_VulkanFunctions.vkMapMemory)(
7048  m_hDevice,
7049  hMemory,
7050  0,
7051  VK_WHOLE_SIZE,
7052  0,
7053  &pMappedData);
7054  if(res < 0)
7055  {
7056  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7057  FreeVulkanMemory(memTypeIndex, size, hMemory);
7058  return res;
7059  }
7060  }
7061 
7062  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7063  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7064  (*pAllocation)->SetUserData(this, pUserData);
7065 
7066  // Register it in m_pDedicatedAllocations.
7067  {
7068  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7069  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7070  VMA_ASSERT(pDedicatedAllocations);
7071  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7072  }
7073 
7074  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7075 
7076  return VK_SUCCESS;
7077 }
7078 
7079 void VmaAllocator_T::GetBufferMemoryRequirements(
7080  VkBuffer hBuffer,
7081  VkMemoryRequirements& memReq,
7082  bool& requiresDedicatedAllocation,
7083  bool& prefersDedicatedAllocation) const
7084 {
7085  if(m_UseKhrDedicatedAllocation)
7086  {
7087  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7088  memReqInfo.buffer = hBuffer;
7089 
7090  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7091 
7092  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7093  memReq2.pNext = &memDedicatedReq;
7094 
7095  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7096 
7097  memReq = memReq2.memoryRequirements;
7098  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7099  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7100  }
7101  else
7102  {
7103  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7104  requiresDedicatedAllocation = false;
7105  prefersDedicatedAllocation = false;
7106  }
7107 }
7108 
7109 void VmaAllocator_T::GetImageMemoryRequirements(
7110  VkImage hImage,
7111  VkMemoryRequirements& memReq,
7112  bool& requiresDedicatedAllocation,
7113  bool& prefersDedicatedAllocation) const
7114 {
7115  if(m_UseKhrDedicatedAllocation)
7116  {
7117  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7118  memReqInfo.image = hImage;
7119 
7120  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7121 
7122  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7123  memReq2.pNext = &memDedicatedReq;
7124 
7125  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7126 
7127  memReq = memReq2.memoryRequirements;
7128  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7129  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7130  }
7131  else
7132  {
7133  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7134  requiresDedicatedAllocation = false;
7135  prefersDedicatedAllocation = false;
7136  }
7137 }
7138 
7139 VkResult VmaAllocator_T::AllocateMemory(
7140  const VkMemoryRequirements& vkMemReq,
7141  bool requiresDedicatedAllocation,
7142  bool prefersDedicatedAllocation,
7143  VkBuffer dedicatedBuffer,
7144  VkImage dedicatedImage,
7145  const VmaAllocationCreateInfo& createInfo,
7146  VmaSuballocationType suballocType,
7147  VmaAllocation* pAllocation)
7148 {
7149  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7150  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7151  {
7152  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7153  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7154  }
7155  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7156  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7157  {
7158  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7159  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7160  }
7161  if(requiresDedicatedAllocation)
7162  {
7163  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7164  {
7165  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7166  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7167  }
7168  if(createInfo.pool != VK_NULL_HANDLE)
7169  {
7170  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7171  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7172  }
7173  }
7174  if((createInfo.pool != VK_NULL_HANDLE) &&
7175  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7176  {
7177  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7178  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7179  }
7180 
7181  if(createInfo.pool != VK_NULL_HANDLE)
7182  {
7183  return createInfo.pool->m_BlockVector.Allocate(
7184  createInfo.pool,
7185  m_CurrentFrameIndex.load(),
7186  vkMemReq,
7187  createInfo,
7188  suballocType,
7189  pAllocation);
7190  }
7191  else
7192  {
7193  // Bit mask of Vulkan memory types acceptable for this allocation.
7194  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7195  uint32_t memTypeIndex = UINT32_MAX;
7196  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7197  if(res == VK_SUCCESS)
7198  {
7199  res = AllocateMemoryOfType(
7200  vkMemReq,
7201  requiresDedicatedAllocation || prefersDedicatedAllocation,
7202  dedicatedBuffer,
7203  dedicatedImage,
7204  createInfo,
7205  memTypeIndex,
7206  suballocType,
7207  pAllocation);
7208  // Succeeded on first try.
7209  if(res == VK_SUCCESS)
7210  {
7211  return res;
7212  }
7213  // Allocation from this memory type failed. Try other compatible memory types.
7214  else
7215  {
7216  for(;;)
7217  {
7218  // Remove old memTypeIndex from list of possibilities.
7219  memoryTypeBits &= ~(1u << memTypeIndex);
7220  // Find alternative memTypeIndex.
7221  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7222  if(res == VK_SUCCESS)
7223  {
7224  res = AllocateMemoryOfType(
7225  vkMemReq,
7226  requiresDedicatedAllocation || prefersDedicatedAllocation,
7227  dedicatedBuffer,
7228  dedicatedImage,
7229  createInfo,
7230  memTypeIndex,
7231  suballocType,
7232  pAllocation);
7233  // Allocation from this alternative memory type succeeded.
7234  if(res == VK_SUCCESS)
7235  {
7236  return res;
7237  }
7238  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7239  }
7240  // No other matching memory type index could be found.
7241  else
7242  {
7243  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7244  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7245  }
7246  }
7247  }
7248  }
7249  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7250  else
7251  return res;
7252  }
7253 }
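// Illustrative walk-through of the retry loop above: suppose
// vkMemReq.memoryTypeBits = 0b0101 (types 0 and 2 acceptable) and type 0 is
// chosen first. If AllocateMemoryOfType fails for type 0, the loop clears bit 0
// (memoryTypeBits becomes 0b0100) and retries with type 2. Once no acceptable
// bits remain, vmaFindMemoryTypeIndex returns VK_ERROR_FEATURE_NOT_PRESENT and
// this function reports VK_ERROR_OUT_OF_DEVICE_MEMORY instead, because at
// least one real allocation attempt has already failed.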
7254 
7255 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7256 {
7257  VMA_ASSERT(allocation);
7258 
7259  if(allocation->CanBecomeLost() == false ||
7260  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7261  {
7262  switch(allocation->GetType())
7263  {
7264  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7265  {
7266  VmaBlockVector* pBlockVector = VMA_NULL;
7267  VmaPool hPool = allocation->GetPool();
7268  if(hPool != VK_NULL_HANDLE)
7269  {
7270  pBlockVector = &hPool->m_BlockVector;
7271  }
7272  else
7273  {
7274  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7275  pBlockVector = m_pBlockVectors[memTypeIndex];
7276  }
7277  pBlockVector->Free(allocation);
7278  }
7279  break;
7280  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7281  FreeDedicatedMemory(allocation);
7282  break;
7283  default:
7284  VMA_ASSERT(0);
7285  }
7286  }
7287 
7288  allocation->SetUserData(this, VMA_NULL);
7289  vma_delete(this, allocation);
7290 }
7291 
7292 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7293 {
7294  // Initialize.
7295  InitStatInfo(pStats->total);
7296  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7297  InitStatInfo(pStats->memoryType[i]);
7298  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7299  InitStatInfo(pStats->memoryHeap[i]);
7300 
7301  // Process default pools.
7302  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7303  {
7304  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7305  VMA_ASSERT(pBlockVector);
7306  pBlockVector->AddStats(pStats);
7307  }
7308 
7309  // Process custom pools.
7310  {
7311  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7312  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7313  {
7314  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7315  }
7316  }
7317 
7318  // Process dedicated allocations.
7319  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7320  {
7321  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7322  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7323  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7324  VMA_ASSERT(pDedicatedAllocVector);
7325  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7326  {
7327  VmaStatInfo allocationStatInfo;
7328  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7329  VmaAddStatInfo(pStats->total, allocationStatInfo);
7330  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7331  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7332  }
7333  }
7334 
7335  // Postprocess.
7336  VmaPostprocessCalcStatInfo(pStats->total);
7337  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7338  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7339  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7340  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7341 }
7342 
7343 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7344 
7345 VkResult VmaAllocator_T::Defragment(
7346  VmaAllocation* pAllocations,
7347  size_t allocationCount,
7348  VkBool32* pAllocationsChanged,
7349  const VmaDefragmentationInfo* pDefragmentationInfo,
7350  VmaDefragmentationStats* pDefragmentationStats)
7351 {
7352  if(pAllocationsChanged != VMA_NULL)
7353  {
7354  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7355  }
7356  if(pDefragmentationStats != VMA_NULL)
7357  {
7358  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7359  }
7360 
7361  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7362 
7363  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7364 
7365  const size_t poolCount = m_Pools.size();
7366 
7367  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7368  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7369  {
7370  VmaAllocation hAlloc = pAllocations[allocIndex];
7371  VMA_ASSERT(hAlloc);
7372  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7373  // DedicatedAlloc cannot be defragmented.
7374  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7375  // Only HOST_VISIBLE memory types can be defragmented.
7376  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7377  // Lost allocation cannot be defragmented.
7378  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7379  {
7380  VmaBlockVector* pAllocBlockVector = nullptr;
7381 
7382  const VmaPool hAllocPool = hAlloc->GetPool();
7383  // This allocation belongs to a custom pool.
7384  if(hAllocPool != VK_NULL_HANDLE)
7385  {
7386  pAllocBlockVector = &hAllocPool->GetBlockVector();
7387  }
7388  // This allocation belongs to a default pool.
7389  else
7390  {
7391  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7392  }
7393 
7394  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7395 
7396  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7397  &pAllocationsChanged[allocIndex] : VMA_NULL;
7398  pDefragmentator->AddAllocation(hAlloc, pChanged);
7399  }
7400  }
7401 
7402  VkResult result = VK_SUCCESS;
7403 
7404  // ======== Main processing.
7405 
7406  VkDeviceSize maxBytesToMove = SIZE_MAX;
7407  uint32_t maxAllocationsToMove = UINT32_MAX;
7408  if(pDefragmentationInfo != VMA_NULL)
7409  {
7410  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7411  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7412  }
7413 
7414  // Process standard memory.
7415  for(uint32_t memTypeIndex = 0;
7416  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7417  ++memTypeIndex)
7418  {
7419  // Only HOST_VISIBLE memory types can be defragmented.
7420  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7421  {
7422  result = m_pBlockVectors[memTypeIndex]->Defragment(
7423  pDefragmentationStats,
7424  maxBytesToMove,
7425  maxAllocationsToMove);
7426  }
7427  }
7428 
7429  // Process custom pools.
7430  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7431  {
7432  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7433  pDefragmentationStats,
7434  maxBytesToMove,
7435  maxAllocationsToMove);
7436  }
7437 
7438  // ======== Destroy defragmentators.
7439 
7440  // Process custom pools.
7441  for(size_t poolIndex = poolCount; poolIndex--; )
7442  {
7443  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7444  }
7445 
7446  // Process standard memory.
7447  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7448  {
7449  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7450  {
7451  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7452  }
7453  }
7454 
7455  return result;
7456 }
7457 
7458 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7459 {
7460  if(hAllocation->CanBecomeLost())
7461  {
7462  /*
7463  Warning: This is a carefully designed algorithm.
7464  Do not modify unless you really know what you're doing :)
7465  */
7466  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7467  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7468  for(;;)
7469  {
7470  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7471  {
7472  pAllocationInfo->memoryType = UINT32_MAX;
7473  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7474  pAllocationInfo->offset = 0;
7475  pAllocationInfo->size = hAllocation->GetSize();
7476  pAllocationInfo->pMappedData = VMA_NULL;
7477  pAllocationInfo->pUserData = hAllocation->GetUserData();
7478  return;
7479  }
7480  else if(localLastUseFrameIndex == localCurrFrameIndex)
7481  {
7482  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7483  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7484  pAllocationInfo->offset = hAllocation->GetOffset();
7485  pAllocationInfo->size = hAllocation->GetSize();
7486  pAllocationInfo->pMappedData = VMA_NULL;
7487  pAllocationInfo->pUserData = hAllocation->GetUserData();
7488  return;
7489  }
7490  else // Last use time earlier than current time.
7491  {
7492  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7493  {
7494  localLastUseFrameIndex = localCurrFrameIndex;
7495  }
7496  }
7497  }
7498  }
7499  else
7500  {
7501  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7502  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7503  pAllocationInfo->offset = hAllocation->GetOffset();
7504  pAllocationInfo->size = hAllocation->GetSize();
7505  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7506  pAllocationInfo->pUserData = hAllocation->GetUserData();
7507  }
7508 }
7509 
7510 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7511 {
7512  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7513 
7514  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7515 
7516  if(newCreateInfo.maxBlockCount == 0)
7517  {
7518  newCreateInfo.maxBlockCount = SIZE_MAX;
7519  }
7520  if(newCreateInfo.blockSize == 0)
7521  {
7522  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7523  }
7524 
7525  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7526 
7527  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7528  if(res != VK_SUCCESS)
7529  {
7530  vma_delete(this, *pPool);
7531  *pPool = VMA_NULL;
7532  return res;
7533  }
7534 
7535  // Add to m_Pools.
7536  {
7537  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7538  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7539  }
7540 
7541  return VK_SUCCESS;
7542 }
7543 
7544 void VmaAllocator_T::DestroyPool(VmaPool pool)
7545 {
7546  // Remove from m_Pools.
7547  {
7548  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7549  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7550  VMA_ASSERT(success && "Pool not found in Allocator.");
7551  }
7552 
7553  vma_delete(this, pool);
7554 }
7555 
7556 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7557 {
7558  pool->m_BlockVector.GetPoolStats(pPoolStats);
7559 }
7560 
7561 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7562 {
7563  m_CurrentFrameIndex.store(frameIndex);
7564 }
7565 
7566 void VmaAllocator_T::MakePoolAllocationsLost(
7567  VmaPool hPool,
7568  size_t* pLostAllocationCount)
7569 {
7570  hPool->m_BlockVector.MakePoolAllocationsLost(
7571  m_CurrentFrameIndex.load(),
7572  pLostAllocationCount);
7573 }
7574 
7575 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7576 {
7577  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7578  (*pAllocation)->InitLost();
7579 }
7580 
7581 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7582 {
7583  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7584 
7585  VkResult res;
7586  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7587  {
7588  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7589  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7590  {
7591  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7592  if(res == VK_SUCCESS)
7593  {
7594  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7595  }
7596  }
7597  else
7598  {
7599  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7600  }
7601  }
7602  else
7603  {
7604  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7605  }
7606 
7607  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7608  {
7609  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7610  }
7611 
7612  return res;
7613 }
7614 
7615 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7616 {
7617  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7618  {
7619  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7620  }
7621 
7622  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7623 
7624  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7625  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7626  {
7627  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7628  m_HeapSizeLimit[heapIndex] += size;
7629  }
7630 }
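// Usage sketch for the heap size limit honored in the two functions above. The
// helper name and the 1 GiB cap are hypothetical: heap 0 is limited via
// VmaAllocatorCreateInfo::pHeapSizeLimit while all other heaps stay unlimited.
static VkResult ExampleCreateAllocatorWithHeapLimit(
    VkPhysicalDevice physicalDevice, VkDevice device, VmaAllocator* pOutAllocator)
{
    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE = no limit on this heap.
    }
    heapLimits[0] = 1024ull * 1024 * 1024; // Assumed cap: 1 GiB on heap 0.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pHeapSizeLimit = heapLimits; // Values are read during creation.
    return vmaCreateAllocator(&allocatorInfo, pOutAllocator);
}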
7631 
7632 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7633 {
7634  if(hAllocation->CanBecomeLost())
7635  {
7636  return VK_ERROR_MEMORY_MAP_FAILED;
7637  }
7638 
7639  switch(hAllocation->GetType())
7640  {
7641  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7642  {
7643  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7644  char *pBytes = nullptr;
7645  VkResult res = pBlock->Map(this, (void**)&pBytes);
7646  if(res == VK_SUCCESS)
7647  {
7648  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7649  hAllocation->BlockAllocMap();
7650  }
7651  return res;
7652  }
7653  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7654  return hAllocation->DedicatedAllocMap(this, ppData);
7655  default:
7656  VMA_ASSERT(0);
7657  return VK_ERROR_MEMORY_MAP_FAILED;
7658  }
7659 }
7660 
7661 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7662 {
7663  switch(hAllocation->GetType())
7664  {
7665  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7666  {
7667  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7668  hAllocation->BlockAllocUnmap();
7669  pBlock->Unmap(this);
7670  }
7671  break;
7672  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7673  hAllocation->DedicatedAllocUnmap(this);
7674  break;
7675  default:
7676  VMA_ASSERT(0);
7677  }
7678 }
7679 
7680 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7681 {
7682  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7683 
7684  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7685  {
7686  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7687  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7688  VMA_ASSERT(pDedicatedAllocations);
7689  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7690  VMA_ASSERT(success);
7691  }
7692 
7693  VkDeviceMemory hMemory = allocation->GetMemory();
7694 
7695  if(allocation->GetMappedData() != VMA_NULL)
7696  {
7697  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7698  }
7699 
7700  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7701 
7702  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7703 }
7704 
7705 #if VMA_STATS_STRING_ENABLED
7706 
7707 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7708 {
7709  bool dedicatedAllocationsStarted = false;
7710  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7711  {
7712  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7713  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7714  VMA_ASSERT(pDedicatedAllocVector);
7715  if(pDedicatedAllocVector->empty() == false)
7716  {
7717  if(dedicatedAllocationsStarted == false)
7718  {
7719  dedicatedAllocationsStarted = true;
7720  json.WriteString("DedicatedAllocations");
7721  json.BeginObject();
7722  }
7723 
7724  json.BeginString("Type ");
7725  json.ContinueString(memTypeIndex);
7726  json.EndString();
7727 
7728  json.BeginArray();
7729 
7730  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7731  {
7732  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7733  json.BeginObject(true);
7734 
7735  json.WriteString("Type");
7736  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7737 
7738  json.WriteString("Size");
7739  json.WriteNumber(hAlloc->GetSize());
7740 
7741  const void* pUserData = hAlloc->GetUserData();
7742  if(pUserData != VMA_NULL)
7743  {
7744  json.WriteString("UserData");
7745  if(hAlloc->IsUserDataString())
7746  {
7747  json.WriteString((const char*)pUserData);
7748  }
7749  else
7750  {
7751  json.BeginString();
7752  json.ContinueString_Pointer(pUserData);
7753  json.EndString();
7754  }
7755  }
7756 
7757  json.EndObject();
7758  }
7759 
7760  json.EndArray();
7761  }
7762  }
7763  if(dedicatedAllocationsStarted)
7764  {
7765  json.EndObject();
7766  }
7767 
7768  {
7769  bool allocationsStarted = false;
7770  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7771  {
7772  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7773  {
7774  if(allocationsStarted == false)
7775  {
7776  allocationsStarted = true;
7777  json.WriteString("DefaultPools");
7778  json.BeginObject();
7779  }
7780 
7781  json.BeginString("Type ");
7782  json.ContinueString(memTypeIndex);
7783  json.EndString();
7784 
7785  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7786  }
7787  }
7788  if(allocationsStarted)
7789  {
7790  json.EndObject();
7791  }
7792  }
7793 
7794  {
7795  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7796  const size_t poolCount = m_Pools.size();
7797  if(poolCount > 0)
7798  {
7799  json.WriteString("Pools");
7800  json.BeginArray();
7801  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7802  {
7803  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7804  }
7805  json.EndArray();
7806  }
7807  }
7808 }
7809 
7810 #endif // #if VMA_STATS_STRING_ENABLED
7811 
7812 static VkResult AllocateMemoryForImage(
7813  VmaAllocator allocator,
7814  VkImage image,
7815  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7816  VmaSuballocationType suballocType,
7817  VmaAllocation* pAllocation)
7818 {
7819  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7820 
7821  VkMemoryRequirements vkMemReq = {};
7822  bool requiresDedicatedAllocation = false;
7823  bool prefersDedicatedAllocation = false;
7824  allocator->GetImageMemoryRequirements(image, vkMemReq,
7825  requiresDedicatedAllocation, prefersDedicatedAllocation);
7826 
7827  return allocator->AllocateMemory(
7828  vkMemReq,
7829  requiresDedicatedAllocation,
7830  prefersDedicatedAllocation,
7831  VK_NULL_HANDLE, // dedicatedBuffer
7832  image, // dedicatedImage
7833  *pAllocationCreateInfo,
7834  suballocType,
7835  pAllocation);
7836 }
7837 
7838 //////////////////////////////////////////////////////////////////////////////
7839 // Public interface
7840 
7841 VkResult vmaCreateAllocator(
7842  const VmaAllocatorCreateInfo* pCreateInfo,
7843  VmaAllocator* pAllocator)
7844 {
7845  VMA_ASSERT(pCreateInfo && pAllocator);
7846  VMA_DEBUG_LOG("vmaCreateAllocator");
7847  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7848  return VK_SUCCESS;
7849 }
7850 
7851 void vmaDestroyAllocator(
7852  VmaAllocator allocator)
7853 {
7854  if(allocator != VK_NULL_HANDLE)
7855  {
7856  VMA_DEBUG_LOG("vmaDestroyAllocator");
7857  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7858  vma_delete(&allocationCallbacks, allocator);
7859  }
7860 }
7861 
7862 void vmaGetPhysicalDeviceProperties(
7863  VmaAllocator allocator,
7864  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7865 {
7866  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7867  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7868 }
7869 
7870 void vmaGetMemoryProperties(
7871  VmaAllocator allocator,
7872  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7873 {
7874  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7875  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7876 }
7877 
7878 void vmaGetMemoryTypeProperties(
7879  VmaAllocator allocator,
7880  uint32_t memoryTypeIndex,
7881  VkMemoryPropertyFlags* pFlags)
7882 {
7883  VMA_ASSERT(allocator && pFlags);
7884  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7885  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7886 }
7887 
7888 void vmaSetCurrentFrameIndex(
7889  VmaAllocator allocator,
7890  uint32_t frameIndex)
7891 {
7892  VMA_ASSERT(allocator);
7893  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7894 
7895  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7896 
7897  allocator->SetCurrentFrameIndex(frameIndex);
7898 }
7899 
7900 void vmaCalculateStats(
7901  VmaAllocator allocator,
7902  VmaStats* pStats)
7903 {
7904  VMA_ASSERT(allocator && pStats);
7905  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7906  allocator->CalculateStats(pStats);
7907 }
7908 
7909 #if VMA_STATS_STRING_ENABLED
7910 
7911 void vmaBuildStatsString(
7912  VmaAllocator allocator,
7913  char** ppStatsString,
7914  VkBool32 detailedMap)
7915 {
7916  VMA_ASSERT(allocator && ppStatsString);
7917  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7918 
7919  VmaStringBuilder sb(allocator);
7920  {
7921  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7922  json.BeginObject();
7923 
7924  VmaStats stats;
7925  allocator->CalculateStats(&stats);
7926 
7927  json.WriteString("Total");
7928  VmaPrintStatInfo(json, stats.total);
7929 
7930  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7931  {
7932  json.BeginString("Heap ");
7933  json.ContinueString(heapIndex);
7934  json.EndString();
7935  json.BeginObject();
7936 
7937  json.WriteString("Size");
7938  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7939 
7940  json.WriteString("Flags");
7941  json.BeginArray(true);
7942  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7943  {
7944  json.WriteString("DEVICE_LOCAL");
7945  }
7946  json.EndArray();
7947 
7948  if(stats.memoryHeap[heapIndex].blockCount > 0)
7949  {
7950  json.WriteString("Stats");
7951  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7952  }
7953 
7954  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7955  {
7956  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7957  {
7958  json.BeginString("Type ");
7959  json.ContinueString(typeIndex);
7960  json.EndString();
7961 
7962  json.BeginObject();
7963 
7964  json.WriteString("Flags");
7965  json.BeginArray(true);
7966  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7967  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7968  {
7969  json.WriteString("DEVICE_LOCAL");
7970  }
7971  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7972  {
7973  json.WriteString("HOST_VISIBLE");
7974  }
7975  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7976  {
7977  json.WriteString("HOST_COHERENT");
7978  }
7979  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7980  {
7981  json.WriteString("HOST_CACHED");
7982  }
7983  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7984  {
7985  json.WriteString("LAZILY_ALLOCATED");
7986  }
7987  json.EndArray();
7988 
7989  if(stats.memoryType[typeIndex].blockCount > 0)
7990  {
7991  json.WriteString("Stats");
7992  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7993  }
7994 
7995  json.EndObject();
7996  }
7997  }
7998 
7999  json.EndObject();
8000  }
8001  if(detailedMap == VK_TRUE)
8002  {
8003  allocator->PrintDetailedMap(json);
8004  }
8005 
8006  json.EndObject();
8007  }
8008 
8009  const size_t len = sb.GetLength();
8010  char* const pChars = vma_new_array(allocator, char, len + 1);
8011  if(len > 0)
8012  {
8013  memcpy(pChars, sb.GetData(), len);
8014  }
8015  pChars[len] = '\0';
8016  *ppStatsString = pChars;
8017 }
8018 
8019 void vmaFreeStatsString(
8020  VmaAllocator allocator,
8021  char* pStatsString)
8022 {
8023  if(pStatsString != VMA_NULL)
8024  {
8025  VMA_ASSERT(allocator);
8026  size_t len = strlen(pStatsString);
8027  vma_delete_array(allocator, pStatsString, len + 1);
8028  }
8029 }
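// Usage sketch for the two functions above; the helper name is hypothetical
// and printf requires <cstdio>.
static void ExamplePrintStats(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    // VK_TRUE requests the detailed map produced by PrintDetailedMap above.
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    printf("%s\n", statsString);
    // The string must be released with the matching free function.
    vmaFreeStatsString(allocator, statsString);
}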
8030 
8031 #endif // #if VMA_STATS_STRING_ENABLED
8032 
8033 /*
8034 This function is not protected by any mutex because it just reads immutable data.
8035 */
8036 VkResult vmaFindMemoryTypeIndex(
8037  VmaAllocator allocator,
8038  uint32_t memoryTypeBits,
8039  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8040  uint32_t* pMemoryTypeIndex)
8041 {
8042  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8043  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8044  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8045 
8046  if(pAllocationCreateInfo->memoryTypeBits != 0)
8047  {
8048  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8049  }
8050 
8051  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8052  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8053 
8054  // Convert usage to requiredFlags and preferredFlags.
8055  switch(pAllocationCreateInfo->usage)
8056  {
8056  {
8057  case VMA_MEMORY_USAGE_UNKNOWN:
8058  break;
8059  case VMA_MEMORY_USAGE_GPU_ONLY:
8060  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8061  break;
8062  case VMA_MEMORY_USAGE_CPU_ONLY:
8063  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8064  break;
8065  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8066  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8067  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8068  break;
8069  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8070  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8071  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8072  break;
8073  default:
8074  break;
8075  }
8076 
8077  *pMemoryTypeIndex = UINT32_MAX;
8078  uint32_t minCost = UINT32_MAX;
8079  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8080  memTypeIndex < allocator->GetMemoryTypeCount();
8081  ++memTypeIndex, memTypeBit <<= 1)
8082  {
8083  // This memory type is acceptable according to memoryTypeBits bitmask.
8084  if((memTypeBit & memoryTypeBits) != 0)
8085  {
8086  const VkMemoryPropertyFlags currFlags =
8087  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8088  // This memory type contains requiredFlags.
8089  if((requiredFlags & ~currFlags) == 0)
8090  {
8091  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8092  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8093  // Remember memory type with lowest cost.
8094  if(currCost < minCost)
8095  {
8096  *pMemoryTypeIndex = memTypeIndex;
8097  if(currCost == 0)
8098  {
8099  return VK_SUCCESS;
8100  }
8101  minCost = currCost;
8102  }
8103  }
8104  }
8105  }
8106  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8107 }
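// Usage sketch with a hypothetical helper name: pick a memory type for a
// CPU-visible staging buffer. Passing UINT32_MAX as memoryTypeBits ("any type
// acceptable") is an assumption for the example; normally this value comes
// from VkMemoryRequirements::memoryTypeBits.
static VkResult ExampleFindStagingMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo createInfo = {};
    // CPU_ONLY maps to required HOST_VISIBLE | HOST_COHERENT in the switch above.
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &createInfo, pMemTypeIndex);
}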
8108 
8109 VkResult vmaCreatePool(
8110  VmaAllocator allocator,
8111  const VmaPoolCreateInfo* pCreateInfo,
8112  VmaPool* pPool)
8113 {
8114  VMA_ASSERT(allocator && pCreateInfo && pPool);
8115 
8116  VMA_DEBUG_LOG("vmaCreatePool");
8117 
8118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8119 
8120  return allocator->CreatePool(pCreateInfo, pPool);
8121 }
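// Usage sketch with a hypothetical helper name: create a custom pool on a
// memory type index found beforehand (e.g. with vmaFindMemoryTypeIndex).
static VkResult ExampleCreatePool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0 = use CalcPreferredBlockSize, as in CreatePool above.
    poolInfo.maxBlockCount = 0; // 0 = unlimited (CreatePool turns this into SIZE_MAX).
    return vmaCreatePool(allocator, &poolInfo, pPool);
}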
8122 
8123 void vmaDestroyPool(
8124  VmaAllocator allocator,
8125  VmaPool pool)
8126 {
8127  VMA_ASSERT(allocator);
8128 
8129  if(pool == VK_NULL_HANDLE)
8130  {
8131  return;
8132  }
8133 
8134  VMA_DEBUG_LOG("vmaDestroyPool");
8135 
8136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8137 
8138  allocator->DestroyPool(pool);
8139 }
8140 
8141 void vmaGetPoolStats(
8142  VmaAllocator allocator,
8143  VmaPool pool,
8144  VmaPoolStats* pPoolStats)
8145 {
8146  VMA_ASSERT(allocator && pool && pPoolStats);
8147 
8148  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8149 
8150  allocator->GetPoolStats(pool, pPoolStats);
8151 }
8152 
8153 void vmaMakePoolAllocationsLost(
8154  VmaAllocator allocator,
8155  VmaPool pool,
8156  size_t* pLostAllocationCount)
8157 {
8158  VMA_ASSERT(allocator && pool);
8159 
8160  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8161 
8162  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8163 }
8164 
8165 VkResult vmaAllocateMemory(
8166  VmaAllocator allocator,
8167  const VkMemoryRequirements* pVkMemoryRequirements,
8168  const VmaAllocationCreateInfo* pCreateInfo,
8169  VmaAllocation* pAllocation,
8170  VmaAllocationInfo* pAllocationInfo)
8171 {
8172  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8173 
8174  VMA_DEBUG_LOG("vmaAllocateMemory");
8175 
8176  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8177 
8178  VkResult result = allocator->AllocateMemory(
8179  *pVkMemoryRequirements,
8180  false, // requiresDedicatedAllocation
8181  false, // prefersDedicatedAllocation
8182  VK_NULL_HANDLE, // dedicatedBuffer
8183  VK_NULL_HANDLE, // dedicatedImage
8184  *pCreateInfo,
8185  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8186  pAllocation);
8187 
8188  if(pAllocationInfo && result == VK_SUCCESS)
8189  {
8190  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8191  }
8192 
8193  return result;
8194 }
8195 
8196 VkResult vmaAllocateMemoryForBuffer(
8197  VmaAllocator allocator,
8198  VkBuffer buffer,
8199  const VmaAllocationCreateInfo* pCreateInfo,
8200  VmaAllocation* pAllocation,
8201  VmaAllocationInfo* pAllocationInfo)
8202 {
8203  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8204 
8205  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8206 
8207  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8208 
8209  VkMemoryRequirements vkMemReq = {};
8210  bool requiresDedicatedAllocation = false;
8211  bool prefersDedicatedAllocation = false;
8212  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8213  requiresDedicatedAllocation,
8214  prefersDedicatedAllocation);
8215 
8216  VkResult result = allocator->AllocateMemory(
8217  vkMemReq,
8218  requiresDedicatedAllocation,
8219  prefersDedicatedAllocation,
8220  buffer, // dedicatedBuffer
8221  VK_NULL_HANDLE, // dedicatedImage
8222  *pCreateInfo,
8223  VMA_SUBALLOCATION_TYPE_BUFFER,
8224  pAllocation);
8225 
8226  if(pAllocationInfo && result == VK_SUCCESS)
8227  {
8228  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8229  }
8230 
8231  return result;
8232 }
8233 
8234 VkResult vmaAllocateMemoryForImage(
8235  VmaAllocator allocator,
8236  VkImage image,
8237  const VmaAllocationCreateInfo* pCreateInfo,
8238  VmaAllocation* pAllocation,
8239  VmaAllocationInfo* pAllocationInfo)
8240 {
8241  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8242 
8243  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8244 
8245  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8246 
8247  VkResult result = AllocateMemoryForImage(
8248  allocator,
8249  image,
8250  pCreateInfo,
8251  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8252  pAllocation);
8253 
8254  if(pAllocationInfo && result == VK_SUCCESS)
8255  {
8256  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8257  }
8258 
8259  return result;
8260 }
8261 
8262 void vmaFreeMemory(
8263  VmaAllocator allocator,
8264  VmaAllocation allocation)
8265 {
8266  VMA_ASSERT(allocator && allocation);
8267 
8268  VMA_DEBUG_LOG("vmaFreeMemory");
8269 
8270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8271 
8272  allocator->FreeMemory(allocation);
8273 }
8274 
8275 void vmaGetAllocationInfo(
8276  VmaAllocator allocator,
8277  VmaAllocation allocation,
8278  VmaAllocationInfo* pAllocationInfo)
8279 {
8280  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8281 
8282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8283 
8284  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8285 }
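// Usage sketch with a hypothetical helper name: for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, the lost state is observable
// through this function, since GetAllocationInfo above reports
// deviceMemory == VK_NULL_HANDLE for a lost allocation.
static bool ExampleAllocationIsLost(VmaAllocator allocator, VmaAllocation allocation)
{
    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, allocation, &info);
    return info.deviceMemory == VK_NULL_HANDLE;
}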
8286 
8287 void vmaSetAllocationUserData(
8288  VmaAllocator allocator,
8289  VmaAllocation allocation,
8290  void* pUserData)
8291 {
8292  VMA_ASSERT(allocator && allocation);
8293 
8294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8295 
8296  allocation->SetUserData(allocator, pUserData);
8297 }
8298 
8299 void vmaCreateLostAllocation(
8300  VmaAllocator allocator,
8301  VmaAllocation* pAllocation)
8302 {
8303  VMA_ASSERT(allocator && pAllocation);
8304 
8305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8306 
8307  allocator->CreateLostAllocation(pAllocation);
8308 }
8309 
8310 VkResult vmaMapMemory(
8311  VmaAllocator allocator,
8312  VmaAllocation allocation,
8313  void** ppData)
8314 {
8315  VMA_ASSERT(allocator && allocation && ppData);
8316 
8317  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8318 
8319  return allocator->Map(allocation, ppData);
8320 }
8321 
8322 void vmaUnmapMemory(
8323  VmaAllocator allocator,
8324  VmaAllocation allocation)
8325 {
8326  VMA_ASSERT(allocator && allocation);
8327 
8328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8329 
8330  allocator->Unmap(allocation);
8331 }
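// Usage sketch with a hypothetical helper name: map, copy, unmap. Map/Unmap
// are reference-counted per block (see BlockAllocMap/BlockAllocUnmap above),
// so this works even if the same allocation is mapped elsewhere.
static VkResult ExampleUpload(VmaAllocator allocator, VmaAllocation allocation,
    const void* srcData, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res != VK_SUCCESS)
    {
        return res; // Also fails for allocations that can become lost, as above.
    }
    memcpy(pData, srcData, size);
    vmaUnmapMemory(allocator, allocation);
    return VK_SUCCESS;
}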
8332 
8333 VkResult vmaDefragment(
8334  VmaAllocator allocator,
8335  VmaAllocation* pAllocations,
8336  size_t allocationCount,
8337  VkBool32* pAllocationsChanged,
8338  const VmaDefragmentationInfo *pDefragmentationInfo,
8339  VmaDefragmentationStats* pDefragmentationStats)
8340 {
8341  VMA_ASSERT(allocator && pAllocations);
8342 
8343  VMA_DEBUG_LOG("vmaDefragment");
8344 
8345  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8346 
8347  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8348 }
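// Usage sketch with a hypothetical helper name. Passing null for
// pDefragmentationInfo means "no limits", per the SIZE_MAX / UINT32_MAX
// defaults in Defragment above. allocationsChanged receives VK_TRUE for each
// allocation that was moved and therefore needs its buffers or images rebound.
static VkResult ExampleDefragment(VmaAllocator allocator,
    VmaAllocation* allocations, size_t count, VkBool32* allocationsChanged)
{
    VmaDefragmentationStats stats = {};
    return vmaDefragment(allocator, allocations, count, allocationsChanged, VMA_NULL, &stats);
}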
8349 
8350 VkResult vmaCreateBuffer(
8351  VmaAllocator allocator,
8352  const VkBufferCreateInfo* pBufferCreateInfo,
8353  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8354  VkBuffer* pBuffer,
8355  VmaAllocation* pAllocation,
8356  VmaAllocationInfo* pAllocationInfo)
8357 {
8358  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8359 
8360  VMA_DEBUG_LOG("vmaCreateBuffer");
8361 
8362  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8363 
8364  *pBuffer = VK_NULL_HANDLE;
8365  *pAllocation = VK_NULL_HANDLE;
8366 
8367  // 1. Create VkBuffer.
8368  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8369  allocator->m_hDevice,
8370  pBufferCreateInfo,
8371  allocator->GetAllocationCallbacks(),
8372  pBuffer);
8373  if(res >= 0)
8374  {
8375  // 2. vkGetBufferMemoryRequirements.
8376  VkMemoryRequirements vkMemReq = {};
8377  bool requiresDedicatedAllocation = false;
8378  bool prefersDedicatedAllocation = false;
8379  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8380  requiresDedicatedAllocation, prefersDedicatedAllocation);
8381 
8382  // Make sure alignment requirements for specific buffer usages reported
8383  // in Physical Device Properties are included in alignment reported by memory requirements.
8384  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8385  {
8386  VMA_ASSERT(vkMemReq.alignment %
8387  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8388  }
8389  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8390  {
8391  VMA_ASSERT(vkMemReq.alignment %
8392  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8393  }
8394  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8395  {
8396  VMA_ASSERT(vkMemReq.alignment %
8397  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8398  }
8399 
8400  // 3. Allocate memory using allocator.
8401  res = allocator->AllocateMemory(
8402  vkMemReq,
8403  requiresDedicatedAllocation,
8404  prefersDedicatedAllocation,
8405  *pBuffer, // dedicatedBuffer
8406  VK_NULL_HANDLE, // dedicatedImage
8407  *pAllocationCreateInfo,
8408  VMA_SUBALLOCATION_TYPE_BUFFER,
8409  pAllocation);
8410  if(res >= 0)
8411  {
8412  // 4. Bind buffer with memory.
8413  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8414  allocator->m_hDevice,
8415  *pBuffer,
8416  (*pAllocation)->GetMemory(),
8417  (*pAllocation)->GetOffset());
8418  if(res >= 0)
8419  {
8420  // All steps succeeded.
8421  if(pAllocationInfo != VMA_NULL)
8422  {
8423  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8424  }
8425  return VK_SUCCESS;
8426  }
8427  allocator->FreeMemory(*pAllocation);
8428  *pAllocation = VK_NULL_HANDLE;
8429  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8430  *pBuffer = VK_NULL_HANDLE;
8431  return res;
8432  }
8433  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8434  *pBuffer = VK_NULL_HANDLE;
8435  return res;
8436  }
8437  return res;
8438 }
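// Usage sketch with a hypothetical helper name: create a GPU-only vertex
// buffer in one call. The 64 KiB size and usage flags are assumptions for the
// example.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator,
    VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536; // Assumed: 64 KiB of vertex data.
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Prefers DEVICE_LOCAL memory.

    // pAllocationInfo may be null when the caller does not need it.
    return vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, pBuffer, pAllocation, VMA_NULL);
}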
8439 
8440 void vmaDestroyBuffer(
8441  VmaAllocator allocator,
8442  VkBuffer buffer,
8443  VmaAllocation allocation)
8444 {
8445  if(buffer != VK_NULL_HANDLE)
8446  {
8447  VMA_ASSERT(allocator);
8448 
8449  VMA_DEBUG_LOG("vmaDestroyBuffer");
8450 
8451  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8452 
8453  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8454 
8455  allocator->FreeMemory(allocation);
8456  }
8457 }
8458 
8459 VkResult vmaCreateImage(
8460  VmaAllocator allocator,
8461  const VkImageCreateInfo* pImageCreateInfo,
8462  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8463  VkImage* pImage,
8464  VmaAllocation* pAllocation,
8465  VmaAllocationInfo* pAllocationInfo)
8466 {
8467  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8468 
8469  VMA_DEBUG_LOG("vmaCreateImage");
8470 
8471  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8472 
8473  *pImage = VK_NULL_HANDLE;
8474  *pAllocation = VK_NULL_HANDLE;
8475 
8476  // 1. Create VkImage.
8477  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8478  allocator->m_hDevice,
8479  pImageCreateInfo,
8480  allocator->GetAllocationCallbacks(),
8481  pImage);
8482  if(res >= 0)
8483  {
8484  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8485  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8486  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8487 
8488  // 2. Allocate memory using allocator.
8489  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8490  if(res >= 0)
8491  {
8492  // 3. Bind image with memory.
8493  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8494  allocator->m_hDevice,
8495  *pImage,
8496  (*pAllocation)->GetMemory(),
8497  (*pAllocation)->GetOffset());
8498  if(res >= 0)
8499  {
8500  // All steps succeeded.
8501  if(pAllocationInfo != VMA_NULL)
8502  {
8503  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8504  }
8505  return VK_SUCCESS;
8506  }
8507  allocator->FreeMemory(*pAllocation);
8508  *pAllocation = VK_NULL_HANDLE;
8509  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8510  *pImage = VK_NULL_HANDLE;
8511  return res;
8512  }
8513  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8514  *pImage = VK_NULL_HANDLE;
8515  return res;
8516  }
8517  return res;
8518 }
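// Usage sketch with a hypothetical helper name: create a sampled 2D texture.
// Extent, format, and usage flags are assumptions for the example.
static VkResult ExampleCreateTexture(VmaAllocator allocator,
    VkImage* pImage, VmaAllocation* pAllocation)
{
    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.extent = { 1024, 1024, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // Selects IMAGE_OPTIMAL suballocation type above.
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imageInfo, &allocInfo, pImage, pAllocation, VMA_NULL);
}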
8519 
8520 void vmaDestroyImage(
8521  VmaAllocator allocator,
8522  VkImage image,
8523  VmaAllocation allocation)
8524 {
8525  if(image != VK_NULL_HANDLE)
8526  {
8527  VMA_ASSERT(allocator);
8528 
8529  VMA_DEBUG_LOG("vmaDestroyImage");
8530 
8531  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8532 
8533  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8534 
8535  allocator->FreeMemory(allocation);
8536  }
8537 }
8538 
8539 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:758
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1002
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:783
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:768
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:965
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:762
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1270
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:780
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1436
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1140
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1194
Definition: vk_mem_alloc.h:1039
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:751
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1077
Definition: vk_mem_alloc.h:986
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:792
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:845
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:777
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:990
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:910
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:765
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:909
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:773
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1440
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:809
VmaStatInfo total
Definition: vk_mem_alloc.h:919
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1448
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1061
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1431
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:766
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:693
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:786
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1148
Definition: vk_mem_alloc.h:1142
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1280
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:763
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1098
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1164
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1200
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:749
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1151
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:947
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1426
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1444
Definition: vk_mem_alloc.h:980
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1085
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:764
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:915
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:699
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:720
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
Definition: vk_mem_alloc.h:725
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1446
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1072
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1210
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
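The returned string should be released with the matching vmaFreeStatsString() declared in the same header. A short sketch (printf requires <stdio.h>):

char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);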
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:759
Calculated statistics of memory usage in the entire allocator.
Definition: vk_mem_alloc.h:898
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1159
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:712
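Both callbacks are informative only, so a typical use is logging or bookkeeping. A sketch; the printf logging is illustrative, not part of the library:

#include <stdio.h>

static VKAPI_ATTR void VKAPI_CALL MyAllocateCallback(
    VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size)
{
    (void)allocator; (void)memory; // unused here
    printf("vkAllocateMemory: type %u, %llu bytes\n",
        memoryType, (unsigned long long)size);
}

static VKAPI_ATTR void VKAPI_CALL MyFreeCallback(
    VmaAllocator allocator, uint32_t memoryType,
    VkDeviceMemory memory, VkDeviceSize size)
{
    (void)allocator; (void)memory; // unused here
    printf("vkFreeMemory: type %u, %llu bytes\n",
        memoryType, (unsigned long long)size);
}

// Hooked up through VmaAllocatorCreateInfo::pDeviceMemoryCallbacks:
const VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {
    MyAllocateCallback, // pfnAllocate
    MyFreeCallback      // pfnFree
};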
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
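A typical call pairs an ordinary VkBufferCreateInfo with a VmaAllocationCreateInfo; a minimal sketch with arbitrary size and usage flags:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                   VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
    &buffer, &allocation, NULL); // pAllocationInfo is optional
// ... use the buffer, then free buffer and memory in one call:
vmaDestroyBuffer(allocator, buffer, allocation);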
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:911
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:716
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1154
struct VmaPoolStats VmaPoolStats
Describes the parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
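The image variant works the same way; a sketch with arbitrary image parameters:

VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imageInfo.extent.width = 1024;
imageInfo.extent.height = 1024;
imageInfo.extent.depth = 1;
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imageInfo, &allocCreateInfo,
    &image, &allocation, NULL);
// ...
vmaDestroyImage(allocator, image, allocation);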
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1067
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:901
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:761
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1172
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:795
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1203
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1056
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1091
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
Definition: vk_mem_alloc.h:833
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:917
Set this flag to use a memory that will be persistently mapped, and retrieve a pointer to it in VmaAllocationInfo::pMappedData.
Definition: vk_mem_alloc.h:1026
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:910
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:770
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:714
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:769
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
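A sketch of the map/copy/unmap pattern. It assumes the allocation lives in a host-visible memory type (e.g. created with VMA_MEMORY_USAGE_CPU_ONLY); `srcData` and `srcSize` stand in for application data:

void* mappedData;
if (vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcSize); // memcpy: <string.h>
    vmaUnmapMemory(allocator, allocation);
}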
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:1186
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1294
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
Definition: vk_mem_alloc.h:789
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:910
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:907
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes the parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1191
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1275
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1442
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:757
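When the application fetches Vulkan entry points dynamically, the same pointers can be handed to the allocator instead of relying on static linking. A partial sketch, filling only a few members (the rest are assigned the same way) and passing the struct via the pVulkanFunctions member of VmaAllocatorCreateInfo:

VmaVulkanFunctions vulkanFunctions = {0};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... and so on for every member of the struct ...

VmaAllocatorCreateInfo allocatorInfo = {0};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;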
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
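A minimal sketch, assuming `physicalDevice` and `device` were created by the usual Vulkan initialization:

VmaAllocatorCreateInfo allocatorInfo = {0};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator for the lifetime of the device, then:
vmaDestroyAllocator(allocator);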
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:772
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:905
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1144
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:903
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:767
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:771
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:1013
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1289
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:747
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:760
Parameters of a VmaAllocation object, which can be retrieved using the function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1256
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
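This is the lowest-level path, for memory requirements obtained outside of vmaCreateBuffer()/vmaCreateImage(); binding is then the caller's job. A sketch, assuming `buffer` already exists:

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {0};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
if (res == VK_SUCCESS)
    vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);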
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
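This index drives the lost-allocation machinery together with frameInUseCount; a sketch of the per-frame call:

uint32_t frameIndex = 0;
for (;;) // render loop
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit this frame's work ...
    ++frameIndex;
}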
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1122
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:911
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:918
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1197
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:911
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1261