//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

/// Flags for created #VmaAllocator.
typedef enum VmaAllocatorCreateFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time or synchronized externally by you.
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    /// Enables usage of the VK_KHR_dedicated_allocation extension.
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;

typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block allocated from large heaps. Optional - set to 0 to use default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single VkDeviceMemory block allocated from small heaps. Optional - set to 0 to use default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional, can be null.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heaps.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

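/*
A minimal usage sketch (illustrative, not part of the original header): creating
and destroying an allocator. `physicalDevice` and `device` are assumed to be a
valid VkPhysicalDevice and VkDevice created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create buffers, images, allocations ...
    vmaDestroyAllocator(allocator); // After destroying all objects created from it.
*/
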
/** PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own. */
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/** PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
You can access them here, without fetching them again on your own. */
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/** Given a memory type index, returns its VkMemoryPropertyFlags.
This is just a convenience function; the same information can be obtained using
vmaGetMemoryProperties(). */
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/** Sets index of the current frame.
Required for allocations that can become lost. */
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from the current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/** Builds and returns statistics as a string in JSON format.
@param[out] ppStatsString Must be freed using the vmaFreeStatsString() function.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

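/*
Illustrative sketch (not from the original header): dumping allocator statistics
to a JSON string, assuming `allocator` is a valid VmaAllocator.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map.
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
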
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so faster access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped and used on host.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for writing on device and readback on host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

/// Flags to be passed as VmaAllocationCreateInfo::flags.
typedef enum VmaAllocationCreateFlagBits {
    /// Set this flag if the allocation should have its own memory block.
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new ones.
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use a memory that will be persistently mapped and retrieve a pointer to it.
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    /// Allocation created with this flag can become lost as a result of another allocation with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag.
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    /// While creating an allocation using this flag, other allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    /// Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a null-terminated string to be copied.
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Leave #VMA_MEMORY_USAGE_UNKNOWN if you specify requirements in another way. Ignored if pool is not null.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation. Ignored if pool is not null.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation. Ignored if pool is not null.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation. Ignored if pool is not null.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Leave VK_NULL_HANDLE to allocate from the default pool.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in the VmaAllocation and can be read as VmaAllocationInfo::pUserData.
    void* pUserData;
} VmaAllocationCreateInfo;

/**
This algorithm tries to find a memory type that:
- Is allowed by memoryTypeBits.
- Contains all the flags from pAllocationCreateInfo->requiredFlags.
- Matches the intended usage.
- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.

\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found.
*/
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

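/*
Illustrative sketch (not from the original header): picking a HOST_VISIBLE +
HOST_COHERENT memory type for a staging buffer, assuming `allocator` is valid
and `memReq` was filled by vkGetBufferMemoryRequirements.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
*/
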

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a created #VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlags.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Zero means no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing #VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/** Allocates Vulkan device memory and creates a #VmaPool object.
@param[out] pPool Handle to the created pool.
*/
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

/// Destroys a #VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of an existing #VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

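/*
Illustrative sketch (not from the original header): creating a custom pool for
a specific memory type. `allocator` and `memTypeIndex` (e.g. obtained from
vmaFindMemoryTypeIndex) are assumed to exist.

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block.
    poolInfo.minBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // ... set VmaAllocationCreateInfo::pool = pool when allocating ...
    vmaDestroyPool(allocator, pool);
*/
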
/** Marks all allocations in the given pool as lost if they are not used in the
current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
*/
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a #VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from. It never changes.
    uint32_t memoryType;
    /// Handle to Vulkan memory object. The same memory object can be shared by multiple allocations.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if the allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/** General purpose memory allocation.
@param[out] pAllocation Handle to allocated memory.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be fetched later using vmaGetAllocationInfo().

You should free the memory using vmaFreeMemory().
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/** Sets pUserData in the given allocation to a new value.
If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
pUserData must be either null or a pointer to a null-terminated string, which will be copied. */
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/** Creates a new allocation that is in lost state from the beginning.
It can be useful if you need a dummy, non-null allocation. */
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

/** Maps memory represented by the given allocation and returns a pointer to it.
Maps can be nested: the same allocation may be mapped multiple times and must be
unmapped the same number of times. */
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

/// Unmaps memory represented by the given allocation, mapped previously using vmaMapMemory().
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

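/*
Illustrative sketch (not from the original header): uploading data through a
mapped allocation, assuming `allocator`, `allocation`, `srcData` and `srcSize`
exist and the allocation lives in HOST_VISIBLE memory.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
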
/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places. Use VK_WHOLE_SIZE for no limit.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place. Use UINT32_MAX for no limit.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/** Compacts memory by moving allocations.

@param pAllocations Array of allocations that can be moved during this compaction.
@param allocationCount Number of elements in the pAllocations and pAllocationsChanged arrays.
@param[out] pAllocationsChanged Array of booleans that will indicate whether the matching allocation in pAllocations has been moved. Optional - pass null if you don't need this information.
@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
*/
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

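/*
Illustrative sketch (not from the original header): defragmenting a set of
allocations, assuming `allocator` and a filled `allocations` array of size
ALLOC_COUNT exist.

    VkBool32 allocationsChanged[ALLOC_COUNT] = {};
    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(
        allocator, allocations, ALLOC_COUNT, allocationsChanged, VMA_NULL, &stats);
    // For each allocationsChanged[i] == VK_TRUE the allocation was moved, so any
    // buffer or image bound to it must be recreated and rebound at the new place.
*/
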
/** Creates a new VkBuffer, allocates and binds memory for it.

@param[out] pBuffer Buffer that was created.
@param[out] pAllocation Allocation that was created.
@param[out] pAllocationInfo Optional. Information about allocated memory. It can be fetched later using vmaGetAllocationInfo().

You should free the buffer using vmaDestroyBuffer().
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

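/*
Illustrative sketch (not from the original header): creating a GPU-only vertex
buffer in one call, assuming `allocator` exists.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &allocInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
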
/// Destroys a Vulkan buffer and frees allocated memory.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

/// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Destroys a Vulkan image and frees allocated memory.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf
#include <cstdlib>
#include <cstring>

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define to 0 if you are going to provide your own pointers to Vulkan functions via
VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    /* Set to 1 to use the Best-Fit strategy when searching free suballocations:
    prefer smaller blocks, as close to the size of the requested allocation as
    possible. Set to 0 to prefer larger blocks instead. */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set to 1 to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
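
// Worked example (illustrative): VmaCountBitsSet(0x2A) == 3, because 0x2A == 0b101010.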

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to the nearest integer.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
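
// Worked example (illustrative): with pageSize = 4096, a resource at offset 4000
// with size 100 ends at byte 4099, which lies on the page starting at 4096; a
// second resource at offset 4100 lies on the same page, so
// VmaBlocksOnSamePage(4000, 100, 4100, 4096) returns true. With
// resourceBOffset = 8192 it would return false.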

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if the given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other is an optimal image. If a type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};
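
/*
Illustrative usage sketch (not from the original source), assuming m_Mutex is a
VMA_MUTEX member and m_UseMutex says whether locking is enabled:

    void Foo()
    {
        VmaMutexLock lock(m_Mutex, m_UseMutex); // Locks only if m_UseMutex is true.
        // ... critical section ...
    } // Unlocked automatically at end of scope.
*/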

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place
where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
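
/*
Illustrative example (not from the original source): searching a sorted array of
integers for the insertion position of 7.

    int arr[] = { 1, 3, 7, 7, 10 };
    const int* pos = VmaBinaryFindFirstNotLess(
        arr, arr + 5, 7, [](int lhs, int rhs) { return lhs < rhs; });
    // pos points to arr[2] - the first element not less than the key.
*/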

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - vector.data());
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded, because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use the first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new block and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in the address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

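/*
Illustrative usage sketch (not from the original source): the pool allocator
hands out fixed-size items from blocks via an intrusive free list. `MyItem` is
a hypothetical POD-like type; note that Alloc() returns raw storage and does
not run constructors.

    VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 32); // 32 items per block.
    MyItem* item = pool.Alloc(); // O(1) unless a new block must be created.
    // ...
    pool.Free(item); // Returns the item to its block's free list.
*/
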
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST
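
/*
Illustrative usage sketch (not from the original source): VmaList mirrors a
subset of std::list, backed by VmaRawList and the pool allocator above.

    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    VmaList<int, VmaStlAllocator<int>> list(alloc);
    list.push_back(42);
    for(VmaList<int, VmaStlAllocator<int>>::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it == 42
    }
*/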

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

////////////////////////////////////////////////////////////////////////////////

class VmaDeviceMemoryBlock;

struct VmaAllocation_T
{
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are a reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};

3376 /*
3377 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
3378 allocation (and returned as allocated memory) or free.
3379 */
3380 struct VmaSuballocation
3381 {
3382  VkDeviceSize offset;
3383  VkDeviceSize size;
3384  VmaAllocation hAllocation;
3385  VmaSuballocationType type;
3386 };
3387 
3388 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3389 
3390 // Cost of making one additional allocation lost, expressed in bytes.
3391 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3392 
3393 /*
3394 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3395 
3396 If canMakeOtherLost was false:
3397 - item points to a FREE suballocation.
3398 - itemsToMakeLostCount is 0.
3399 
3400 If canMakeOtherLost was true:
3401 - item points to the first of a sequence of suballocations, each of which is
3402  either FREE or points to a VmaAllocation that can become lost.
3403 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3404  the requested allocation to succeed.
3405 */
3406 struct VmaAllocationRequest
3407 {
3408  VkDeviceSize offset;
3409  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3410  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3411  VmaSuballocationList::iterator item;
3412  size_t itemsToMakeLostCount;
3413 
3414  VkDeviceSize CalcCost() const
3415  {
3416  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3417  }
3418 };
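// Editorial note - a minimal worked example of CalcCost() with hypothetical
// numbers: a candidate that overlaps 262144 bytes of allocations spread over
// 2 items that would have to be made lost costs
// 262144 + 2 * 1048576 = 2359296 bytes-equivalent, while a candidate that fits
// entirely into FREE space costs 0, so it would always win the cost comparison
// performed in CreateAllocationRequest() below.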
3419 
3420 /*
3421 Data structure used for bookkeeping of allocations and unused ranges of memory
3422 in a single VkDeviceMemory block.
3423 */
3424 class VmaBlockMetadata
3425 {
3426 public:
3427  VmaBlockMetadata(VmaAllocator hAllocator);
3428  ~VmaBlockMetadata();
3429  void Init(VkDeviceSize size);
3430 
3431  // Validates all data structures inside this object. If not valid, returns false.
3432  bool Validate() const;
3433  VkDeviceSize GetSize() const { return m_Size; }
3434  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3435  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3436  VkDeviceSize GetUnusedRangeSizeMax() const;
3437  // Returns true if this block is empty - contains only a single free suballocation.
3438  bool IsEmpty() const;
3439 
3440  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3441  void AddPoolStats(VmaPoolStats& inoutStats) const;
3442 
3443 #if VMA_STATS_STRING_ENABLED
3444  void PrintDetailedMap(class VmaJsonWriter& json) const;
3445 #endif
3446 
3447  // Creates a trivial request for the case when the block is empty.
3448  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3449 
3450  // Tries to find a place for suballocation with given parameters inside this block.
3451  // If succeeded, fills pAllocationRequest and returns true.
3452  // If failed, returns false.
3453  bool CreateAllocationRequest(
3454  uint32_t currentFrameIndex,
3455  uint32_t frameInUseCount,
3456  VkDeviceSize bufferImageGranularity,
3457  VkDeviceSize allocSize,
3458  VkDeviceSize allocAlignment,
3459  VmaSuballocationType allocType,
3460  bool canMakeOtherLost,
3461  VmaAllocationRequest* pAllocationRequest);
3462 
3463  bool MakeRequestedAllocationsLost(
3464  uint32_t currentFrameIndex,
3465  uint32_t frameInUseCount,
3466  VmaAllocationRequest* pAllocationRequest);
3467 
3468  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3469 
3470  // Makes actual allocation based on request. Request must already be checked and valid.
3471  void Alloc(
3472  const VmaAllocationRequest& request,
3473  VmaSuballocationType type,
3474  VkDeviceSize allocSize,
3475  VmaAllocation hAllocation);
3476 
3477  // Frees suballocation assigned to given memory region.
3478  void Free(const VmaAllocation allocation);
3479 
3480 private:
3481  VkDeviceSize m_Size;
3482  uint32_t m_FreeCount;
3483  VkDeviceSize m_SumFreeSize;
3484  VmaSuballocationList m_Suballocations;
3485  // Suballocations that are free and have size greater than a certain threshold.
3486  // Sorted by size, ascending.
3487  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3488 
3489  bool ValidateFreeSuballocationList() const;
3490 
3491  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
3492  // If yes, fills pOffset and returns true. If no, returns false.
3493  bool CheckAllocation(
3494  uint32_t currentFrameIndex,
3495  uint32_t frameInUseCount,
3496  VkDeviceSize bufferImageGranularity,
3497  VkDeviceSize allocSize,
3498  VkDeviceSize allocAlignment,
3499  VmaSuballocationType allocType,
3500  VmaSuballocationList::const_iterator suballocItem,
3501  bool canMakeOtherLost,
3502  VkDeviceSize* pOffset,
3503  size_t* itemsToMakeLostCount,
3504  VkDeviceSize* pSumFreeSize,
3505  VkDeviceSize* pSumItemSize) const;
3506  // Merges the given free suballocation with the following one, which must also be free.
3507  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3508  // Releases given suballocation, making it free.
3509  // Merges it with adjacent free suballocations if applicable.
3510  // Returns iterator to new free suballocation at this place.
3511  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3512  // Inserts the given free suballocation into the sorted list
3513  // m_FreeSuballocationsBySize if it is large enough to be registered.
3514  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3515  // Removes the given free suballocation from the sorted list
3516  // m_FreeSuballocationsBySize if it was registered there.
3517  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3518 };
3519 
3520 // Helper class that represents mapped memory. Synchronized internally.
3521 class VmaDeviceMemoryMapping
3522 {
3523 public:
3524  VmaDeviceMemoryMapping();
3525  ~VmaDeviceMemoryMapping();
3526 
3527  void* GetMappedData() const { return m_pMappedData; }
3528 
3529  // ppData can be null.
3530  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3531  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3532 
3533 private:
3534  VMA_MUTEX m_Mutex;
3535  uint32_t m_MapCount;
3536  void* m_pMappedData;
3537 };
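// Editorial sketch - why this helper exists: several allocations inside one
// VkDeviceMemory block may be mapped at the same time, but Vulkan forbids
// mapping a VkDeviceMemory that is already host-mapped. The mutex plus the
// m_MapCount reference count ensure vkMapMemory() happens only on the 0 -> 1
// transition and vkUnmapMemory() only on the 1 -> 0 transition. Illustrative
// use (hMemory is a hypothetical handle, error handling elided):
//
//     void* pData = VMA_NULL;
//     if(mapping.Map(hAllocator, hMemory, &pData) == VK_SUCCESS)
//     {
//         memcpy(pData, src, srcSize); // write through the shared mapping
//         mapping.Unmap(hAllocator, hMemory);
//     }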
3538 
3539 /*
3540 Represents a single block of device memory (`VkDeviceMemory`) with all the
3541 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3542 
3543 Thread-safety: This class must be externally synchronized.
3544 */
3545 class VmaDeviceMemoryBlock
3546 {
3547 public:
3548  uint32_t m_MemoryTypeIndex;
3549  VkDeviceMemory m_hMemory;
3550  VmaDeviceMemoryMapping m_Mapping;
3551  VmaBlockMetadata m_Metadata;
3552 
3553  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3554 
3555  ~VmaDeviceMemoryBlock()
3556  {
3557  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3558  }
3559 
3560  // Always call after construction.
3561  void Init(
3562  uint32_t newMemoryTypeIndex,
3563  VkDeviceMemory newMemory,
3564  VkDeviceSize newSize);
3565  // Always call before destruction.
3566  void Destroy(VmaAllocator allocator);
3567 
3568  // Validates all data structures inside this object. If not valid, returns false.
3569  bool Validate() const;
3570 
3571  // ppData can be null.
3572  VkResult Map(VmaAllocator hAllocator, void** ppData);
3573  void Unmap(VmaAllocator hAllocator);
3574 };
3575 
3576 struct VmaPointerLess
3577 {
3578  bool operator()(const void* lhs, const void* rhs) const
3579  {
3580  return lhs < rhs;
3581  }
3582 };
3583 
3584 class VmaDefragmentator;
3585 
3586 /*
3587 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3588 Vulkan memory type.
3589 
3590 Synchronized internally with a mutex.
3591 */
3592 struct VmaBlockVector
3593 {
3594  VmaBlockVector(
3595  VmaAllocator hAllocator,
3596  uint32_t memoryTypeIndex,
3597  VkDeviceSize preferredBlockSize,
3598  size_t minBlockCount,
3599  size_t maxBlockCount,
3600  VkDeviceSize bufferImageGranularity,
3601  uint32_t frameInUseCount,
3602  bool isCustomPool);
3603  ~VmaBlockVector();
3604 
3605  VkResult CreateMinBlocks();
3606 
3607  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3608  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3609  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3610  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3611 
3612  void GetPoolStats(VmaPoolStats* pStats);
3613 
3614  bool IsEmpty() const { return m_Blocks.empty(); }
3615 
3616  VkResult Allocate(
3617  VmaPool hCurrentPool,
3618  uint32_t currentFrameIndex,
3619  const VkMemoryRequirements& vkMemReq,
3620  const VmaAllocationCreateInfo& createInfo,
3621  VmaSuballocationType suballocType,
3622  VmaAllocation* pAllocation);
3623 
3624  void Free(
3625  VmaAllocation hAllocation);
3626 
3627  // Adds statistics of this BlockVector to pStats.
3628  void AddStats(VmaStats* pStats);
3629 
3630 #if VMA_STATS_STRING_ENABLED
3631  void PrintDetailedMap(class VmaJsonWriter& json);
3632 #endif
3633 
3634  void MakePoolAllocationsLost(
3635  uint32_t currentFrameIndex,
3636  size_t* pLostAllocationCount);
3637 
3638  VmaDefragmentator* EnsureDefragmentator(
3639  VmaAllocator hAllocator,
3640  uint32_t currentFrameIndex);
3641 
3642  VkResult Defragment(
3643  VmaDefragmentationStats* pDefragmentationStats,
3644  VkDeviceSize& maxBytesToMove,
3645  uint32_t& maxAllocationsToMove);
3646 
3647  void DestroyDefragmentator();
3648 
3649 private:
3650  friend class VmaDefragmentator;
3651 
3652  const VmaAllocator m_hAllocator;
3653  const uint32_t m_MemoryTypeIndex;
3654  const VkDeviceSize m_PreferredBlockSize;
3655  const size_t m_MinBlockCount;
3656  const size_t m_MaxBlockCount;
3657  const VkDeviceSize m_BufferImageGranularity;
3658  const uint32_t m_FrameInUseCount;
3659  const bool m_IsCustomPool;
3660  VMA_MUTEX m_Mutex;
3661  // Incrementally sorted by sumFreeSize, ascending.
3662  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3663  /* There can be at most one block that is completely empty - a
3664  hysteresis to avoid the pessimistic case of alternating creation and
3665  destruction of a VkDeviceMemory. */
3666  bool m_HasEmptyBlock;
3667  VmaDefragmentator* m_pDefragmentator;
3668 
3669  // Finds and removes given block from vector.
3670  void Remove(VmaDeviceMemoryBlock* pBlock);
3671 
3672  // Performs a single step in sorting m_Blocks. They may not be fully
3673  // sorted after this call.
3674  void IncrementallySortBlocks();
3675 
3676  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3677 };
3678 
3679 struct VmaPool_T
3680 {
3681 public:
3682  VmaBlockVector m_BlockVector;
3683 
3684  // Takes ownership.
3685  VmaPool_T(
3686  VmaAllocator hAllocator,
3687  const VmaPoolCreateInfo& createInfo);
3688  ~VmaPool_T();
3689 
3690  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3691 
3692 #if VMA_STATS_STRING_ENABLED
3693  //void PrintDetailedMap(class VmaStringBuilder& sb);
3694 #endif
3695 };
3696 
3697 class VmaDefragmentator
3698 {
3699  const VmaAllocator m_hAllocator;
3700  VmaBlockVector* const m_pBlockVector;
3701  uint32_t m_CurrentFrameIndex;
3702  VkDeviceSize m_BytesMoved;
3703  uint32_t m_AllocationsMoved;
3704 
3705  struct AllocationInfo
3706  {
3707  VmaAllocation m_hAllocation;
3708  VkBool32* m_pChanged;
3709 
3710  AllocationInfo() :
3711  m_hAllocation(VK_NULL_HANDLE),
3712  m_pChanged(VMA_NULL)
3713  {
3714  }
3715  };
3716 
3717  struct AllocationInfoSizeGreater
3718  {
3719  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3720  {
3721  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3722  }
3723  };
3724 
3725  // Used between AddAllocation and Defragment.
3726  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3727 
3728  struct BlockInfo
3729  {
3730  VmaDeviceMemoryBlock* m_pBlock;
3731  bool m_HasNonMovableAllocations;
3732  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3733 
3734  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3735  m_pBlock(VMA_NULL),
3736  m_HasNonMovableAllocations(true),
3737  m_Allocations(pAllocationCallbacks),
3738  m_pMappedDataForDefragmentation(VMA_NULL)
3739  {
3740  }
3741 
3742  void CalcHasNonMovableAllocations()
3743  {
3744  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3745  const size_t defragmentAllocCount = m_Allocations.size();
3746  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3747  }
3748 
3749  void SortAllocationsBySizeDescecnding()
3750  {
3751  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3752  }
3753 
3754  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3755  void Unmap(VmaAllocator hAllocator);
3756 
3757  private:
3758  // Not null if mapped for defragmentation only, not originally mapped.
3759  void* m_pMappedDataForDefragmentation;
3760  };
3761 
3762  struct BlockPointerLess
3763  {
3764  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3765  {
3766  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3767  }
3768  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3769  {
3770  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3771  }
3772  };
3773 
3774  // 1. Blocks with some non-movable allocations go first.
3775  // 2. Blocks with smaller sumFreeSize go first.
3776  struct BlockInfoCompareMoveDestination
3777  {
3778  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3779  {
3780  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3781  {
3782  return true;
3783  }
3784  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3785  {
3786  return false;
3787  }
3788  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3789  {
3790  return true;
3791  }
3792  return false;
3793  }
3794  };
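// Editorial note: under this ordering, a call along the lines of
// VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination())
// would place the fullest blocks that already hold non-movable allocations
// first, so a defragmentation round tends to pack movable allocations into
// blocks that cannot be emptied anyway.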
3795 
3796  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3797  BlockInfoVector m_Blocks;
3798 
3799  VkResult DefragmentRound(
3800  VkDeviceSize maxBytesToMove,
3801  uint32_t maxAllocationsToMove);
3802 
3803  static bool MoveMakesSense(
3804  size_t dstBlockIndex, VkDeviceSize dstOffset,
3805  size_t srcBlockIndex, VkDeviceSize srcOffset);
3806 
3807 public:
3808  VmaDefragmentator(
3809  VmaAllocator hAllocator,
3810  VmaBlockVector* pBlockVector,
3811  uint32_t currentFrameIndex);
3812 
3813  ~VmaDefragmentator();
3814 
3815  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3816  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3817 
3818  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3819 
3820  VkResult Defragment(
3821  VkDeviceSize maxBytesToMove,
3822  uint32_t maxAllocationsToMove);
3823 };
3824 
3825 // Main allocator object.
3826 struct VmaAllocator_T
3827 {
3828  bool m_UseMutex;
3829  bool m_UseKhrDedicatedAllocation;
3830  VkDevice m_hDevice;
3831  bool m_AllocationCallbacksSpecified;
3832  VkAllocationCallbacks m_AllocationCallbacks;
3833  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3834 
3835  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if no limit is set for that heap.
3836  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3837  VMA_MUTEX m_HeapSizeLimitMutex;
3838 
3839  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3840  VkPhysicalDeviceMemoryProperties m_MemProps;
3841 
3842  // Default pools.
3843  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3844 
3845  // Each vector is sorted by memory (handle value).
3846  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3847  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3848  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3849 
3850  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3851  ~VmaAllocator_T();
3852 
3853  const VkAllocationCallbacks* GetAllocationCallbacks() const
3854  {
3855  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3856  }
3857  const VmaVulkanFunctions& GetVulkanFunctions() const
3858  {
3859  return m_VulkanFunctions;
3860  }
3861 
3862  VkDeviceSize GetBufferImageGranularity() const
3863  {
3864  return VMA_MAX(
3865  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3866  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3867  }
3868 
3869  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3870  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3871 
3872  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3873  {
3874  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3875  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3876  }
3877 
3878  void GetBufferMemoryRequirements(
3879  VkBuffer hBuffer,
3880  VkMemoryRequirements& memReq,
3881  bool& requiresDedicatedAllocation,
3882  bool& prefersDedicatedAllocation) const;
3883  void GetImageMemoryRequirements(
3884  VkImage hImage,
3885  VkMemoryRequirements& memReq,
3886  bool& requiresDedicatedAllocation,
3887  bool& prefersDedicatedAllocation) const;
3888 
3889  // Main allocation function.
3890  VkResult AllocateMemory(
3891  const VkMemoryRequirements& vkMemReq,
3892  bool requiresDedicatedAllocation,
3893  bool prefersDedicatedAllocation,
3894  VkBuffer dedicatedBuffer,
3895  VkImage dedicatedImage,
3896  const VmaAllocationCreateInfo& createInfo,
3897  VmaSuballocationType suballocType,
3898  VmaAllocation* pAllocation);
3899 
3900  // Main deallocation function.
3901  void FreeMemory(const VmaAllocation allocation);
3902 
3903  void CalculateStats(VmaStats* pStats);
3904 
3905 #if VMA_STATS_STRING_ENABLED
3906  void PrintDetailedMap(class VmaJsonWriter& json);
3907 #endif
3908 
3909  VkResult Defragment(
3910  VmaAllocation* pAllocations,
3911  size_t allocationCount,
3912  VkBool32* pAllocationsChanged,
3913  const VmaDefragmentationInfo* pDefragmentationInfo,
3914  VmaDefragmentationStats* pDefragmentationStats);
3915 
3916  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3917 
3918  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3919  void DestroyPool(VmaPool pool);
3920  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3921 
3922  void SetCurrentFrameIndex(uint32_t frameIndex);
3923 
3924  void MakePoolAllocationsLost(
3925  VmaPool hPool,
3926  size_t* pLostAllocationCount);
3927 
3928  void CreateLostAllocation(VmaAllocation* pAllocation);
3929 
3930  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3931  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3932 
3933  VkResult Map(VmaAllocation hAllocation, void** ppData);
3934  void Unmap(VmaAllocation hAllocation);
3935 
3936 private:
3937  VkDeviceSize m_PreferredLargeHeapBlockSize;
3938  VkDeviceSize m_PreferredSmallHeapBlockSize;
3939 
3940  VkPhysicalDevice m_PhysicalDevice;
3941  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3942 
3943  VMA_MUTEX m_PoolsMutex;
3944  // Protected by m_PoolsMutex. Sorted by pointer value.
3945  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3946 
3947  VmaVulkanFunctions m_VulkanFunctions;
3948 
3949  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3950 
3951  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3952 
3953  VkResult AllocateMemoryOfType(
3954  const VkMemoryRequirements& vkMemReq,
3955  bool dedicatedAllocation,
3956  VkBuffer dedicatedBuffer,
3957  VkImage dedicatedImage,
3958  const VmaAllocationCreateInfo& createInfo,
3959  uint32_t memTypeIndex,
3960  VmaSuballocationType suballocType,
3961  VmaAllocation* pAllocation);
3962 
3963  // Allocates and registers new VkDeviceMemory specifically for single allocation.
3964  VkResult AllocateDedicatedMemory(
3965  VkDeviceSize size,
3966  VmaSuballocationType suballocType,
3967  uint32_t memTypeIndex,
3968  bool map,
3969  bool isUserDataString,
3970  void* pUserData,
3971  VkBuffer dedicatedBuffer,
3972  VkImage dedicatedImage,
3973  VmaAllocation* pAllocation);
3974 
3975  // Frees the given allocation, which must have been created as dedicated memory, and unregisters it.
3976  void FreeDedicatedMemory(VmaAllocation allocation);
3977 };
3978 
3980 // Memory allocation #2 after VmaAllocator_T definition
3981 
3982 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3983 {
3984  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3985 }
3986 
3987 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3988 {
3989  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3990 }
3991 
3992 template<typename T>
3993 static T* VmaAllocate(VmaAllocator hAllocator)
3994 {
3995  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3996 }
3997 
3998 template<typename T>
3999 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4000 {
4001  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4002 }
4003 
4004 template<typename T>
4005 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4006 {
4007  if(ptr != VMA_NULL)
4008  {
4009  ptr->~T();
4010  VmaFree(hAllocator, ptr);
4011  }
4012 }
4013 
4014 template<typename T>
4015 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4016 {
4017  if(ptr != VMA_NULL)
4018  {
4019  for(size_t i = count; i--; )
4020  ptr[i].~T();
4021  VmaFree(hAllocator, ptr);
4022  }
4023 }
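// Editorial sketch - VmaAllocate/VmaAllocateArray return raw, uninitialized
// storage, so construction is the caller's job; a hypothetical round trip:
//
//     VmaSuballocation* p = VmaAllocate<VmaSuballocation>(hAllocator);
//     new(p) VmaSuballocation(); // placement-construct in the raw storage
//     vma_delete(hAllocator, p); // runs ~VmaSuballocation(), then VmaFree()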
4024 
4026 // VmaStringBuilder
4027 
4028 #if VMA_STATS_STRING_ENABLED
4029 
4030 class VmaStringBuilder
4031 {
4032 public:
4033  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4034  size_t GetLength() const { return m_Data.size(); }
4035  const char* GetData() const { return m_Data.data(); }
4036 
4037  void Add(char ch) { m_Data.push_back(ch); }
4038  void Add(const char* pStr);
4039  void AddNewLine() { Add('\n'); }
4040  void AddNumber(uint32_t num);
4041  void AddNumber(uint64_t num);
4042  void AddPointer(const void* ptr);
4043 
4044 private:
4045  VmaVector< char, VmaStlAllocator<char> > m_Data;
4046 };
4047 
4048 void VmaStringBuilder::Add(const char* pStr)
4049 {
4050  const size_t strLen = strlen(pStr);
4051  if(strLen > 0)
4052  {
4053  const size_t oldCount = m_Data.size();
4054  m_Data.resize(oldCount + strLen);
4055  memcpy(m_Data.data() + oldCount, pStr, strLen);
4056  }
4057 }
4058 
4059 void VmaStringBuilder::AddNumber(uint32_t num)
4060 {
4061  char buf[11];
4062  VmaUint32ToStr(buf, sizeof(buf), num);
4063  Add(buf);
4064 }
4065 
4066 void VmaStringBuilder::AddNumber(uint64_t num)
4067 {
4068  char buf[21];
4069  VmaUint64ToStr(buf, sizeof(buf), num);
4070  Add(buf);
4071 }
4072 
4073 void VmaStringBuilder::AddPointer(const void* ptr)
4074 {
4075  char buf[21];
4076  VmaPtrToStr(buf, sizeof(buf), ptr);
4077  Add(buf);
4078 }
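// Editorial usage sketch (illustrative only) - the builder stores characters
// without a NUL terminator, so GetData()/GetLength() together define the string:
//
//     VmaStringBuilder sb(hAllocator);
//     sb.Add("heapCount = ");
//     sb.AddNumber(2u); // selects the uint32_t overload
//     // sb.GetData() now points at "heapCount = 2", sb.GetLength() == 13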
4079 
4080 #endif // #if VMA_STATS_STRING_ENABLED
4081 
4083 // VmaJsonWriter
4084 
4085 #if VMA_STATS_STRING_ENABLED
4086 
4087 class VmaJsonWriter
4088 {
4089 public:
4090  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4091  ~VmaJsonWriter();
4092 
4093  void BeginObject(bool singleLine = false);
4094  void EndObject();
4095 
4096  void BeginArray(bool singleLine = false);
4097  void EndArray();
4098 
4099  void WriteString(const char* pStr);
4100  void BeginString(const char* pStr = VMA_NULL);
4101  void ContinueString(const char* pStr);
4102  void ContinueString(uint32_t n);
4103  void ContinueString(uint64_t n);
4104  void ContinueString_Pointer(const void* ptr);
4105  void EndString(const char* pStr = VMA_NULL);
4106 
4107  void WriteNumber(uint32_t n);
4108  void WriteNumber(uint64_t n);
4109  void WriteBool(bool b);
4110  void WriteNull();
4111 
4112 private:
4113  static const char* const INDENT;
4114 
4115  enum COLLECTION_TYPE
4116  {
4117  COLLECTION_TYPE_OBJECT,
4118  COLLECTION_TYPE_ARRAY,
4119  };
4120  struct StackItem
4121  {
4122  COLLECTION_TYPE type;
4123  uint32_t valueCount;
4124  bool singleLineMode;
4125  };
4126 
4127  VmaStringBuilder& m_SB;
4128  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4129  bool m_InsideString;
4130 
4131  void BeginValue(bool isString);
4132  void WriteIndent(bool oneLess = false);
4133 };
4134 
4135 const char* const VmaJsonWriter::INDENT = " ";
4136 
4137 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4138  m_SB(sb),
4139  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4140  m_InsideString(false)
4141 {
4142 }
4143 
4144 VmaJsonWriter::~VmaJsonWriter()
4145 {
4146  VMA_ASSERT(!m_InsideString);
4147  VMA_ASSERT(m_Stack.empty());
4148 }
4149 
4150 void VmaJsonWriter::BeginObject(bool singleLine)
4151 {
4152  VMA_ASSERT(!m_InsideString);
4153 
4154  BeginValue(false);
4155  m_SB.Add('{');
4156 
4157  StackItem item;
4158  item.type = COLLECTION_TYPE_OBJECT;
4159  item.valueCount = 0;
4160  item.singleLineMode = singleLine;
4161  m_Stack.push_back(item);
4162 }
4163 
4164 void VmaJsonWriter::EndObject()
4165 {
4166  VMA_ASSERT(!m_InsideString);
4167 
4168  WriteIndent(true);
4169  m_SB.Add('}');
4170 
4171  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4172  m_Stack.pop_back();
4173 }
4174 
4175 void VmaJsonWriter::BeginArray(bool singleLine)
4176 {
4177  VMA_ASSERT(!m_InsideString);
4178 
4179  BeginValue(false);
4180  m_SB.Add('[');
4181 
4182  StackItem item;
4183  item.type = COLLECTION_TYPE_ARRAY;
4184  item.valueCount = 0;
4185  item.singleLineMode = singleLine;
4186  m_Stack.push_back(item);
4187 }
4188 
4189 void VmaJsonWriter::EndArray()
4190 {
4191  VMA_ASSERT(!m_InsideString);
4192 
4193  WriteIndent(true);
4194  m_SB.Add(']');
4195 
4196  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4197  m_Stack.pop_back();
4198 }
4199 
4200 void VmaJsonWriter::WriteString(const char* pStr)
4201 {
4202  BeginString(pStr);
4203  EndString();
4204 }
4205 
4206 void VmaJsonWriter::BeginString(const char* pStr)
4207 {
4208  VMA_ASSERT(!m_InsideString);
4209 
4210  BeginValue(true);
4211  m_SB.Add('"');
4212  m_InsideString = true;
4213  if(pStr != VMA_NULL && pStr[0] != '\0')
4214  {
4215  ContinueString(pStr);
4216  }
4217 }
4218 
4219 void VmaJsonWriter::ContinueString(const char* pStr)
4220 {
4221  VMA_ASSERT(m_InsideString);
4222 
4223  const size_t strLen = strlen(pStr);
4224  for(size_t i = 0; i < strLen; ++i)
4225  {
4226  char ch = pStr[i];
4227  if(ch == '\\')
4228  {
4229  m_SB.Add("\\\\");
4230  }
4231  else if(ch == '"')
4232  {
4233  m_SB.Add("\\\"");
4234  }
4235  else if(ch >= 32)
4236  {
4237  m_SB.Add(ch);
4238  }
4239  else switch(ch)
4240  {
4241  case '\b':
4242  m_SB.Add("\\b");
4243  break;
4244  case '\f':
4245  m_SB.Add("\\f");
4246  break;
4247  case '\n':
4248  m_SB.Add("\\n");
4249  break;
4250  case '\r':
4251  m_SB.Add("\\r");
4252  break;
4253  case '\t':
4254  m_SB.Add("\\t");
4255  break;
4256  default:
4257  VMA_ASSERT(0 && "Character not currently supported.");
4258  break;
4259  }
4260  }
4261 }
4262 
4263 void VmaJsonWriter::ContinueString(uint32_t n)
4264 {
4265  VMA_ASSERT(m_InsideString);
4266  m_SB.AddNumber(n);
4267 }
4268 
4269 void VmaJsonWriter::ContinueString(uint64_t n)
4270 {
4271  VMA_ASSERT(m_InsideString);
4272  m_SB.AddNumber(n);
4273 }
4274 
4275 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4276 {
4277  VMA_ASSERT(m_InsideString);
4278  m_SB.AddPointer(ptr);
4279 }
4280 
4281 void VmaJsonWriter::EndString(const char* pStr)
4282 {
4283  VMA_ASSERT(m_InsideString);
4284  if(pStr != VMA_NULL && pStr[0] != '\0')
4285  {
4286  ContinueString(pStr);
4287  }
4288  m_SB.Add('"');
4289  m_InsideString = false;
4290 }
4291 
4292 void VmaJsonWriter::WriteNumber(uint32_t n)
4293 {
4294  VMA_ASSERT(!m_InsideString);
4295  BeginValue(false);
4296  m_SB.AddNumber(n);
4297 }
4298 
4299 void VmaJsonWriter::WriteNumber(uint64_t n)
4300 {
4301  VMA_ASSERT(!m_InsideString);
4302  BeginValue(false);
4303  m_SB.AddNumber(n);
4304 }
4305 
4306 void VmaJsonWriter::WriteBool(bool b)
4307 {
4308  VMA_ASSERT(!m_InsideString);
4309  BeginValue(false);
4310  m_SB.Add(b ? "true" : "false");
4311 }
4312 
4313 void VmaJsonWriter::WriteNull()
4314 {
4315  VMA_ASSERT(!m_InsideString);
4316  BeginValue(false);
4317  m_SB.Add("null");
4318 }
4319 
4320 void VmaJsonWriter::BeginValue(bool isString)
4321 {
4322  if(!m_Stack.empty())
4323  {
4324  StackItem& currItem = m_Stack.back();
4325  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4326  currItem.valueCount % 2 == 0)
4327  {
4328  VMA_ASSERT(isString);
4329  }
4330 
4331  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4332  currItem.valueCount % 2 != 0)
4333  {
4334  m_SB.Add(": ");
4335  }
4336  else if(currItem.valueCount > 0)
4337  {
4338  m_SB.Add(", ");
4339  WriteIndent();
4340  }
4341  else
4342  {
4343  WriteIndent();
4344  }
4345  ++currItem.valueCount;
4346  }
4347 }
4348 
4349 void VmaJsonWriter::WriteIndent(bool oneLess)
4350 {
4351  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4352  {
4353  m_SB.AddNewLine();
4354 
4355  size_t count = m_Stack.size();
4356  if(count > 0 && oneLess)
4357  {
4358  --count;
4359  }
4360  for(size_t i = 0; i < count; ++i)
4361  {
4362  m_SB.Add(INDENT);
4363  }
4364  }
4365 }
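// Editorial usage sketch: inside an object, keys (strings) and values must
// alternate - exactly what the valueCount % 2 checks above enforce.
// Illustrative only:
//
//     VmaStringBuilder sb(hAllocator);
//     VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
//     json.BeginObject();
//     json.WriteString("UsedBytes"); // key - BeginValue(true) asserts it is a string
//     json.WriteNumber(1024u);       // value - BeginValue(false) emits ": " first
//     json.EndObject();
//     // sb now holds: {\n "UsedBytes": 1024\n}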
4366 
4367 #endif // #if VMA_STATS_STRING_ENABLED
4368 
4370 
4371 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4372 {
4373  if(IsUserDataString())
4374  {
4375  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4376 
4377  FreeUserDataString(hAllocator);
4378 
4379  if(pUserData != VMA_NULL)
4380  {
4381  const char* const newStrSrc = (char*)pUserData;
4382  const size_t newStrLen = strlen(newStrSrc);
4383  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4384  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4385  m_pUserData = newStrDst;
4386  }
4387  }
4388  else
4389  {
4390  m_pUserData = pUserData;
4391  }
4392 }
4393 
4394 VkDeviceSize VmaAllocation_T::GetOffset() const
4395 {
4396  switch(m_Type)
4397  {
4398  case ALLOCATION_TYPE_BLOCK:
4399  return m_BlockAllocation.m_Offset;
4400  case ALLOCATION_TYPE_DEDICATED:
4401  return 0;
4402  default:
4403  VMA_ASSERT(0);
4404  return 0;
4405  }
4406 }
4407 
4408 VkDeviceMemory VmaAllocation_T::GetMemory() const
4409 {
4410  switch(m_Type)
4411  {
4412  case ALLOCATION_TYPE_BLOCK:
4413  return m_BlockAllocation.m_Block->m_hMemory;
4414  case ALLOCATION_TYPE_DEDICATED:
4415  return m_DedicatedAllocation.m_hMemory;
4416  default:
4417  VMA_ASSERT(0);
4418  return VK_NULL_HANDLE;
4419  }
4420 }
4421 
4422 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4423 {
4424  switch(m_Type)
4425  {
4426  case ALLOCATION_TYPE_BLOCK:
4427  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4428  case ALLOCATION_TYPE_DEDICATED:
4429  return m_DedicatedAllocation.m_MemoryTypeIndex;
4430  default:
4431  VMA_ASSERT(0);
4432  return UINT32_MAX;
4433  }
4434 }
4435 
4436 void* VmaAllocation_T::GetMappedData() const
4437 {
4438  switch(m_Type)
4439  {
4440  case ALLOCATION_TYPE_BLOCK:
4441  if(m_MapCount != 0)
4442  {
4443  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4444  VMA_ASSERT(pBlockData != VMA_NULL);
4445  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4446  }
4447  else
4448  {
4449  return VMA_NULL;
4450  }
4451  break;
4452  case ALLOCATION_TYPE_DEDICATED:
4453  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4454  return m_DedicatedAllocation.m_pMappedData;
4455  default:
4456  VMA_ASSERT(0);
4457  return VMA_NULL;
4458  }
4459 }
4460 
4461 bool VmaAllocation_T::CanBecomeLost() const
4462 {
4463  switch(m_Type)
4464  {
4465  case ALLOCATION_TYPE_BLOCK:
4466  return m_BlockAllocation.m_CanBecomeLost;
4467  case ALLOCATION_TYPE_DEDICATED:
4468  return false;
4469  default:
4470  VMA_ASSERT(0);
4471  return false;
4472  }
4473 }
4474 
4475 VmaPool VmaAllocation_T::GetPool() const
4476 {
4477  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4478  return m_BlockAllocation.m_hPool;
4479 }
4480 
4481 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4482 {
4483  VMA_ASSERT(CanBecomeLost());
4484 
4485  /*
4486  Warning: This is a carefully designed algorithm.
4487  Do not modify unless you really know what you're doing :)
4488  */
4489  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4490  for(;;)
4491  {
4492  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4493  {
4494  VMA_ASSERT(0);
4495  return false;
4496  }
4497  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4498  {
4499  return false;
4500  }
4501  else // Last use time earlier than current time.
4502  {
4503  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4504  {
4505  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4506  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4507  return true;
4508  }
4509  }
4510  }
4511 }
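// Editorial worked example (hypothetical values): with frameInUseCount = 2 and
// currentFrameIndex = 10, an allocation last used in frame 7 passes the test
// (7 + 2 < 10), so the CAS tries to replace 7 with VMA_FRAME_INDEX_LOST; one
// last used in frame 8 fails (8 + 2 >= 10) and stays valid. The for(;;) loop
// repeats only when compare_exchange_weak fails - another thread changed the
// frame index, or the exchange failed spuriously - and then re-evaluates with
// the freshly loaded value.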
4512 
4513 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4514 {
4515  VMA_ASSERT(IsUserDataString());
4516  if(m_pUserData != VMA_NULL)
4517  {
4518  char* const oldStr = (char*)m_pUserData;
4519  const size_t oldStrLen = strlen(oldStr);
4520  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4521  m_pUserData = VMA_NULL;
4522  }
4523 }
4524 
4525 void VmaAllocation_T::BlockAllocMap()
4526 {
4527  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4528 
4529  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4530  {
4531  ++m_MapCount;
4532  }
4533  else
4534  {
4535  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4536  }
4537 }
4538 
4539 void VmaAllocation_T::BlockAllocUnmap()
4540 {
4541  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4542 
4543  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4544  {
4545  --m_MapCount;
4546  }
4547  else
4548  {
4549  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4550  }
4551 }
4552 
4553 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4554 {
4555  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4556 
4557  if(m_MapCount != 0)
4558  {
4559  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4560  {
4561  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4562  *ppData = m_DedicatedAllocation.m_pMappedData;
4563  ++m_MapCount;
4564  return VK_SUCCESS;
4565  }
4566  else
4567  {
4568  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4569  return VK_ERROR_MEMORY_MAP_FAILED;
4570  }
4571  }
4572  else
4573  {
4574  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4575  hAllocator->m_hDevice,
4576  m_DedicatedAllocation.m_hMemory,
4577  0, // offset
4578  VK_WHOLE_SIZE,
4579  0, // flags
4580  ppData);
4581  if(result == VK_SUCCESS)
4582  {
4583  m_DedicatedAllocation.m_pMappedData = *ppData;
4584  m_MapCount = 1;
4585  }
4586  return result;
4587  }
4588 }
4589 
4590 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4591 {
4592  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4593 
4594  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4595  {
4596  --m_MapCount;
4597  if(m_MapCount == 0)
4598  {
4599  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4600  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4601  hAllocator->m_hDevice,
4602  m_DedicatedAllocation.m_hMemory);
4603  }
4604  }
4605  else
4606  {
4607  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4608  }
4609 }
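// Editorial sketch of the dedicated-allocation mapping contract, normally
// reached through vmaMapMemory()/vmaUnmapMemory() (hAlloc is a hypothetical
// VmaAllocation; illustrative only):
//
//     void* pData = VMA_NULL;
//     if(hAlloc->DedicatedAllocMap(hAllocator, &pData) == VK_SUCCESS)
//     {
//         // the first call mapped the whole VkDeviceMemory; nested calls only
//         // increment the 7-bit reference count held in m_MapCount
//         hAlloc->DedicatedAllocUnmap(hAllocator); // unmaps once the count hits 0
//     }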
4610 
4611 #if VMA_STATS_STRING_ENABLED
4612 
4613 // Entries correspond to values of enum VmaSuballocationType.
4614 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4615  "FREE",
4616  "UNKNOWN",
4617  "BUFFER",
4618  "IMAGE_UNKNOWN",
4619  "IMAGE_LINEAR",
4620  "IMAGE_OPTIMAL",
4621 };
4622 
4623 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4624 {
4625  json.BeginObject();
4626 
4627  json.WriteString("Blocks");
4628  json.WriteNumber(stat.blockCount);
4629 
4630  json.WriteString("Allocations");
4631  json.WriteNumber(stat.allocationCount);
4632 
4633  json.WriteString("UnusedRanges");
4634  json.WriteNumber(stat.unusedRangeCount);
4635 
4636  json.WriteString("UsedBytes");
4637  json.WriteNumber(stat.usedBytes);
4638 
4639  json.WriteString("UnusedBytes");
4640  json.WriteNumber(stat.unusedBytes);
4641 
4642  if(stat.allocationCount > 1)
4643  {
4644  json.WriteString("AllocationSize");
4645  json.BeginObject(true);
4646  json.WriteString("Min");
4647  json.WriteNumber(stat.allocationSizeMin);
4648  json.WriteString("Avg");
4649  json.WriteNumber(stat.allocationSizeAvg);
4650  json.WriteString("Max");
4651  json.WriteNumber(stat.allocationSizeMax);
4652  json.EndObject();
4653  }
4654 
4655  if(stat.unusedRangeCount > 1)
4656  {
4657  json.WriteString("UnusedRangeSize");
4658  json.BeginObject(true);
4659  json.WriteString("Min");
4660  json.WriteNumber(stat.unusedRangeSizeMin);
4661  json.WriteString("Avg");
4662  json.WriteNumber(stat.unusedRangeSizeAvg);
4663  json.WriteString("Max");
4664  json.WriteNumber(stat.unusedRangeSizeMax);
4665  json.EndObject();
4666  }
4667 
4668  json.EndObject();
4669 }
4670 
4671 #endif // #if VMA_STATS_STRING_ENABLED
4672 
4673 struct VmaSuballocationItemSizeLess
4674 {
4675  bool operator()(
4676  const VmaSuballocationList::iterator lhs,
4677  const VmaSuballocationList::iterator rhs) const
4678  {
4679  return lhs->size < rhs->size;
4680  }
4681  bool operator()(
4682  const VmaSuballocationList::iterator lhs,
4683  VkDeviceSize rhsSize) const
4684  {
4685  return lhs->size < rhsSize;
4686  }
4687 };
4688 
4690 // class VmaBlockMetadata
4691 
4692 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4693  m_Size(0),
4694  m_FreeCount(0),
4695  m_SumFreeSize(0),
4696  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4697  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4698 {
4699 }
4700 
4701 VmaBlockMetadata::~VmaBlockMetadata()
4702 {
4703 }
4704 
4705 void VmaBlockMetadata::Init(VkDeviceSize size)
4706 {
4707  m_Size = size;
4708  m_FreeCount = 1;
4709  m_SumFreeSize = size;
4710 
4711  VmaSuballocation suballoc = {};
4712  suballoc.offset = 0;
4713  suballoc.size = size;
4714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4715  suballoc.hAllocation = VK_NULL_HANDLE;
4716 
4717  m_Suballocations.push_back(suballoc);
4718  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4719  --suballocItem;
4720  m_FreeSuballocationsBySize.push_back(suballocItem);
4721 }
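// Editorial note: after Init(size) the metadata holds exactly one FREE
// suballocation spanning [0, size), so IsEmpty() returns true and both
// GetSumFreeSize() and GetUnusedRangeSizeMax() equal size - e.g.
// Init(268435456) leaves m_FreeCount == 1 and m_SumFreeSize == 268435456.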
4722 
4723 bool VmaBlockMetadata::Validate() const
4724 {
4725  if(m_Suballocations.empty())
4726  {
4727  return false;
4728  }
4729 
4730  // Expected offset of a new suballocation, as calculated from the previous ones.
4731  VkDeviceSize calculatedOffset = 0;
4732  // Expected number of free suballocations as calculated from traversing their list.
4733  uint32_t calculatedFreeCount = 0;
4734  // Expected sum size of free suballocations as calculated from traversing their list.
4735  VkDeviceSize calculatedSumFreeSize = 0;
4736  // Expected number of free suballocations that should be registered in
4737  // m_FreeSuballocationsBySize calculated from traversing their list.
4738  size_t freeSuballocationsToRegister = 0;
4739  // True if the previously visited suballocation was free.
4740  bool prevFree = false;
4741 
4742  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4743  suballocItem != m_Suballocations.cend();
4744  ++suballocItem)
4745  {
4746  const VmaSuballocation& subAlloc = *suballocItem;
4747 
4748  // Actual offset of this suballocation doesn't match expected one.
4749  if(subAlloc.offset != calculatedOffset)
4750  {
4751  return false;
4752  }
4753 
4754  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4755  // Two adjacent free suballocations are invalid. They should be merged.
4756  if(prevFree && currFree)
4757  {
4758  return false;
4759  }
4760  prevFree = currFree;
4761 
4762  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4763  {
4764  return false;
4765  }
4766 
4767  if(currFree)
4768  {
4769  calculatedSumFreeSize += subAlloc.size;
4770  ++calculatedFreeCount;
4771  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4772  {
4773  ++freeSuballocationsToRegister;
4774  }
4775  }
4776 
4777  calculatedOffset += subAlloc.size;
4778  }
4779 
4780  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4781  // match expected one.
4782  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4783  {
4784  return false;
4785  }
4786 
4787  VkDeviceSize lastSize = 0;
4788  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4789  {
4790  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4791 
4792  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4793  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4794  {
4795  return false;
4796  }
4797  // They must be sorted by size ascending.
4798  if(suballocItem->size < lastSize)
4799  {
4800  return false;
4801  }
4802 
4803  lastSize = suballocItem->size;
4804  }
4805 
4806  // Check if totals match calculated values.
4807  return
4808  ValidateFreeSuballocationList() &&
4809  (calculatedOffset == m_Size) &&
4810  (calculatedSumFreeSize == m_SumFreeSize) &&
4811  (calculatedFreeCount == m_FreeCount);
4812 }
4813 
4814 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4815 {
4816  if(!m_FreeSuballocationsBySize.empty())
4817  {
4818  return m_FreeSuballocationsBySize.back()->size;
4819  }
4820  else
4821  {
4822  return 0;
4823  }
4824 }
4825 
4826 bool VmaBlockMetadata::IsEmpty() const
4827 {
4828  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4829 }
4830 
4831 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4832 {
4833  outInfo.blockCount = 1;
4834 
4835  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4836  outInfo.allocationCount = rangeCount - m_FreeCount;
4837  outInfo.unusedRangeCount = m_FreeCount;
4838 
4839  outInfo.unusedBytes = m_SumFreeSize;
4840  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4841 
4842  outInfo.allocationSizeMin = UINT64_MAX;
4843  outInfo.allocationSizeMax = 0;
4844  outInfo.unusedRangeSizeMin = UINT64_MAX;
4845  outInfo.unusedRangeSizeMax = 0;
4846 
4847  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4848  suballocItem != m_Suballocations.cend();
4849  ++suballocItem)
4850  {
4851  const VmaSuballocation& suballoc = *suballocItem;
4852  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4853  {
4854  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4855  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4856  }
4857  else
4858  {
4859  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4860  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4861  }
4862  }
4863 }
4864 
4865 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4866 {
4867  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4868 
4869  inoutStats.size += m_Size;
4870  inoutStats.unusedSize += m_SumFreeSize;
4871  inoutStats.allocationCount += rangeCount - m_FreeCount;
4872  inoutStats.unusedRangeCount += m_FreeCount;
4873  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4874 }
4875 
4876 #if VMA_STATS_STRING_ENABLED
4877 
4878 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4879 {
4880  json.BeginObject();
4881 
4882  json.WriteString("TotalBytes");
4883  json.WriteNumber(m_Size);
4884 
4885  json.WriteString("UnusedBytes");
4886  json.WriteNumber(m_SumFreeSize);
4887 
4888  json.WriteString("Allocations");
4889  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4890 
4891  json.WriteString("UnusedRanges");
4892  json.WriteNumber(m_FreeCount);
4893 
4894  json.WriteString("Suballocations");
4895  json.BeginArray();
4896  size_t i = 0;
4897  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4898  suballocItem != m_Suballocations.cend();
4899  ++suballocItem, ++i)
4900  {
4901  json.BeginObject(true);
4902 
4903  json.WriteString("Type");
4904  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4905 
4906  json.WriteString("Size");
4907  json.WriteNumber(suballocItem->size);
4908 
4909  json.WriteString("Offset");
4910  json.WriteNumber(suballocItem->offset);
4911 
4912  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4913  {
4914  const void* pUserData = suballocItem->hAllocation->GetUserData();
4915  if(pUserData != VMA_NULL)
4916  {
4917  json.WriteString("UserData");
4918  if(suballocItem->hAllocation->IsUserDataString())
4919  {
4920  json.WriteString((const char*)pUserData);
4921  }
4922  else
4923  {
4924  json.BeginString();
4925  json.ContinueString_Pointer(pUserData);
4926  json.EndString();
4927  }
4928  }
4929  }
4930 
4931  json.EndObject();
4932  }
4933  json.EndArray();
4934 
4935  json.EndObject();
4936 }
4937 
4938 #endif // #if VMA_STATS_STRING_ENABLED
4939 
4940 /*
4941 How many suitable free suballocations to analyze before choosing best one.
4942 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
4943  be chosen.
4944 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
4945  suballocations will be analyzed and the best one will be chosen.
4946 - Any other value is also acceptable.
4947 */
4948 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
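// Editorial note: in the VMA_BEST_FIT branch below, VmaBinaryFindFirstNotLess
// behaves like std::lower_bound over m_FreeSuballocationsBySize - it returns
// the first registered free suballocation whose size is not less than
// allocSize - so the linear scan that follows starts at the tightest candidate
// and only moves toward larger ones.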
4949 
4950 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4951 {
4952  VMA_ASSERT(IsEmpty());
4953  pAllocationRequest->offset = 0;
4954  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4955  pAllocationRequest->sumItemSize = 0;
4956  pAllocationRequest->item = m_Suballocations.begin();
4957  pAllocationRequest->itemsToMakeLostCount = 0;
4958 }
4959 
4960 bool VmaBlockMetadata::CreateAllocationRequest(
4961  uint32_t currentFrameIndex,
4962  uint32_t frameInUseCount,
4963  VkDeviceSize bufferImageGranularity,
4964  VkDeviceSize allocSize,
4965  VkDeviceSize allocAlignment,
4966  VmaSuballocationType allocType,
4967  bool canMakeOtherLost,
4968  VmaAllocationRequest* pAllocationRequest)
4969 {
4970  VMA_ASSERT(allocSize > 0);
4971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4972  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4973  VMA_HEAVY_ASSERT(Validate());
4974 
4975  // There is not enough total free space in this block to fulfill the request: Early return.
4976  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4977  {
4978  return false;
4979  }
4980 
4981  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
4982  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4983  if(freeSuballocCount > 0)
4984  {
4985  if(VMA_BEST_FIT)
4986  {
4987  // Find first free suballocation with size not less than allocSize.
4988  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4989  m_FreeSuballocationsBySize.data(),
4990  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4991  allocSize,
4992  VmaSuballocationItemSizeLess());
4993  size_t index = it - m_FreeSuballocationsBySize.data();
4994  for(; index < freeSuballocCount; ++index)
4995  {
4996  if(CheckAllocation(
4997  currentFrameIndex,
4998  frameInUseCount,
4999  bufferImageGranularity,
5000  allocSize,
5001  allocAlignment,
5002  allocType,
5003  m_FreeSuballocationsBySize[index],
5004  false, // canMakeOtherLost
5005  &pAllocationRequest->offset,
5006  &pAllocationRequest->itemsToMakeLostCount,
5007  &pAllocationRequest->sumFreeSize,
5008  &pAllocationRequest->sumItemSize))
5009  {
5010  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5011  return true;
5012  }
5013  }
5014  }
5015  else
5016  {
5017  // Search starting from the biggest suballocations.
5018  for(size_t index = freeSuballocCount; index--; )
5019  {
5020  if(CheckAllocation(
5021  currentFrameIndex,
5022  frameInUseCount,
5023  bufferImageGranularity,
5024  allocSize,
5025  allocAlignment,
5026  allocType,
5027  m_FreeSuballocationsBySize[index],
5028  false, // canMakeOtherLost
5029  &pAllocationRequest->offset,
5030  &pAllocationRequest->itemsToMakeLostCount,
5031  &pAllocationRequest->sumFreeSize,
5032  &pAllocationRequest->sumItemSize))
5033  {
5034  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5035  return true;
5036  }
5037  }
5038  }
5039  }
5040 
5041  if(canMakeOtherLost)
5042  {
5043  // Brute-force algorithm. TODO: Come up with something better.
5044 
5045  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5046  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5047 
5048  VmaAllocationRequest tmpAllocRequest = {};
5049  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5050  suballocIt != m_Suballocations.end();
5051  ++suballocIt)
5052  {
5053  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5054  suballocIt->hAllocation->CanBecomeLost())
5055  {
5056  if(CheckAllocation(
5057  currentFrameIndex,
5058  frameInUseCount,
5059  bufferImageGranularity,
5060  allocSize,
5061  allocAlignment,
5062  allocType,
5063  suballocIt,
5064  canMakeOtherLost,
5065  &tmpAllocRequest.offset,
5066  &tmpAllocRequest.itemsToMakeLostCount,
5067  &tmpAllocRequest.sumFreeSize,
5068  &tmpAllocRequest.sumItemSize))
5069  {
5070  tmpAllocRequest.item = suballocIt;
5071 
5072  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5073  {
5074  *pAllocationRequest = tmpAllocRequest;
5075  }
5076  }
5077  }
5078  }
5079 
5080  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5081  {
5082  return true;
5083  }
5084  }
5085 
5086  return false;
5087 }
5088 
5089 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5090  uint32_t currentFrameIndex,
5091  uint32_t frameInUseCount,
5092  VmaAllocationRequest* pAllocationRequest)
5093 {
5094  while(pAllocationRequest->itemsToMakeLostCount > 0)
5095  {
5096  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5097  {
5098  ++pAllocationRequest->item;
5099  }
5100  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5101  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5102  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5103  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5104  {
5105  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5106  --pAllocationRequest->itemsToMakeLostCount;
5107  }
5108  else
5109  {
5110  return false;
5111  }
5112  }
5113 
5114  VMA_HEAVY_ASSERT(Validate());
5115  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5116  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5117 
5118  return true;
5119 }
5120 
5121 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5122 {
5123  uint32_t lostAllocationCount = 0;
5124  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5125  it != m_Suballocations.end();
5126  ++it)
5127  {
5128  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5129  it->hAllocation->CanBecomeLost() &&
5130  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5131  {
5132  it = FreeSuballocation(it);
5133  ++lostAllocationCount;
5134  }
5135  }
5136  return lostAllocationCount;
5137 }
5138 
5139 void VmaBlockMetadata::Alloc(
5140  const VmaAllocationRequest& request,
5141  VmaSuballocationType type,
5142  VkDeviceSize allocSize,
5143  VmaAllocation hAllocation)
5144 {
5145  VMA_ASSERT(request.item != m_Suballocations.end());
5146  VmaSuballocation& suballoc = *request.item;
5147  // Given suballocation is a free block.
5148  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5149  // Given offset is inside this suballocation.
5150  VMA_ASSERT(request.offset >= suballoc.offset);
5151  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5152  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5153  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5154 
5155  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5156  // it to become used.
5157  UnregisterFreeSuballocation(request.item);
5158 
5159  suballoc.offset = request.offset;
5160  suballoc.size = allocSize;
5161  suballoc.type = type;
5162  suballoc.hAllocation = hAllocation;
5163 
5164  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5165  if(paddingEnd)
5166  {
5167  VmaSuballocation paddingSuballoc = {};
5168  paddingSuballoc.offset = request.offset + allocSize;
5169  paddingSuballoc.size = paddingEnd;
5170  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5171  VmaSuballocationList::iterator next = request.item;
5172  ++next;
5173  const VmaSuballocationList::iterator paddingEndItem =
5174  m_Suballocations.insert(next, paddingSuballoc);
5175  RegisterFreeSuballocation(paddingEndItem);
5176  }
5177 
5178  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5179  if(paddingBegin)
5180  {
5181  VmaSuballocation paddingSuballoc = {};
5182  paddingSuballoc.offset = request.offset - paddingBegin;
5183  paddingSuballoc.size = paddingBegin;
5184  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5185  const VmaSuballocationList::iterator paddingBeginItem =
5186  m_Suballocations.insert(request.item, paddingSuballoc);
5187  RegisterFreeSuballocation(paddingBeginItem);
5188  }
5189 
5190  // Update totals.
5191  m_FreeCount = m_FreeCount - 1;
5192  if(paddingBegin > 0)
5193  {
5194  ++m_FreeCount;
5195  }
5196  if(paddingEnd > 0)
5197  {
5198  ++m_FreeCount;
5199  }
5200  m_SumFreeSize -= allocSize;
5201 }
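// Editorial worked example (hypothetical numbers): carving allocSize = 192 at
// request.offset = 320 out of a free suballocation covering [256, 768) gives
// paddingBegin = 320 - 256 = 64 and paddingEnd = 512 - 64 - 192 = 256. Both
// paddings are re-registered as FREE suballocations, so m_FreeCount changes by
// -1 + 2 = +1 and m_SumFreeSize drops by exactly allocSize = 192.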
5202 
5203 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5204 {
5205  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5206  suballocItem != m_Suballocations.end();
5207  ++suballocItem)
5208  {
5209  VmaSuballocation& suballoc = *suballocItem;
5210  if(suballoc.hAllocation == allocation)
5211  {
5212  FreeSuballocation(suballocItem);
5213  VMA_HEAVY_ASSERT(Validate());
5214  return;
5215  }
5216  }
5217  VMA_ASSERT(0 && "Not found!");
5218 }
5219 
5220 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5221 {
5222  VkDeviceSize lastSize = 0;
5223  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5224  {
5225  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5226 
5227  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5228  {
5229  VMA_ASSERT(0);
5230  return false;
5231  }
5232  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5233  {
5234  VMA_ASSERT(0);
5235  return false;
5236  }
5237  if(it->size < lastSize)
5238  {
5239  VMA_ASSERT(0);
5240  return false;
5241  }
5242 
5243  lastSize = it->size;
5244  }
5245  return true;
5246 }
5247 
5248 bool VmaBlockMetadata::CheckAllocation(
5249  uint32_t currentFrameIndex,
5250  uint32_t frameInUseCount,
5251  VkDeviceSize bufferImageGranularity,
5252  VkDeviceSize allocSize,
5253  VkDeviceSize allocAlignment,
5254  VmaSuballocationType allocType,
5255  VmaSuballocationList::const_iterator suballocItem,
5256  bool canMakeOtherLost,
5257  VkDeviceSize* pOffset,
5258  size_t* itemsToMakeLostCount,
5259  VkDeviceSize* pSumFreeSize,
5260  VkDeviceSize* pSumItemSize) const
5261 {
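 // Checks whether an allocation of allocSize and allocAlignment can be placed
 // starting at suballocItem. When canMakeOtherLost is true, a run of consecutive
 // suballocations may be consumed and lost-candidates are counted in
 // *itemsToMakeLostCount; otherwise only this single free suballocation is
 // considered. On success, *pOffset receives the final aligned offset.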
5262  VMA_ASSERT(allocSize > 0);
5263  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5264  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5265  VMA_ASSERT(pOffset != VMA_NULL);
5266 
5267  *itemsToMakeLostCount = 0;
5268  *pSumFreeSize = 0;
5269  *pSumItemSize = 0;
5270 
5271  if(canMakeOtherLost)
5272  {
5273  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5274  {
5275  *pSumFreeSize = suballocItem->size;
5276  }
5277  else
5278  {
5279  if(suballocItem->hAllocation->CanBecomeLost() &&
5280  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5281  {
5282  ++*itemsToMakeLostCount;
5283  *pSumItemSize = suballocItem->size;
5284  }
5285  else
5286  {
5287  return false;
5288  }
5289  }
5290 
5291  // Remaining size is too small for this request: Early return.
5292  if(m_Size - suballocItem->offset < allocSize)
5293  {
5294  return false;
5295  }
5296 
5297  // Start from offset equal to beginning of this suballocation.
5298  *pOffset = suballocItem->offset;
5299 
5300  // Apply VMA_DEBUG_MARGIN at the beginning.
5301  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5302  {
5303  *pOffset += VMA_DEBUG_MARGIN;
5304  }
5305 
5306  // Apply alignment.
5307  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5308  *pOffset = VmaAlignUp(*pOffset, alignment);
5309 
5310  // Check previous suballocations for BufferImageGranularity conflicts.
5311  // Make bigger alignment if necessary.
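 // (Vulkan's bufferImageGranularity requires that linear and non-linear resources
 // placed in the same VkDeviceMemory do not share a "page" of that size.)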
5312  if(bufferImageGranularity > 1)
5313  {
5314  bool bufferImageGranularityConflict = false;
5315  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5316  while(prevSuballocItem != m_Suballocations.cbegin())
5317  {
5318  --prevSuballocItem;
5319  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5320  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5321  {
5322  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5323  {
5324  bufferImageGranularityConflict = true;
5325  break;
5326  }
5327  }
5328  else
5329  // Already on previous page.
5330  break;
5331  }
5332  if(bufferImageGranularityConflict)
5333  {
5334  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5335  }
5336  }
5337 
5338  // Now that we have final *pOffset, check if we are past suballocItem.
5339  // If yes, return false - this function should be called for another suballocItem as the starting point.
5340  if(*pOffset >= suballocItem->offset + suballocItem->size)
5341  {
5342  return false;
5343  }
5344 
5345  // Calculate padding at the beginning based on current offset.
5346  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5347 
5348  // Calculate required margin at the end if this is not last suballocation.
5349  VmaSuballocationList::const_iterator next = suballocItem;
5350  ++next;
5351  const VkDeviceSize requiredEndMargin =
5352  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5353 
5354  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5355  // Another early return check.
5356  if(suballocItem->offset + totalSize > m_Size)
5357  {
5358  return false;
5359  }
5360 
5361  // Advance lastSuballocItem until desired size is reached.
5362  // Update itemsToMakeLostCount.
5363  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5364  if(totalSize > suballocItem->size)
5365  {
5366  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5367  while(remainingSize > 0)
5368  {
5369  ++lastSuballocItem;
5370  if(lastSuballocItem == m_Suballocations.cend())
5371  {
5372  return false;
5373  }
5374  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5375  {
5376  *pSumFreeSize += lastSuballocItem->size;
5377  }
5378  else
5379  {
5380  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5381  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5382  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5383  {
5384  ++*itemsToMakeLostCount;
5385  *pSumItemSize += lastSuballocItem->size;
5386  }
5387  else
5388  {
5389  return false;
5390  }
5391  }
5392  remainingSize = (lastSuballocItem->size < remainingSize) ?
5393  remainingSize - lastSuballocItem->size : 0;
5394  }
5395  }
5396 
5397  // Check next suballocations for BufferImageGranularity conflicts.
5398  // If conflict exists, we must mark more allocations lost or fail.
5399  if(bufferImageGranularity > 1)
5400  {
5401  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5402  ++nextSuballocItem;
5403  while(nextSuballocItem != m_Suballocations.cend())
5404  {
5405  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5406  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5407  {
5408  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5409  {
5410  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5411  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5412  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5413  {
5414  ++*itemsToMakeLostCount;
5415  }
5416  else
5417  {
5418  return false;
5419  }
5420  }
5421  }
5422  else
5423  {
5424  // Already on next page.
5425  break;
5426  }
5427  ++nextSuballocItem;
5428  }
5429  }
5430  }
5431  else
5432  {
5433  const VmaSuballocation& suballoc = *suballocItem;
5434  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5435 
5436  *pSumFreeSize = suballoc.size;
5437 
5438  // Size of this suballocation is too small for this request: Early return.
5439  if(suballoc.size < allocSize)
5440  {
5441  return false;
5442  }
5443 
5444  // Start from offset equal to beginning of this suballocation.
5445  *pOffset = suballoc.offset;
5446 
5447  // Apply VMA_DEBUG_MARGIN at the beginning.
5448  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5449  {
5450  *pOffset += VMA_DEBUG_MARGIN;
5451  }
5452 
5453  // Apply alignment.
5454  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5455  *pOffset = VmaAlignUp(*pOffset, alignment);
5456 
5457  // Check previous suballocations for BufferImageGranularity conflicts.
5458  // Make bigger alignment if necessary.
5459  if(bufferImageGranularity > 1)
5460  {
5461  bool bufferImageGranularityConflict = false;
5462  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5463  while(prevSuballocItem != m_Suballocations.cbegin())
5464  {
5465  --prevSuballocItem;
5466  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5467  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5468  {
5469  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5470  {
5471  bufferImageGranularityConflict = true;
5472  break;
5473  }
5474  }
5475  else
5476  // Already on previous page.
5477  break;
5478  }
5479  if(bufferImageGranularityConflict)
5480  {
5481  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5482  }
5483  }
5484 
5485  // Calculate padding at the beginning based on current offset.
5486  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5487 
5488  // Calculate required margin at the end if this is not last suballocation.
5489  VmaSuballocationList::const_iterator next = suballocItem;
5490  ++next;
5491  const VkDeviceSize requiredEndMargin =
5492  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5493 
5494  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5495  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5496  {
5497  return false;
5498  }
5499 
5500  // Check next suballocations for BufferImageGranularity conflicts.
5501  // If conflict exists, allocation cannot be made here.
5502  if(bufferImageGranularity > 1)
5503  {
5504  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5505  ++nextSuballocItem;
5506  while(nextSuballocItem != m_Suballocations.cend())
5507  {
5508  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5509  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5510  {
5511  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5512  {
5513  return false;
5514  }
5515  }
5516  else
5517  {
5518  // Already on next page.
5519  break;
5520  }
5521  ++nextSuballocItem;
5522  }
5523  }
5524  }
5525 
5526  // All tests passed: Success. pOffset is already filled.
5527  return true;
5528 }
5529 
5530 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5531 {
5532  VMA_ASSERT(item != m_Suballocations.end());
5533  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5534 
5535  VmaSuballocationList::iterator nextItem = item;
5536  ++nextItem;
5537  VMA_ASSERT(nextItem != m_Suballocations.end());
5538  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5539 
5540  item->size += nextItem->size;
5541  --m_FreeCount;
5542  m_Suballocations.erase(nextItem);
5543 }
5544 
5545 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5546 {
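 // Marks the suballocation as free and coalesces it with free neighbors. Neighbors
 // are unregistered from m_FreeSuballocationsBySize before merging because merging
 // changes their size, which is the sort key of that vector.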
5547  // Change this suballocation to be marked as free.
5548  VmaSuballocation& suballoc = *suballocItem;
5549  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5550  suballoc.hAllocation = VK_NULL_HANDLE;
5551 
5552  // Update totals.
5553  ++m_FreeCount;
5554  m_SumFreeSize += suballoc.size;
5555 
5556  // Merge with previous and/or next suballocation if it's also free.
5557  bool mergeWithNext = false;
5558  bool mergeWithPrev = false;
5559 
5560  VmaSuballocationList::iterator nextItem = suballocItem;
5561  ++nextItem;
5562  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5563  {
5564  mergeWithNext = true;
5565  }
5566 
5567  VmaSuballocationList::iterator prevItem = suballocItem;
5568  if(suballocItem != m_Suballocations.begin())
5569  {
5570  --prevItem;
5571  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5572  {
5573  mergeWithPrev = true;
5574  }
5575  }
5576 
5577  if(mergeWithNext)
5578  {
5579  UnregisterFreeSuballocation(nextItem);
5580  MergeFreeWithNext(suballocItem);
5581  }
5582 
5583  if(mergeWithPrev)
5584  {
5585  UnregisterFreeSuballocation(prevItem);
5586  MergeFreeWithNext(prevItem);
5587  RegisterFreeSuballocation(prevItem);
5588  return prevItem;
5589  }
5590  else
5591  {
5592  RegisterFreeSuballocation(suballocItem);
5593  return suballocItem;
5594  }
5595 }
5596 
5597 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5598 {
5599  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5600  VMA_ASSERT(item->size > 0);
5601 
5602  // You may want to enable this validation at the beginning or at the end of
5603  // this function, depending on what you want to check.
5604  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5605 
5606  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5607  {
5608  if(m_FreeSuballocationsBySize.empty())
5609  {
5610  m_FreeSuballocationsBySize.push_back(item);
5611  }
5612  else
5613  {
5614  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5615  }
5616  }
5617 
5618  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5619 }
5620 
5621 
5622 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5623 {
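 // The binary search finds the first entry whose size is not less than item->size;
 // entries of equal size are then scanned linearly to locate the exact iterator.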
5624  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5625  VMA_ASSERT(item->size > 0);
5626 
5627  // You may want to enable this validation at the beginning or at the end of
5628  // this function, depending on what you want to check.
5629  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5630 
5631  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5632  {
5633  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5634  m_FreeSuballocationsBySize.data(),
5635  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5636  item,
5637  VmaSuballocationItemSizeLess());
5638  for(size_t index = it - m_FreeSuballocationsBySize.data();
5639  index < m_FreeSuballocationsBySize.size();
5640  ++index)
5641  {
5642  if(m_FreeSuballocationsBySize[index] == item)
5643  {
5644  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5645  return;
5646  }
5647  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5648  }
5649  VMA_ASSERT(0 && "Not found.");
5650  }
5651 
5652  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5653 }
5654 
5655 
5656 // class VmaDeviceMemoryMapping
5657 
5658 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5659  m_MapCount(0),
5660  m_pMappedData(VMA_NULL)
5661 {
5662 }
5663 
5664 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5665 {
5666  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5667 }
5668 
5669 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5670 {
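 // Mapping is reference-counted: the first call maps the whole VkDeviceMemory with
 // vkMapMemory, subsequent calls only increment m_MapCount and return the cached
 // pointer. This lets persistently mapped allocations and temporary mappings
 // coexist within one block.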
5671  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5672  if(m_MapCount != 0)
5673  {
5674  ++m_MapCount;
5675  VMA_ASSERT(m_pMappedData != VMA_NULL);
5676  if(ppData != VMA_NULL)
5677  {
5678  *ppData = m_pMappedData;
5679  }
5680  return VK_SUCCESS;
5681  }
5682  else
5683  {
5684  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5685  hAllocator->m_hDevice,
5686  hMemory,
5687  0, // offset
5688  VK_WHOLE_SIZE,
5689  0, // flags
5690  &m_pMappedData);
5691  if(result == VK_SUCCESS)
5692  {
5693  if(ppData != VMA_NULL)
5694  {
5695  *ppData = m_pMappedData;
5696  }
5697  m_MapCount = 1;
5698  }
5699  return result;
5700  }
5701 }
5702 
5703 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5704 {
5705  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5706  if(m_MapCount != 0)
5707  {
5708  if(--m_MapCount == 0)
5709  {
5710  m_pMappedData = VMA_NULL;
5711  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5712  }
5713  }
5714  else
5715  {
5716  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5717  }
5718 }
5719 
5720 
5721 // class VmaDeviceMemoryBlock
5722 
5723 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5724  m_MemoryTypeIndex(UINT32_MAX),
5725  m_hMemory(VK_NULL_HANDLE),
5726  m_Metadata(hAllocator)
5727 {
5728 }
5729 
5730 void VmaDeviceMemoryBlock::Init(
5731  uint32_t newMemoryTypeIndex,
5732  VkDeviceMemory newMemory,
5733  VkDeviceSize newSize)
5734 {
5735  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5736 
5737  m_MemoryTypeIndex = newMemoryTypeIndex;
5738  m_hMemory = newMemory;
5739 
5740  m_Metadata.Init(newSize);
5741 }
5742 
5743 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5744 {
5745  // This is the most important assert in the entire library.
5746  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5747  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5748 
5749  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5750  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5751  m_hMemory = VK_NULL_HANDLE;
5752 }
5753 
5754 bool VmaDeviceMemoryBlock::Validate() const
5755 {
5756  if((m_hMemory == VK_NULL_HANDLE) ||
5757  (m_Metadata.GetSize() == 0))
5758  {
5759  return false;
5760  }
5761 
5762  return m_Metadata.Validate();
5763 }
5764 
5765 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5766 {
5767  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5768 }
5769 
5770 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5771 {
5772  m_Mapping.Unmap(hAllocator, m_hMemory);
5773 }
5774 
5775 static void InitStatInfo(VmaStatInfo& outInfo)
5776 {
5777  memset(&outInfo, 0, sizeof(outInfo));
5778  outInfo.allocationSizeMin = UINT64_MAX;
5779  outInfo.unusedRangeSizeMin = UINT64_MAX;
5780 }
5781 
5782 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5783 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5784 {
5785  inoutInfo.blockCount += srcInfo.blockCount;
5786  inoutInfo.allocationCount += srcInfo.allocationCount;
5787  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5788  inoutInfo.usedBytes += srcInfo.usedBytes;
5789  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5790  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5791  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5792  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5793  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5794 }
5795 
5796 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5797 {
5798  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5799  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5800  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5801  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5802 }
5803 
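 // A custom pool is a thin wrapper over a single VmaBlockVector. Note that
 // VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT passes granularity 1,
 // which disables the granularity conflict checks - valid when the pool holds
 // only linear or only non-linear resources.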
5804 VmaPool_T::VmaPool_T(
5805  VmaAllocator hAllocator,
5806  const VmaPoolCreateInfo& createInfo) :
5807  m_BlockVector(
5808  hAllocator,
5809  createInfo.memoryTypeIndex,
5810  createInfo.blockSize,
5811  createInfo.minBlockCount,
5812  createInfo.maxBlockCount,
5813  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5814  createInfo.frameInUseCount,
5815  true) // isCustomPool
5816 {
5817 }
5818 
5819 VmaPool_T::~VmaPool_T()
5820 {
5821 }
5822 
5823 #if VMA_STATS_STRING_ENABLED
5824 
5825 #endif // #if VMA_STATS_STRING_ENABLED
5826 
5827 VmaBlockVector::VmaBlockVector(
5828  VmaAllocator hAllocator,
5829  uint32_t memoryTypeIndex,
5830  VkDeviceSize preferredBlockSize,
5831  size_t minBlockCount,
5832  size_t maxBlockCount,
5833  VkDeviceSize bufferImageGranularity,
5834  uint32_t frameInUseCount,
5835  bool isCustomPool) :
5836  m_hAllocator(hAllocator),
5837  m_MemoryTypeIndex(memoryTypeIndex),
5838  m_PreferredBlockSize(preferredBlockSize),
5839  m_MinBlockCount(minBlockCount),
5840  m_MaxBlockCount(maxBlockCount),
5841  m_BufferImageGranularity(bufferImageGranularity),
5842  m_FrameInUseCount(frameInUseCount),
5843  m_IsCustomPool(isCustomPool),
5844  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5845  m_HasEmptyBlock(false),
5846  m_pDefragmentator(VMA_NULL)
5847 {
5848 }
5849 
5850 VmaBlockVector::~VmaBlockVector()
5851 {
5852  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5853 
5854  for(size_t i = m_Blocks.size(); i--; )
5855  {
5856  m_Blocks[i]->Destroy(m_hAllocator);
5857  vma_delete(m_hAllocator, m_Blocks[i]);
5858  }
5859 }
5860 
5861 VkResult VmaBlockVector::CreateMinBlocks()
5862 {
5863  for(size_t i = 0; i < m_MinBlockCount; ++i)
5864  {
5865  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5866  if(res != VK_SUCCESS)
5867  {
5868  return res;
5869  }
5870  }
5871  return VK_SUCCESS;
5872 }
5873 
5874 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5875 {
5876  pStats->size = 0;
5877  pStats->unusedSize = 0;
5878  pStats->allocationCount = 0;
5879  pStats->unusedRangeCount = 0;
5880  pStats->unusedRangeSizeMax = 0;
5881 
5882  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5883 
5884  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5885  {
5886  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5887  VMA_ASSERT(pBlock);
5888  VMA_HEAVY_ASSERT(pBlock->Validate());
5889  pBlock->m_Metadata.AddPoolStats(*pStats);
5890  }
5891 }
5892 
5893 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5894 
5895 VkResult VmaBlockVector::Allocate(
5896  VmaPool hCurrentPool,
5897  uint32_t currentFrameIndex,
5898  const VkMemoryRequirements& vkMemReq,
5899  const VmaAllocationCreateInfo& createInfo,
5900  VmaSuballocationType suballocType,
5901  VmaAllocation* pAllocation)
5902 {
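 // Allocation strategy, in order: (1) fit into an existing block without
 // disturbing other allocations, (2) create a new block - in default pools
 // retrying with half and then a quarter of the preferred size, (3) if allowed,
 // make existing allocations lost to create room.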
5903  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5904  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5905 
5906  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5907 
5908  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5909  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5910  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5911  {
5912  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5913  VMA_ASSERT(pCurrBlock);
5914  VmaAllocationRequest currRequest = {};
5915  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5916  currentFrameIndex,
5917  m_FrameInUseCount,
5918  m_BufferImageGranularity,
5919  vkMemReq.size,
5920  vkMemReq.alignment,
5921  suballocType,
5922  false, // canMakeOtherLost
5923  &currRequest))
5924  {
5925  // Allocate from pCurrBlock.
5926  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5927 
5928  if(mapped)
5929  {
5930  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5931  if(res != VK_SUCCESS)
5932  {
5933  return res;
5934  }
5935  }
5936 
5937  // We no longer have an empty block.
5938  if(pCurrBlock->m_Metadata.IsEmpty())
5939  {
5940  m_HasEmptyBlock = false;
5941  }
5942 
5943  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5944  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5945  (*pAllocation)->InitBlockAllocation(
5946  hCurrentPool,
5947  pCurrBlock,
5948  currRequest.offset,
5949  vkMemReq.alignment,
5950  vkMemReq.size,
5951  suballocType,
5952  mapped,
5953  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5954  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5955  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5956  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5957  return VK_SUCCESS;
5958  }
5959  }
5960 
5961  const bool canCreateNewBlock =
5962  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5963  (m_Blocks.size() < m_MaxBlockCount);
5964 
5965  // 2. Try to create new block.
5966  if(canCreateNewBlock)
5967  {
5968  // 2.1. Start with full preferredBlockSize.
5969  VkDeviceSize blockSize = m_PreferredBlockSize;
5970  size_t newBlockIndex = 0;
5971  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5972  // Allocating blocks of other sizes is allowed only in default pools.
5973  // In custom pools block size is fixed.
5974  if(res < 0 && m_IsCustomPool == false)
5975  {
5976  // 2.2. Try half the size.
5977  blockSize /= 2;
5978  if(blockSize >= vkMemReq.size)
5979  {
5980  res = CreateBlock(blockSize, &newBlockIndex);
5981  if(res < 0)
5982  {
5983  // 2.3. Try quarter the size.
5984  blockSize /= 2;
5985  if(blockSize >= vkMemReq.size)
5986  {
5987  res = CreateBlock(blockSize, &newBlockIndex);
5988  }
5989  }
5990  }
5991  }
5992  if(res == VK_SUCCESS)
5993  {
5994  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5995  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5996 
5997  if(mapped)
5998  {
5999  res = pBlock->Map(m_hAllocator, nullptr);
6000  if(res != VK_SUCCESS)
6001  {
6002  return res;
6003  }
6004  }
6005 
6006  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6007  VmaAllocationRequest allocRequest;
6008  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6009  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6010  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6011  (*pAllocation)->InitBlockAllocation(
6012  hCurrentPool,
6013  pBlock,
6014  allocRequest.offset,
6015  vkMemReq.alignment,
6016  vkMemReq.size,
6017  suballocType,
6018  mapped,
6019  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6020  VMA_HEAVY_ASSERT(pBlock->Validate());
6021  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6022  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6023  return VK_SUCCESS;
6024  }
6025  }
6026 
6027  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6028 
6029  // 3. Try to allocate from existing blocks, making other allocations lost.
6030  if(canMakeOtherLost)
6031  {
6032  uint32_t tryIndex = 0;
6033  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6034  {
6035  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6036  VmaAllocationRequest bestRequest = {};
6037  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6038 
6039  // 1. Search existing allocations.
6040  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6041  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6042  {
6043  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6044  VMA_ASSERT(pCurrBlock);
6045  VmaAllocationRequest currRequest = {};
6046  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6047  currentFrameIndex,
6048  m_FrameInUseCount,
6049  m_BufferImageGranularity,
6050  vkMemReq.size,
6051  vkMemReq.alignment,
6052  suballocType,
6053  canMakeOtherLost,
6054  &currRequest))
6055  {
6056  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6057  if(pBestRequestBlock == VMA_NULL ||
6058  currRequestCost < bestRequestCost)
6059  {
6060  pBestRequestBlock = pCurrBlock;
6061  bestRequest = currRequest;
6062  bestRequestCost = currRequestCost;
6063 
6064  if(bestRequestCost == 0)
6065  {
6066  break;
6067  }
6068  }
6069  }
6070  }
6071 
6072  if(pBestRequestBlock != VMA_NULL)
6073  {
6074  if(mapped)
6075  {
6076  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
6077  if(res != VK_SUCCESS)
6078  {
6079  return res;
6080  }
6081  }
6082 
6083  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6084  currentFrameIndex,
6085  m_FrameInUseCount,
6086  &bestRequest))
6087  {
6088  // We no longer have an empty block.
6089  if(pBestRequestBlock->m_Metadata.IsEmpty())
6090  {
6091  m_HasEmptyBlock = false;
6092  }
6093  // Allocate from pBestRequestBlock.
6094  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6095  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6096  (*pAllocation)->InitBlockAllocation(
6097  hCurrentPool,
6098  pBestRequestBlock,
6099  bestRequest.offset,
6100  vkMemReq.alignment,
6101  vkMemReq.size,
6102  suballocType,
6103  mapped,
6104  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6105  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6106  VMA_DEBUG_LOG("  Returned from existing block, after making other allocations lost");
6107  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6108  return VK_SUCCESS;
6109  }
6110  // else: Some allocations must have been touched by another thread in the meantime. Try again.
6111  }
6112  else
6113  {
6114  // Could not find place in any of the blocks - break outer loop.
6115  break;
6116  }
6117  }
6118  /* Maximum number of tries exceeded - a very unlikely event when many other
6119  threads are simultaneously touching allocations, making it impossible to make them
6120  lost at the same time as we try to allocate. */
6121  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6122  {
6123  return VK_ERROR_TOO_MANY_OBJECTS;
6124  }
6125  }
6126 
6127  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6128 }
6129 
6130 void VmaBlockVector::Free(
6131  VmaAllocation hAllocation)
6132 {
6133  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6134 
6135  // Scope for lock.
6136  {
6137  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6138 
6139  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6140 
6141  if(hAllocation->IsPersistentMap())
6142  {
6143  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
6144  }
6145 
6146  pBlock->m_Metadata.Free(hAllocation);
6147  VMA_HEAVY_ASSERT(pBlock->Validate());
6148 
6149  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6150 
6151  // pBlock became empty after this deallocation.
6152  if(pBlock->m_Metadata.IsEmpty())
6153  {
6154  // We already have an empty block - we don't want two, so delete this one.
6155  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6156  {
6157  pBlockToDelete = pBlock;
6158  Remove(pBlock);
6159  }
6160  // We now have our first empty block.
6161  else
6162  {
6163  m_HasEmptyBlock = true;
6164  }
6165  }
6166  // pBlock didn't become empty, but we have another empty block - find and free that one.
6167  // (This is optional - a heuristic.)
6168  else if(m_HasEmptyBlock)
6169  {
6170  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6171  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6172  {
6173  pBlockToDelete = pLastBlock;
6174  m_Blocks.pop_back();
6175  m_HasEmptyBlock = false;
6176  }
6177  }
6178 
6179  IncrementallySortBlocks();
6180  }
6181 
6182  // Destruction of an empty block. Deferred until this point, outside of the mutex
6183  // lock, for performance reasons.
6184  if(pBlockToDelete != VMA_NULL)
6185  {
6186  VMA_DEBUG_LOG(" Deleted empty allocation");
6187  pBlockToDelete->Destroy(m_hAllocator);
6188  vma_delete(m_hAllocator, pBlockToDelete);
6189  }
6190 }
6191 
6192 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6193 {
6194  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6195  {
6196  if(m_Blocks[blockIndex] == pBlock)
6197  {
6198  VmaVectorRemove(m_Blocks, blockIndex);
6199  return;
6200  }
6201  }
6202  VMA_ASSERT(0);
6203 }
6204 
6205 void VmaBlockVector::IncrementallySortBlocks()
6206 {
6207  // Bubble sort only until first swap.
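 // A single swap per call is enough: invoked after every free, it keeps m_Blocks
 // approximately sorted by ascending free space, so allocation tries fuller
 // blocks first and empty blocks drift toward the back of the vector.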
6208  for(size_t i = 1; i < m_Blocks.size(); ++i)
6209  {
6210  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6211  {
6212  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6213  return;
6214  }
6215  }
6216 }
6217 
6218 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6219 {
6220  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6221  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6222  allocInfo.allocationSize = blockSize;
6223  VkDeviceMemory mem = VK_NULL_HANDLE;
6224  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6225  if(res < 0)
6226  {
6227  return res;
6228  }
6229 
6230  // New VkDeviceMemory successfully created.
6231 
6232  // Create a new block object for it.
6233  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6234  pBlock->Init(
6235  m_MemoryTypeIndex,
6236  mem,
6237  allocInfo.allocationSize);
6238 
6239  m_Blocks.push_back(pBlock);
6240  if(pNewBlockIndex != VMA_NULL)
6241  {
6242  *pNewBlockIndex = m_Blocks.size() - 1;
6243  }
6244 
6245  return VK_SUCCESS;
6246 }
6247 
6248 #if VMA_STATS_STRING_ENABLED
6249 
6250 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6251 {
6252  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6253 
6254  json.BeginObject();
6255 
6256  if(m_IsCustomPool)
6257  {
6258  json.WriteString("MemoryTypeIndex");
6259  json.WriteNumber(m_MemoryTypeIndex);
6260 
6261  json.WriteString("BlockSize");
6262  json.WriteNumber(m_PreferredBlockSize);
6263 
6264  json.WriteString("BlockCount");
6265  json.BeginObject(true);
6266  if(m_MinBlockCount > 0)
6267  {
6268  json.WriteString("Min");
6269  json.WriteNumber(m_MinBlockCount);
6270  }
6271  if(m_MaxBlockCount < SIZE_MAX)
6272  {
6273  json.WriteString("Max");
6274  json.WriteNumber(m_MaxBlockCount);
6275  }
6276  json.WriteString("Cur");
6277  json.WriteNumber(m_Blocks.size());
6278  json.EndObject();
6279 
6280  if(m_FrameInUseCount > 0)
6281  {
6282  json.WriteString("FrameInUseCount");
6283  json.WriteNumber(m_FrameInUseCount);
6284  }
6285  }
6286  else
6287  {
6288  json.WriteString("PreferredBlockSize");
6289  json.WriteNumber(m_PreferredBlockSize);
6290  }
6291 
6292  json.WriteString("Blocks");
6293  json.BeginArray();
6294  for(size_t i = 0; i < m_Blocks.size(); ++i)
6295  {
6296  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6297  }
6298  json.EndArray();
6299 
6300  json.EndObject();
6301 }
6302 
6303 #endif // #if VMA_STATS_STRING_ENABLED
6304 
6305 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6306  VmaAllocator hAllocator,
6307  uint32_t currentFrameIndex)
6308 {
6309  if(m_pDefragmentator == VMA_NULL)
6310  {
6311  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6312  hAllocator,
6313  this,
6314  currentFrameIndex);
6315  }
6316 
6317  return m_pDefragmentator;
6318 }
6319 
6320 VkResult VmaBlockVector::Defragment(
6321  VmaDefragmentationStats* pDefragmentationStats,
6322  VkDeviceSize& maxBytesToMove,
6323  uint32_t& maxAllocationsToMove)
6324 {
6325  if(m_pDefragmentator == VMA_NULL)
6326  {
6327  return VK_SUCCESS;
6328  }
6329 
6330  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6331 
6332  // Defragment.
6333  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6334 
6335  // Accumulate statistics.
6336  if(pDefragmentationStats != VMA_NULL)
6337  {
6338  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6339  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6340  pDefragmentationStats->bytesMoved += bytesMoved;
6341  pDefragmentationStats->allocationsMoved += allocationsMoved;
6342  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6343  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6344  maxBytesToMove -= bytesMoved;
6345  maxAllocationsToMove -= allocationsMoved;
6346  }
6347 
6348  // Free empty blocks.
6349  m_HasEmptyBlock = false;
6350  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6351  {
6352  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6353  if(pBlock->m_Metadata.IsEmpty())
6354  {
6355  if(m_Blocks.size() > m_MinBlockCount)
6356  {
6357  if(pDefragmentationStats != VMA_NULL)
6358  {
6359  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6360  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6361  }
6362 
6363  VmaVectorRemove(m_Blocks, blockIndex);
6364  pBlock->Destroy(m_hAllocator);
6365  vma_delete(m_hAllocator, pBlock);
6366  }
6367  else
6368  {
6369  m_HasEmptyBlock = true;
6370  }
6371  }
6372  }
6373 
6374  return result;
6375 }
6376 
6377 void VmaBlockVector::DestroyDefragmentator()
6378 {
6379  if(m_pDefragmentator != VMA_NULL)
6380  {
6381  vma_delete(m_hAllocator, m_pDefragmentator);
6382  m_pDefragmentator = VMA_NULL;
6383  }
6384 }
6385 
6386 void VmaBlockVector::MakePoolAllocationsLost(
6387  uint32_t currentFrameIndex,
6388  size_t* pLostAllocationCount)
6389 {
6390  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6391 
6392  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6393  {
6394  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6395  VMA_ASSERT(pBlock);
6396  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6397  }
6398 }
6399 
6400 void VmaBlockVector::AddStats(VmaStats* pStats)
6401 {
6402  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6403  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6404 
6405  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6406 
6407  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6408  {
6409  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6410  VMA_ASSERT(pBlock);
6411  VMA_HEAVY_ASSERT(pBlock->Validate());
6412  VmaStatInfo allocationStatInfo;
6413  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6414  VmaAddStatInfo(pStats->total, allocationStatInfo);
6415  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6416  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6417  }
6418 }
6419 
6420 
6421 // VmaDefragmentator members definition
6422 
6423 VmaDefragmentator::VmaDefragmentator(
6424  VmaAllocator hAllocator,
6425  VmaBlockVector* pBlockVector,
6426  uint32_t currentFrameIndex) :
6427  m_hAllocator(hAllocator),
6428  m_pBlockVector(pBlockVector),
6429  m_CurrentFrameIndex(currentFrameIndex),
6430  m_BytesMoved(0),
6431  m_AllocationsMoved(0),
6432  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6433  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6434 {
6435 }
6436 
6437 VmaDefragmentator::~VmaDefragmentator()
6438 {
6439  for(size_t i = m_Blocks.size(); i--; )
6440  {
6441  vma_delete(m_hAllocator, m_Blocks[i]);
6442  }
6443 }
6444 
6445 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6446 {
6447  AllocationInfo allocInfo;
6448  allocInfo.m_hAllocation = hAlloc;
6449  allocInfo.m_pChanged = pChanged;
6450  m_Allocations.push_back(allocInfo);
6451 }
6452 
6453 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6454 {
6455  // It has already been mapped for defragmentation.
6456  if(m_pMappedDataForDefragmentation)
6457  {
6458  *ppMappedData = m_pMappedDataForDefragmentation;
6459  return VK_SUCCESS;
6460  }
6461 
6462  // The block was already mapped outside of defragmentation (e.g. persistently).
6463  if(m_pBlock->m_Mapping.GetMappedData())
6464  {
6465  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6466  return VK_SUCCESS;
6467  }
6468 
6469  // Map on first usage.
6470  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6471  *ppMappedData = m_pMappedDataForDefragmentation;
6472  return res;
6473 }
6474 
6475 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6476 {
6477  if(m_pMappedDataForDefragmentation != VMA_NULL)
6478  {
6479  m_pBlock->Unmap(hAllocator);
6480  }
6481 }
6482 
6483 VkResult VmaDefragmentator::DefragmentRound(
6484  VkDeviceSize maxBytesToMove,
6485  uint32_t maxAllocationsToMove)
6486 {
6487  if(m_Blocks.empty())
6488  {
6489  return VK_SUCCESS;
6490  }
6491 
6492  size_t srcBlockIndex = m_Blocks.size() - 1;
6493  size_t srcAllocIndex = SIZE_MAX;
6494  for(;;)
6495  {
6496  // 1. Find next allocation to move.
6497  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6498  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6499  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6500  {
6501  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6502  {
6503  // Finished: no more allocations to process.
6504  if(srcBlockIndex == 0)
6505  {
6506  return VK_SUCCESS;
6507  }
6508  else
6509  {
6510  --srcBlockIndex;
6511  srcAllocIndex = SIZE_MAX;
6512  }
6513  }
6514  else
6515  {
6516  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6517  }
6518  }
6519 
6520  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6521  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6522 
6523  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6524  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6525  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6526  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6527 
6528  // 2. Try to find new place for this allocation in preceding or current block.
6529  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6530  {
6531  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6532  VmaAllocationRequest dstAllocRequest;
6533  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6534  m_CurrentFrameIndex,
6535  m_pBlockVector->GetFrameInUseCount(),
6536  m_pBlockVector->GetBufferImageGranularity(),
6537  size,
6538  alignment,
6539  suballocType,
6540  false, // canMakeOtherLost
6541  &dstAllocRequest) &&
6542  MoveMakesSense(
6543  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6544  {
6545  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6546 
6547  // Reached limit on number of allocations or bytes to move.
6548  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6549  (m_BytesMoved + size > maxBytesToMove))
6550  {
6551  return VK_INCOMPLETE;
6552  }
6553 
6554  void* pDstMappedData = VMA_NULL;
6555  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6556  if(res != VK_SUCCESS)
6557  {
6558  return res;
6559  }
6560 
6561  void* pSrcMappedData = VMA_NULL;
6562  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6563  if(res != VK_SUCCESS)
6564  {
6565  return res;
6566  }
6567 
6568  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6569  memcpy(
6570  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6571  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6572  static_cast<size_t>(size));
6573 
6574  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6575  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6576 
6577  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6578 
6579  if(allocInfo.m_pChanged != VMA_NULL)
6580  {
6581  *allocInfo.m_pChanged = VK_TRUE;
6582  }
6583 
6584  ++m_AllocationsMoved;
6585  m_BytesMoved += size;
6586 
6587  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6588 
6589  break;
6590  }
6591  }
6592 
6593  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6594 
6595  if(srcAllocIndex > 0)
6596  {
6597  --srcAllocIndex;
6598  }
6599  else
6600  {
6601  if(srcBlockIndex > 0)
6602  {
6603  --srcBlockIndex;
6604  srcAllocIndex = SIZE_MAX;
6605  }
6606  else
6607  {
6608  return VK_SUCCESS;
6609  }
6610  }
6611  }
6612 }
6613 
6614 VkResult VmaDefragmentator::Defragment(
6615  VkDeviceSize maxBytesToMove,
6616  uint32_t maxAllocationsToMove)
6617 {
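 // Phases: build a BlockInfo per block, bucket the registered allocations into
 // their owning blocks, order blocks from most "destination" to most "source",
 // run up to two rounds of DefragmentRound within the given move limits, and
 // finally unmap any blocks mapped only for defragmentation.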
6618  if(m_Allocations.empty())
6619  {
6620  return VK_SUCCESS;
6621  }
6622 
6623  // Create block info for each block.
6624  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6625  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6626  {
6627  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6628  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6629  m_Blocks.push_back(pBlockInfo);
6630  }
6631 
6632  // Sort them by m_pBlock pointer value.
6633  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6634 
6635  // Move allocation infos from m_Allocations into the m_Allocations vector of the matching BlockInfo.
6636  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6637  {
6638  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6639  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
6640  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6641  {
6642  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6643  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6644  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6645  {
6646  (*it)->m_Allocations.push_back(allocInfo);
6647  }
6648  else
6649  {
6650  VMA_ASSERT(0);
6651  }
6652  }
6653  }
6654  m_Allocations.clear();
6655 
6656  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6657  {
6658  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6659  pBlockInfo->CalcHasNonMovableAllocations();
6660  pBlockInfo->SortAllocationsBySizeDescecnding();
6661  }
6662 
6663  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
6664  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6665 
6666  // Execute defragmentation rounds (the main part).
6667  VkResult result = VK_SUCCESS;
6668  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6669  {
6670  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6671  }
6672 
6673  // Unmap blocks that were mapped for defragmentation.
6674  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6675  {
6676  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6677  }
6678 
6679  return result;
6680 }
6681 
6682 bool VmaDefragmentator::MoveMakesSense(
6683  size_t dstBlockIndex, VkDeviceSize dstOffset,
6684  size_t srcBlockIndex, VkDeviceSize srcOffset)
6685 {
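 // A move is worthwhile only if it compacts data toward the front: to a strictly
 // lower block index, or to a lower offset within the same block.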
6686  if(dstBlockIndex < srcBlockIndex)
6687  {
6688  return true;
6689  }
6690  if(dstBlockIndex > srcBlockIndex)
6691  {
6692  return false;
6693  }
6694  if(dstOffset < srcOffset)
6695  {
6696  return true;
6697  }
6698  return false;
6699 }
6700 
6701 
6702 // VmaAllocator_T
6703 
6704 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6705  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6706  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6707  m_PhysicalDevice(pCreateInfo->physicalDevice),
6708  m_hDevice(pCreateInfo->device),
6709  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6710  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6711  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6712  m_PreferredLargeHeapBlockSize(0),
6713  m_PreferredSmallHeapBlockSize(0),
6714  m_CurrentFrameIndex(0),
6715  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6716 {
6717  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6718 
6719  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6720  memset(&m_MemProps, 0, sizeof(m_MemProps));
6721  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6722 
6723  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6724  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6725 
6726  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6727  {
6728  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6729  }
6730 
6731  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6732  {
6733  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6734  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6735  }
6736 
6737  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6738 
6739  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6740  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6741 
6742  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6743  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6744  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6745  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6746 
6747  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6748  {
6749  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6750  {
6751  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6752  if(limit != VK_WHOLE_SIZE)
6753  {
6754  m_HeapSizeLimit[heapIndex] = limit;
6755  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6756  {
6757  m_MemProps.memoryHeaps[heapIndex].size = limit;
6758  }
6759  }
6760  }
6761  }
6762 
6763  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6764  {
6765  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6766 
6767  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6768  this,
6769  memTypeIndex,
6770  preferredBlockSize,
6771  0,
6772  SIZE_MAX,
6773  GetBufferImageGranularity(),
6774  pCreateInfo->frameInUseCount,
6775  false); // isCustomPool
6776  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6777  // because minBlockCount is 0.
6778  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6779  }
6780 }
6781 
6782 VmaAllocator_T::~VmaAllocator_T()
6783 {
6784  VMA_ASSERT(m_Pools.empty());
6785 
6786  for(size_t i = GetMemoryTypeCount(); i--; )
6787  {
6788  vma_delete(this, m_pDedicatedAllocations[i]);
6789  vma_delete(this, m_pBlockVectors[i]);
6790  }
6791 }
6792 
6793 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6794 {
6795 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6796  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6797  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6798  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6799  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6800  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6801  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6802  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6803  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6804  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6805  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6806  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6807  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6808  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6809  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6810  if(m_UseKhrDedicatedAllocation)
6811  {
6812  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
6813  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
6814  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
6815  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
6816  }
6817 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6818 
6819 #define VMA_COPY_IF_NOT_NULL(funcName) \
6820  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6821 
6822  if(pVulkanFunctions != VMA_NULL)
6823  {
6824  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6825  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6826  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6827  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6828  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6829  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6830  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6831  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6832  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6833  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6834  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6835  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6836  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6837  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6838  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6839  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6840  }
6841 
6842 #undef VMA_COPY_IF_NOT_NULL
6843 
6844  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6845  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6846  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6847  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6848  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6849  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6850  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6851  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6852  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6853  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6854  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6855  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6856  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6857  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6858  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6859  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6860  if(m_UseKhrDedicatedAllocation)
6861  {
6862  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6863  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6864  }
6865 }
6866 
6867 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6868 {
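 // Heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get the smaller default block
 // size, so a single block does not consume a large fraction of such a heap.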
6869  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6870  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6871  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6872  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6873 }
6874 
6875 VkResult VmaAllocator_T::AllocateMemoryOfType(
6876  const VkMemoryRequirements& vkMemReq,
6877  bool dedicatedAllocation,
6878  VkBuffer dedicatedBuffer,
6879  VkImage dedicatedImage,
6880  const VmaAllocationCreateInfo& createInfo,
6881  uint32_t memTypeIndex,
6882  VmaSuballocationType suballocType,
6883  VmaAllocation* pAllocation)
6884 {
6885  VMA_ASSERT(pAllocation != VMA_NULL);
6886  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6887 
6888  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6889 
6890  // If memory type is not HOST_VISIBLE, disable MAPPED.
6891  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6892  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6893  {
6894  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6895  }
6896 
6897  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6898  VMA_ASSERT(blockVector);
6899 
6900  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6901  bool preferDedicatedMemory =
6902  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6903  dedicatedAllocation ||
6904  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
6905  vkMemReq.size > preferredBlockSize / 2;
6906 
6907  if(preferDedicatedMemory &&
6908  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6909  finalCreateInfo.pool == VK_NULL_HANDLE)
6910  {
6911  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6912  }
6913 
6914  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6915  {
6916  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6917  {
6918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6919  }
6920  else
6921  {
6922  return AllocateDedicatedMemory(
6923  vkMemReq.size,
6924  suballocType,
6925  memTypeIndex,
6926  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6927  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6928  finalCreateInfo.pUserData,
6929  dedicatedBuffer,
6930  dedicatedImage,
6931  pAllocation);
6932  }
6933  }
6934  else
6935  {
6936  VkResult res = blockVector->Allocate(
6937  VK_NULL_HANDLE, // hCurrentPool
6938  m_CurrentFrameIndex.load(),
6939  vkMemReq,
6940  finalCreateInfo,
6941  suballocType,
6942  pAllocation);
6943  if(res == VK_SUCCESS)
6944  {
6945  return res;
6946  }
6947 
6948  // Allocation from the block vector failed: Try dedicated memory.
6949  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6950  {
6951  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6952  }
6953  else
6954  {
6955  res = AllocateDedicatedMemory(
6956  vkMemReq.size,
6957  suballocType,
6958  memTypeIndex,
6959  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6960  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6961  finalCreateInfo.pUserData,
6962  dedicatedBuffer,
6963  dedicatedImage,
6964  pAllocation);
6965  if(res == VK_SUCCESS)
6966  {
6967  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
6968  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6969  return VK_SUCCESS;
6970  }
6971  else
6972  {
6973  // Everything failed: Return error code.
6974  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6975  return res;
6976  }
6977  }
6978  }
6979 }
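/*
Control flow of AllocateMemoryOfType, summarized (a restatement of the code
above, not additional behavior):

  1. MAPPED is silently dropped for non-HOST_VISIBLE memory types.
  2. Dedicated memory is tried first when explicitly requested, when the
     resource requires/prefers it, or when size > preferredBlockSize / 2
     (unless NEVER_ALLOCATE is set or a custom pool is used).
  3. Otherwise a block suballocation is tried, with dedicated memory as the
     fallback when no block can service the request.
*/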
6980 
6981 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6982  VkDeviceSize size,
6983  VmaSuballocationType suballocType,
6984  uint32_t memTypeIndex,
6985  bool map,
6986  bool isUserDataString,
6987  void* pUserData,
6988  VkBuffer dedicatedBuffer,
6989  VkImage dedicatedImage,
6990  VmaAllocation* pAllocation)
6991 {
6992  VMA_ASSERT(pAllocation);
6993 
6994  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6995  allocInfo.memoryTypeIndex = memTypeIndex;
6996  allocInfo.allocationSize = size;
6997 
6998  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6999  if(m_UseKhrDedicatedAllocation)
7000  {
7001  if(dedicatedBuffer != VK_NULL_HANDLE)
7002  {
7003  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7004  dedicatedAllocInfo.buffer = dedicatedBuffer;
7005  allocInfo.pNext = &dedicatedAllocInfo;
7006  }
7007  else if(dedicatedImage != VK_NULL_HANDLE)
7008  {
7009  dedicatedAllocInfo.image = dedicatedImage;
7010  allocInfo.pNext = &dedicatedAllocInfo;
7011  }
7012  }
7013 
7014  // Allocate VkDeviceMemory.
7015  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7016  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7017  if(res < 0)
7018  {
7019  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7020  return res;
7021  }
7022 
7023  void* pMappedData = nullptr;
7024  if(map)
7025  {
7026  res = (*m_VulkanFunctions.vkMapMemory)(
7027  m_hDevice,
7028  hMemory,
7029  0,
7030  VK_WHOLE_SIZE,
7031  0,
7032  &pMappedData);
7033  if(res < 0)
7034  {
7035  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7036  FreeVulkanMemory(memTypeIndex, size, hMemory);
7037  return res;
7038  }
7039  }
7040 
7041  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7042  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7043  (*pAllocation)->SetUserData(this, pUserData);
7044 
7045  // Register it in m_pDedicatedAllocations.
7046  {
7047  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7048  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7049  VMA_ASSERT(pDedicatedAllocations);
7050  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7051  }
7052 
7053  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7054 
7055  return VK_SUCCESS;
7056 }
7057 
7058 void VmaAllocator_T::GetBufferMemoryRequirements(
7059  VkBuffer hBuffer,
7060  VkMemoryRequirements& memReq,
7061  bool& requiresDedicatedAllocation,
7062  bool& prefersDedicatedAllocation) const
7063 {
7064  if(m_UseKhrDedicatedAllocation)
7065  {
7066  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7067  memReqInfo.buffer = hBuffer;
7068 
7069  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7070 
7071  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7072  memReq2.pNext = &memDedicatedReq;
7073 
7074  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7075 
7076  memReq = memReq2.memoryRequirements;
7077  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7078  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7079  }
7080  else
7081  {
7082  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7083  requiresDedicatedAllocation = false;
7084  prefersDedicatedAllocation = false;
7085  }
7086 }
7087 
7088 void VmaAllocator_T::GetImageMemoryRequirements(
7089  VkImage hImage,
7090  VkMemoryRequirements& memReq,
7091  bool& requiresDedicatedAllocation,
7092  bool& prefersDedicatedAllocation) const
7093 {
7094  if(m_UseKhrDedicatedAllocation)
7095  {
7096  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7097  memReqInfo.image = hImage;
7098 
7099  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7100 
7101  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7102  memReq2.pNext = &memDedicatedReq;
7103 
7104  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7105 
7106  memReq = memReq2.memoryRequirements;
7107  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7108  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7109  }
7110  else
7111  {
7112  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7113  requiresDedicatedAllocation = false;
7114  prefersDedicatedAllocation = false;
7115  }
7116 }
7117 
7118 VkResult VmaAllocator_T::AllocateMemory(
7119  const VkMemoryRequirements& vkMemReq,
7120  bool requiresDedicatedAllocation,
7121  bool prefersDedicatedAllocation,
7122  VkBuffer dedicatedBuffer,
7123  VkImage dedicatedImage,
7124  const VmaAllocationCreateInfo& createInfo,
7125  VmaSuballocationType suballocType,
7126  VmaAllocation* pAllocation)
7127 {
7128  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7129  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7130  {
7131  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7132  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7133  }
7134  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7135  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7136  {
7137  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7138  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7139  }
7140  if(requiresDedicatedAllocation)
7141  {
7142  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7143  {
7144  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7145  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7146  }
7147  if(createInfo.pool != VK_NULL_HANDLE)
7148  {
7149  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7150  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7151  }
7152  }
7153  if((createInfo.pool != VK_NULL_HANDLE) &&
7154  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7155  {
7156  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7157  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7158  }
7159 
7160  if(createInfo.pool != VK_NULL_HANDLE)
7161  {
7162  return createInfo.pool->m_BlockVector.Allocate(
7163  createInfo.pool,
7164  m_CurrentFrameIndex.load(),
7165  vkMemReq,
7166  createInfo,
7167  suballocType,
7168  pAllocation);
7169  }
7170  else
7171  {
7172  // Bit mask of Vulkan memory types acceptable for this allocation.
7173  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7174  uint32_t memTypeIndex = UINT32_MAX;
7175  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7176  if(res == VK_SUCCESS)
7177  {
7178  res = AllocateMemoryOfType(
7179  vkMemReq,
7180  requiresDedicatedAllocation || prefersDedicatedAllocation,
7181  dedicatedBuffer,
7182  dedicatedImage,
7183  createInfo,
7184  memTypeIndex,
7185  suballocType,
7186  pAllocation);
7187  // Succeeded on first try.
7188  if(res == VK_SUCCESS)
7189  {
7190  return res;
7191  }
7192  // Allocation from this memory type failed. Try other compatible memory types.
7193  else
7194  {
7195  for(;;)
7196  {
7197  // Remove old memTypeIndex from list of possibilities.
7198  memoryTypeBits &= ~(1u << memTypeIndex);
7199  // Find alternative memTypeIndex.
7200  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7201  if(res == VK_SUCCESS)
7202  {
7203  res = AllocateMemoryOfType(
7204  vkMemReq,
7205  requiresDedicatedAllocation || prefersDedicatedAllocation,
7206  dedicatedBuffer,
7207  dedicatedImage,
7208  createInfo,
7209  memTypeIndex,
7210  suballocType,
7211  pAllocation);
7212  // Allocation from this alternative memory type succeeded.
7213  if(res == VK_SUCCESS)
7214  {
7215  return res;
7216  }
7217  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7218  }
7219  // No other matching memory type index could be found.
7220  else
7221  {
7222  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7223  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7224  }
7225  }
7226  }
7227  }
7228  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7229  else
7230  return res;
7231  }
7232 }
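/*
Sketch of the memory-type fallback above with hypothetical values: suppose
vkMemReq.memoryTypeBits == 0x0F (types 0..3) and vmaFindMemoryTypeIndex()
first picks type 2. If AllocateMemoryOfType() fails there:

  memoryTypeBits &= ~(1u << 2);  // 0x0B: types 0, 1, 3 remain
  // vmaFindMemoryTypeIndex() runs again on the reduced mask, until either an
  // allocation succeeds or no acceptable type remains
  // (VK_ERROR_OUT_OF_DEVICE_MEMORY).
*/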
7233 
7234 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7235 {
7236  VMA_ASSERT(allocation);
7237 
7238  if(allocation->CanBecomeLost() == false ||
7239  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7240  {
7241  switch(allocation->GetType())
7242  {
7243  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7244  {
7245  VmaBlockVector* pBlockVector = VMA_NULL;
7246  VmaPool hPool = allocation->GetPool();
7247  if(hPool != VK_NULL_HANDLE)
7248  {
7249  pBlockVector = &hPool->m_BlockVector;
7250  }
7251  else
7252  {
7253  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7254  pBlockVector = m_pBlockVectors[memTypeIndex];
7255  }
7256  pBlockVector->Free(allocation);
7257  }
7258  break;
7259  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7260  FreeDedicatedMemory(allocation);
7261  break;
7262  default:
7263  VMA_ASSERT(0);
7264  }
7265  }
7266 
7267  allocation->SetUserData(this, VMA_NULL);
7268  vma_delete(this, allocation);
7269 }
7270 
7271 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7272 {
7273  // Initialize.
7274  InitStatInfo(pStats->total);
7275  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7276  InitStatInfo(pStats->memoryType[i]);
7277  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7278  InitStatInfo(pStats->memoryHeap[i]);
7279 
7280  // Process default pools.
7281  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7282  {
7283  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7284  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7285  VMA_ASSERT(pBlockVector);
7286  pBlockVector->AddStats(pStats);
7287  }
7288 
7289  // Process custom pools.
7290  {
7291  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7292  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7293  {
7294  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7295  }
7296  }
7297 
7298  // Process dedicated allocations.
7299  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7300  {
7301  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7302  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7303  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7304  VMA_ASSERT(pDedicatedAllocVector);
7305  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7306  {
7307  VmaStatInfo allocationStatInfo;
7308  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7309  VmaAddStatInfo(pStats->total, allocationStatInfo);
7310  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7311  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7312  }
7313  }
7314 
7315  // Postprocess.
7316  VmaPostprocessCalcStatInfo(pStats->total);
7317  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7318  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7319  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7320  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7321 }
7322 
7323 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7324 
7325 VkResult VmaAllocator_T::Defragment(
7326  VmaAllocation* pAllocations,
7327  size_t allocationCount,
7328  VkBool32* pAllocationsChanged,
7329  const VmaDefragmentationInfo* pDefragmentationInfo,
7330  VmaDefragmentationStats* pDefragmentationStats)
7331 {
7332  if(pAllocationsChanged != VMA_NULL)
7333  {
7334  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
7335  }
7336  if(pDefragmentationStats != VMA_NULL)
7337  {
7338  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7339  }
7340 
7341  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7342 
7343  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7344 
7345  const size_t poolCount = m_Pools.size();
7346 
7347  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7348  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7349  {
7350  VmaAllocation hAlloc = pAllocations[allocIndex];
7351  VMA_ASSERT(hAlloc);
7352  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7353  // DedicatedAlloc cannot be defragmented.
7354  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7355  // Only HOST_VISIBLE memory types can be defragmented.
7356  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7357  // Lost allocation cannot be defragmented.
7358  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7359  {
7360  VmaBlockVector* pAllocBlockVector = nullptr;
7361 
7362  const VmaPool hAllocPool = hAlloc->GetPool();
7363  // This allocation belongs to custom pool.
7364  if(hAllocPool != VK_NULL_HANDLE)
7365  {
7366  pAllocBlockVector = &hAllocPool->GetBlockVector();
7367  }
7368  // This allocation belongs to general pool.
7369  else
7370  {
7371  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7372  }
7373 
7374  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7375 
7376  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7377  &pAllocationsChanged[allocIndex] : VMA_NULL;
7378  pDefragmentator->AddAllocation(hAlloc, pChanged);
7379  }
7380  }
7381 
7382  VkResult result = VK_SUCCESS;
7383 
7384  // ======== Main processing.
7385 
7386  VkDeviceSize maxBytesToMove = SIZE_MAX;
7387  uint32_t maxAllocationsToMove = UINT32_MAX;
7388  if(pDefragmentationInfo != VMA_NULL)
7389  {
7390  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7391  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7392  }
7393 
7394  // Process standard memory.
7395  for(uint32_t memTypeIndex = 0;
7396  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7397  ++memTypeIndex)
7398  {
7399  // Only HOST_VISIBLE memory types can be defragmented.
7400  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7401  {
7402  result = m_pBlockVectors[memTypeIndex]->Defragment(
7403  pDefragmentationStats,
7404  maxBytesToMove,
7405  maxAllocationsToMove);
7406  }
7407  }
7408 
7409  // Process custom pools.
7410  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7411  {
7412  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7413  pDefragmentationStats,
7414  maxBytesToMove,
7415  maxAllocationsToMove);
7416  }
7417 
7418  // ======== Destroy defragmentators.
7419 
7420  // Process custom pools.
7421  for(size_t poolIndex = poolCount; poolIndex--; )
7422  {
7423  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7424  }
7425 
7426  // Process standard memory.
7427  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7428  {
7429  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7430  {
7431  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7432  }
7433  }
7434 
7435  return result;
7436 }
7437 
7438 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7439 {
7440  if(hAllocation->CanBecomeLost())
7441  {
7442  /*
7443  Warning: This is a carefully designed algorithm.
7444  Do not modify unless you really know what you're doing :)
7445  */
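 // In outline: a lost allocation reports empty info; an allocation already
 // touched in the current frame reports real info; otherwise the loop tries to
 // atomically advance the last-use frame index (compare-exchange) and
 // re-evaluates, so it stays correct against concurrent MakeLost() and other
 // GetAllocationInfo() callers.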
7446  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7447  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7448  for(;;)
7449  {
7450  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7451  {
7452  pAllocationInfo->memoryType = UINT32_MAX;
7453  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7454  pAllocationInfo->offset = 0;
7455  pAllocationInfo->size = hAllocation->GetSize();
7456  pAllocationInfo->pMappedData = VMA_NULL;
7457  pAllocationInfo->pUserData = hAllocation->GetUserData();
7458  return;
7459  }
7460  else if(localLastUseFrameIndex == localCurrFrameIndex)
7461  {
7462  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7463  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7464  pAllocationInfo->offset = hAllocation->GetOffset();
7465  pAllocationInfo->size = hAllocation->GetSize();
7466  pAllocationInfo->pMappedData = VMA_NULL;
7467  pAllocationInfo->pUserData = hAllocation->GetUserData();
7468  return;
7469  }
7470  else // Last use time earlier than current time.
7471  {
7472  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7473  {
7474  localLastUseFrameIndex = localCurrFrameIndex;
7475  }
7476  }
7477  }
7478  }
7479  else
7480  {
7481  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7482  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7483  pAllocationInfo->offset = hAllocation->GetOffset();
7484  pAllocationInfo->size = hAllocation->GetSize();
7485  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7486  pAllocationInfo->pUserData = hAllocation->GetUserData();
7487  }
7488 }
7489 
7490 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7491 {
7492  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7493 
7494  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7495 
7496  if(newCreateInfo.maxBlockCount == 0)
7497  {
7498  newCreateInfo.maxBlockCount = SIZE_MAX;
7499  }
7500  if(newCreateInfo.blockSize == 0)
7501  {
7502  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7503  }
7504 
7505  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7506 
7507  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7508  if(res != VK_SUCCESS)
7509  {
7510  vma_delete(this, *pPool);
7511  *pPool = VMA_NULL;
7512  return res;
7513  }
7514 
7515  // Add to m_Pools.
7516  {
7517  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7518  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7519  }
7520 
7521  return VK_SUCCESS;
7522 }
7523 
7524 void VmaAllocator_T::DestroyPool(VmaPool pool)
7525 {
7526  // Remove from m_Pools.
7527  {
7528  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7529  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7530  VMA_ASSERT(success && "Pool not found in Allocator.");
7531  }
7532 
7533  vma_delete(this, pool);
7534 }
7535 
7536 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7537 {
7538  pool->m_BlockVector.GetPoolStats(pPoolStats);
7539 }
7540 
7541 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7542 {
7543  m_CurrentFrameIndex.store(frameIndex);
7544 }
7545 
7546 void VmaAllocator_T::MakePoolAllocationsLost(
7547  VmaPool hPool,
7548  size_t* pLostAllocationCount)
7549 {
7550  hPool->m_BlockVector.MakePoolAllocationsLost(
7551  m_CurrentFrameIndex.load(),
7552  pLostAllocationCount);
7553 }
7554 
7555 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7556 {
7557  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7558  (*pAllocation)->InitLost();
7559 }
7560 
7561 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7562 {
7563  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7564 
7565  VkResult res;
7566  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7567  {
7568  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7569  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7570  {
7571  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7572  if(res == VK_SUCCESS)
7573  {
7574  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7575  }
7576  }
7577  else
7578  {
7579  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7580  }
7581  }
7582  else
7583  {
7584  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7585  }
7586 
7587  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7588  {
7589  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7590  }
7591 
7592  return res;
7593 }
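/*
Budget bookkeeping example for AllocateVulkanMemory/FreeVulkanMemory
(hypothetical numbers): with pHeapSizeLimit granting this heap 1 GB,
m_HeapSizeLimit[heapIndex] starts at 1 GB; a successful 256 MB allocation
lowers it to 768 MB, FreeVulkanMemory() of that block restores it, and a
request exceeding the remainder fails up front with
VK_ERROR_OUT_OF_DEVICE_MEMORY, before ever calling vkAllocateMemory.
*/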
7594 
7595 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7596 {
7597  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7598  {
7599  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7600  }
7601 
7602  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7603 
7604  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7605  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7606  {
7607  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7608  m_HeapSizeLimit[heapIndex] += size;
7609  }
7610 }
7611 
7612 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7613 {
7614  if(hAllocation->CanBecomeLost())
7615  {
7616  return VK_ERROR_MEMORY_MAP_FAILED;
7617  }
7618 
7619  switch(hAllocation->GetType())
7620  {
7621  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7622  {
7623  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7624  char *pBytes = nullptr;
7625  VkResult res = pBlock->Map(this, (void**)&pBytes);
7626  if(res == VK_SUCCESS)
7627  {
7628  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7629  hAllocation->BlockAllocMap();
7630  }
7631  return res;
7632  }
7633  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7634  return hAllocation->DedicatedAllocMap(this, ppData);
7635  default:
7636  VMA_ASSERT(0);
7637  return VK_ERROR_MEMORY_MAP_FAILED;
7638  }
7639 }
7640 
7641 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7642 {
7643  switch(hAllocation->GetType())
7644  {
7645  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7646  {
7647  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7648  hAllocation->BlockAllocUnmap();
7649  pBlock->Unmap(this);
7650  }
7651  break;
7652  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7653  hAllocation->DedicatedAllocUnmap(this);
7654  break;
7655  default:
7656  VMA_ASSERT(0);
7657  }
7658 }
7659 
7660 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7661 {
7662  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7663 
7664  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7665  {
7666  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7667  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7668  VMA_ASSERT(pDedicatedAllocations);
7669  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7670  VMA_ASSERT(success);
7671  }
7672 
7673  VkDeviceMemory hMemory = allocation->GetMemory();
7674 
7675  if(allocation->GetMappedData() != VMA_NULL)
7676  {
7677  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7678  }
7679 
7680  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7681 
7682  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7683 }
7684 
7685 #if VMA_STATS_STRING_ENABLED
7686 
7687 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7688 {
7689  bool dedicatedAllocationsStarted = false;
7690  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7691  {
7692  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7693  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7694  VMA_ASSERT(pDedicatedAllocVector);
7695  if(pDedicatedAllocVector->empty() == false)
7696  {
7697  if(dedicatedAllocationsStarted == false)
7698  {
7699  dedicatedAllocationsStarted = true;
7700  json.WriteString("DedicatedAllocations");
7701  json.BeginObject();
7702  }
7703 
7704  json.BeginString("Type ");
7705  json.ContinueString(memTypeIndex);
7706  json.EndString();
7707 
7708  json.BeginArray();
7709 
7710  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7711  {
7712  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7713  json.BeginObject(true);
7714 
7715  json.WriteString("Type");
7716  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7717 
7718  json.WriteString("Size");
7719  json.WriteNumber(hAlloc->GetSize());
7720 
7721  const void* pUserData = hAlloc->GetUserData();
7722  if(pUserData != VMA_NULL)
7723  {
7724  json.WriteString("UserData");
7725  if(hAlloc->IsUserDataString())
7726  {
7727  json.WriteString((const char*)pUserData);
7728  }
7729  else
7730  {
7731  json.BeginString();
7732  json.ContinueString_Pointer(pUserData);
7733  json.EndString();
7734  }
7735  }
7736 
7737  json.EndObject();
7738  }
7739 
7740  json.EndArray();
7741  }
7742  }
7743  if(dedicatedAllocationsStarted)
7744  {
7745  json.EndObject();
7746  }
7747 
7748  {
7749  bool allocationsStarted = false;
7750  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7751  {
7752  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7753  {
7754  if(allocationsStarted == false)
7755  {
7756  allocationsStarted = true;
7757  json.WriteString("DefaultPools");
7758  json.BeginObject();
7759  }
7760 
7761  json.BeginString("Type ");
7762  json.ContinueString(memTypeIndex);
7763  json.EndString();
7764 
7765  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7766  }
7767  }
7768  if(allocationsStarted)
7769  {
7770  json.EndObject();
7771  }
7772  }
7773 
7774  {
7775  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7776  const size_t poolCount = m_Pools.size();
7777  if(poolCount > 0)
7778  {
7779  json.WriteString("Pools");
7780  json.BeginArray();
7781  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7782  {
7783  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7784  }
7785  json.EndArray();
7786  }
7787  }
7788 }
7789 
7790 #endif // #if VMA_STATS_STRING_ENABLED
7791 
7792 static VkResult AllocateMemoryForImage(
7793  VmaAllocator allocator,
7794  VkImage image,
7795  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7796  VmaSuballocationType suballocType,
7797  VmaAllocation* pAllocation)
7798 {
7799  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7800 
7801  VkMemoryRequirements vkMemReq = {};
7802  bool requiresDedicatedAllocation = false;
7803  bool prefersDedicatedAllocation = false;
7804  allocator->GetImageMemoryRequirements(image, vkMemReq,
7805  requiresDedicatedAllocation, prefersDedicatedAllocation);
7806 
7807  return allocator->AllocateMemory(
7808  vkMemReq,
7809  requiresDedicatedAllocation,
7810  prefersDedicatedAllocation,
7811  VK_NULL_HANDLE, // dedicatedBuffer
7812  image, // dedicatedImage
7813  *pAllocationCreateInfo,
7814  suballocType,
7815  pAllocation);
7816 }
7817 
7818 ////////////////////////////////////////////////////////////////////////////////
7819 // Public interface
7820 
7821 VkResult vmaCreateAllocator(
7822  const VmaAllocatorCreateInfo* pCreateInfo,
7823  VmaAllocator* pAllocator)
7824 {
7825  VMA_ASSERT(pCreateInfo && pAllocator);
7826  VMA_DEBUG_LOG("vmaCreateAllocator");
7827  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7828  return VK_SUCCESS;
7829 }
7830 
7831 void vmaDestroyAllocator(
7832  VmaAllocator allocator)
7833 {
7834  if(allocator != VK_NULL_HANDLE)
7835  {
7836  VMA_DEBUG_LOG("vmaDestroyAllocator");
7837  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7838  vma_delete(&allocationCallbacks, allocator);
7839  }
7840 }
7841 
7842 void vmaGetPhysicalDeviceProperties(
7843  VmaAllocator allocator,
7844  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7845 {
7846  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7847  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7848 }
7849 
7850 void vmaGetMemoryProperties(
7851  VmaAllocator allocator,
7852  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7853 {
7854  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7855  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7856 }
7857 
7858 void vmaGetMemoryTypeProperties(
7859  VmaAllocator allocator,
7860  uint32_t memoryTypeIndex,
7861  VkMemoryPropertyFlags* pFlags)
7862 {
7863  VMA_ASSERT(allocator && pFlags);
7864  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7865  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7866 }
7867 
7868 void vmaSetCurrentFrameIndex(
7869  VmaAllocator allocator,
7870  uint32_t frameIndex)
7871 {
7872  VMA_ASSERT(allocator);
7873  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7874 
7875  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7876 
7877  allocator->SetCurrentFrameIndex(frameIndex);
7878 }
7879 
7880 void vmaCalculateStats(
7881  VmaAllocator allocator,
7882  VmaStats* pStats)
7883 {
7884  VMA_ASSERT(allocator && pStats);
7885  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7886  allocator->CalculateStats(pStats);
7887 }
7888 
7889 #if VMA_STATS_STRING_ENABLED
7890 
7891 void vmaBuildStatsString(
7892  VmaAllocator allocator,
7893  char** ppStatsString,
7894  VkBool32 detailedMap)
7895 {
7896  VMA_ASSERT(allocator && ppStatsString);
7897  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7898 
7899  VmaStringBuilder sb(allocator);
7900  {
7901  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7902  json.BeginObject();
7903 
7904  VmaStats stats;
7905  allocator->CalculateStats(&stats);
7906 
7907  json.WriteString("Total");
7908  VmaPrintStatInfo(json, stats.total);
7909 
7910  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7911  {
7912  json.BeginString("Heap ");
7913  json.ContinueString(heapIndex);
7914  json.EndString();
7915  json.BeginObject();
7916 
7917  json.WriteString("Size");
7918  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7919 
7920  json.WriteString("Flags");
7921  json.BeginArray(true);
7922  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7923  {
7924  json.WriteString("DEVICE_LOCAL");
7925  }
7926  json.EndArray();
7927 
7928  if(stats.memoryHeap[heapIndex].blockCount > 0)
7929  {
7930  json.WriteString("Stats");
7931  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7932  }
7933 
7934  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7935  {
7936  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7937  {
7938  json.BeginString("Type ");
7939  json.ContinueString(typeIndex);
7940  json.EndString();
7941 
7942  json.BeginObject();
7943 
7944  json.WriteString("Flags");
7945  json.BeginArray(true);
7946  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7947  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7948  {
7949  json.WriteString("DEVICE_LOCAL");
7950  }
7951  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7952  {
7953  json.WriteString("HOST_VISIBLE");
7954  }
7955  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7956  {
7957  json.WriteString("HOST_COHERENT");
7958  }
7959  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7960  {
7961  json.WriteString("HOST_CACHED");
7962  }
7963  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7964  {
7965  json.WriteString("LAZILY_ALLOCATED");
7966  }
7967  json.EndArray();
7968 
7969  if(stats.memoryType[typeIndex].blockCount > 0)
7970  {
7971  json.WriteString("Stats");
7972  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7973  }
7974 
7975  json.EndObject();
7976  }
7977  }
7978 
7979  json.EndObject();
7980  }
7981  if(detailedMap == VK_TRUE)
7982  {
7983  allocator->PrintDetailedMap(json);
7984  }
7985 
7986  json.EndObject();
7987  }
7988 
7989  const size_t len = sb.GetLength();
7990  char* const pChars = vma_new_array(allocator, char, len + 1);
7991  if(len > 0)
7992  {
7993  memcpy(pChars, sb.GetData(), len);
7994  }
7995  pChars[len] = '\0';
7996  *ppStatsString = pChars;
7997 }
7998 
7999 void vmaFreeStatsString(
8000  VmaAllocator allocator,
8001  char* pStatsString)
8002 {
8003  if(pStatsString != VMA_NULL)
8004  {
8005  VMA_ASSERT(allocator);
8006  size_t len = strlen(pStatsString);
8007  vma_delete_array(allocator, pStatsString, len + 1);
8008  }
8009 }
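/*
Usage sketch for the two functions above:

  char* statsString = VMA_NULL;
  vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE adds the detailed map
  // ... write the JSON to a log or file ...
  vmaFreeStatsString(allocator, statsString);
*/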
8010 
8011 #endif // #if VMA_STATS_STRING_ENABLED
8012 
8013 /*
8014 This function is not protected by any mutex because it just reads immutable data.
8015 */
8016 VkResult vmaFindMemoryTypeIndex(
8017  VmaAllocator allocator,
8018  uint32_t memoryTypeBits,
8019  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8020  uint32_t* pMemoryTypeIndex)
8021 {
8022  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8023  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8024  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8025 
8026  if(pAllocationCreateInfo->memoryTypeBits != 0)
8027  {
8028  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8029  }
8030 
8031  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8032  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8033 
8034  // Convert usage to requiredFlags and preferredFlags.
8035  switch(pAllocationCreateInfo->usage)
8036  {
8037  case VMA_MEMORY_USAGE_UNKNOWN:
8038  break;
8039  case VMA_MEMORY_USAGE_GPU_ONLY:
8040  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8041  break;
8042  case VMA_MEMORY_USAGE_CPU_ONLY:
8043  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8044  break;
8045  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8046  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8047  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8048  break;
8049  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8050  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8051  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8052  break;
8053  default:
8054  break;
8055  }
8056 
8057  *pMemoryTypeIndex = UINT32_MAX;
8058  uint32_t minCost = UINT32_MAX;
8059  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8060  memTypeIndex < allocator->GetMemoryTypeCount();
8061  ++memTypeIndex, memTypeBit <<= 1)
8062  {
8063  // This memory type is acceptable according to memoryTypeBits bitmask.
8064  if((memTypeBit & memoryTypeBits) != 0)
8065  {
8066  const VkMemoryPropertyFlags currFlags =
8067  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8068  // This memory type contains requiredFlags.
8069  if((requiredFlags & ~currFlags) == 0)
8070  {
8071  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8072  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8073  // Remember memory type with lowest cost.
8074  if(currCost < minCost)
8075  {
8076  *pMemoryTypeIndex = memTypeIndex;
8077  if(currCost == 0)
8078  {
8079  return VK_SUCCESS;
8080  }
8081  minCost = currCost;
8082  }
8083  }
8084  }
8085  }
8086  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8087 }
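/*
Worked example of the cost search above: with usage = VMA_MEMORY_USAGE_CPU_TO_GPU,
requiredFlags = HOST_VISIBLE and preferredFlags = DEVICE_LOCAL. A candidate type
with HOST_VISIBLE only has cost 1 (DEVICE_LOCAL missing); one with
HOST_VISIBLE | DEVICE_LOCAL has cost 0 and is returned immediately. Typical call
(a client-side sketch):

  VmaAllocationCreateInfo allocCreateInfo = {};
  allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
  uint32_t memTypeIndex;
  VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/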
8088 
8089 VkResult vmaCreatePool(
8090  VmaAllocator allocator,
8091  const VmaPoolCreateInfo* pCreateInfo,
8092  VmaPool* pPool)
8093 {
8094  VMA_ASSERT(allocator && pCreateInfo && pPool);
8095 
8096  VMA_DEBUG_LOG("vmaCreatePool");
8097 
8098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8099 
8100  return allocator->CreatePool(pCreateInfo, pPool);
8101 }
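/*
Usage sketch (parameter values are illustrative, not defaults):

  VmaPoolCreateInfo poolInfo = {};
  poolInfo.memoryTypeIndex = memTypeIndex;   // e.g. from vmaFindMemoryTypeIndex()
  poolInfo.blockSize = 64ull * 1024 * 1024;  // 0 would select CalcPreferredBlockSize()
  poolInfo.maxBlockCount = 2;                // 0 would mean SIZE_MAX (unlimited)
  VmaPool pool = VK_NULL_HANDLE;
  VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
  // Allocations then target the pool via VmaAllocationCreateInfo::pool.
  // Destroy with vmaDestroyPool(allocator, pool) when no longer needed.
*/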
8102 
8103 void vmaDestroyPool(
8104  VmaAllocator allocator,
8105  VmaPool pool)
8106 {
8107  VMA_ASSERT(allocator);
8108 
8109  if(pool == VK_NULL_HANDLE)
8110  {
8111  return;
8112  }
8113 
8114  VMA_DEBUG_LOG("vmaDestroyPool");
8115 
8116  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8117 
8118  allocator->DestroyPool(pool);
8119 }
8120 
8121 void vmaGetPoolStats(
8122  VmaAllocator allocator,
8123  VmaPool pool,
8124  VmaPoolStats* pPoolStats)
8125 {
8126  VMA_ASSERT(allocator && pool && pPoolStats);
8127 
8128  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8129 
8130  allocator->GetPoolStats(pool, pPoolStats);
8131 }
8132 
8133 void vmaMakePoolAllocationsLost(
8134  VmaAllocator allocator,
8135  VmaPool pool,
8136  size_t* pLostAllocationCount)
8137 {
8138  VMA_ASSERT(allocator && pool);
8139 
8140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8141 
8142  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8143 }
8144 
8145 VkResult vmaAllocateMemory(
8146  VmaAllocator allocator,
8147  const VkMemoryRequirements* pVkMemoryRequirements,
8148  const VmaAllocationCreateInfo* pCreateInfo,
8149  VmaAllocation* pAllocation,
8150  VmaAllocationInfo* pAllocationInfo)
8151 {
8152  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8153 
8154  VMA_DEBUG_LOG("vmaAllocateMemory");
8155 
8156  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8157 
8158  VkResult result = allocator->AllocateMemory(
8159  *pVkMemoryRequirements,
8160  false, // requiresDedicatedAllocation
8161  false, // prefersDedicatedAllocation
8162  VK_NULL_HANDLE, // dedicatedBuffer
8163  VK_NULL_HANDLE, // dedicatedImage
8164  *pCreateInfo,
8165  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8166  pAllocation);
8167 
8168  if(pAllocationInfo && result == VK_SUCCESS)
8169  {
8170  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8171  }
8172 
8173  return result;
8174 }
8175 
8176 VkResult vmaAllocateMemoryForBuffer(
8177  VmaAllocator allocator,
8178  VkBuffer buffer,
8179  const VmaAllocationCreateInfo* pCreateInfo,
8180  VmaAllocation* pAllocation,
8181  VmaAllocationInfo* pAllocationInfo)
8182 {
8183  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8184 
8185  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8186 
8187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8188 
8189  VkMemoryRequirements vkMemReq = {};
8190  bool requiresDedicatedAllocation = false;
8191  bool prefersDedicatedAllocation = false;
8192  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8193  requiresDedicatedAllocation,
8194  prefersDedicatedAllocation);
8195 
8196  VkResult result = allocator->AllocateMemory(
8197  vkMemReq,
8198  requiresDedicatedAllocation,
8199  prefersDedicatedAllocation,
8200  buffer, // dedicatedBuffer
8201  VK_NULL_HANDLE, // dedicatedImage
8202  *pCreateInfo,
8203  VMA_SUBALLOCATION_TYPE_BUFFER,
8204  pAllocation);
8205 
8206  if(pAllocationInfo && result == VK_SUCCESS)
8207  {
8208  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8209  }
8210 
8211  return result;
8212 }
8213 
8214 VkResult vmaAllocateMemoryForImage(
8215  VmaAllocator allocator,
8216  VkImage image,
8217  const VmaAllocationCreateInfo* pCreateInfo,
8218  VmaAllocation* pAllocation,
8219  VmaAllocationInfo* pAllocationInfo)
8220 {
8221  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8222 
8223  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8224 
8225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8226 
8227  VkResult result = AllocateMemoryForImage(
8228  allocator,
8229  image,
8230  pCreateInfo,
8231  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8232  pAllocation);
8233 
8234  if(pAllocationInfo && result == VK_SUCCESS)
8235  {
8236  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8237  }
8238 
8239  return result;
8240 }
8241 
8242 void vmaFreeMemory(
8243  VmaAllocator allocator,
8244  VmaAllocation allocation)
8245 {
8246  VMA_ASSERT(allocator && allocation);
8247 
8248  VMA_DEBUG_LOG("vmaFreeMemory");
8249 
8250  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8251 
8252  allocator->FreeMemory(allocation);
8253 }
8254 
8255 void vmaGetAllocationInfo(
8256  VmaAllocator allocator,
8257  VmaAllocation allocation,
8258  VmaAllocationInfo* pAllocationInfo)
8259 {
8260  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8261 
8262  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8263 
8264  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8265 }
8266 
8267 void vmaSetAllocationUserData(
8268  VmaAllocator allocator,
8269  VmaAllocation allocation,
8270  void* pUserData)
8271 {
8272  VMA_ASSERT(allocator && allocation);
8273 
8274  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8275 
8276  allocation->SetUserData(allocator, pUserData);
8277 }
8278 
8279 void vmaCreateLostAllocation(
8280  VmaAllocator allocator,
8281  VmaAllocation* pAllocation)
8282 {
8283  VMA_ASSERT(allocator && pAllocation);
8284 
8285  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
8286 
8287  allocator->CreateLostAllocation(pAllocation);
8288 }
8289 
8290 VkResult vmaMapMemory(
8291  VmaAllocator allocator,
8292  VmaAllocation allocation,
8293  void** ppData)
8294 {
8295  VMA_ASSERT(allocator && allocation && ppData);
8296 
8297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8298 
8299  return allocator->Map(allocation, ppData);
8300 }
8301 
8302 void vmaUnmapMemory(
8303  VmaAllocator allocator,
8304  VmaAllocation allocation)
8305 {
8306  VMA_ASSERT(allocator && allocation);
8307 
8308  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8309 
8310  allocator->Unmap(allocation);
8311 }
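/*
Map/unmap usage sketch (assumes the allocation lives in HOST_VISIBLE memory and
cannot become lost; srcData/srcSize are the caller's):

  void* mappedData = VMA_NULL;
  VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
  if(res == VK_SUCCESS)
  {
      memcpy(mappedData, srcData, srcSize);
      vmaUnmapMemory(allocator, allocation);
  }
  // For block allocations, Map() above returns the block's base mapping plus
  // the allocation's offset, so many allocations can share one vkMapMemory.
*/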
8312 
8313 VkResult vmaDefragment(
8314  VmaAllocator allocator,
8315  VmaAllocation* pAllocations,
8316  size_t allocationCount,
8317  VkBool32* pAllocationsChanged,
8318  const VmaDefragmentationInfo *pDefragmentationInfo,
8319  VmaDefragmentationStats* pDefragmentationStats)
8320 {
8321  VMA_ASSERT(allocator && pAllocations);
8322 
8323  VMA_DEBUG_LOG("vmaDefragment");
8324 
8325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8326 
8327  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8328 }
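/*
Defragmentation usage sketch. Per Defragment() above, only block allocations in
HOST_VISIBLE memory that are not lost are considered; passing null
VmaDefragmentationInfo means no limits on bytes/allocations moved.

  std::vector<VkBool32> changed(allocCount);  // requires <vector>
  VmaDefragmentationStats stats = {};
  VkResult res = vmaDefragment(allocator, allocations, allocCount,
      changed.data(), VMA_NULL, &stats);
  // Where changed[i] == VK_TRUE, the allocation was moved: the caller must
  // re-create and re-bind any buffer/image that was bound to it.
*/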
8329 
8330 VkResult vmaCreateBuffer(
8331  VmaAllocator allocator,
8332  const VkBufferCreateInfo* pBufferCreateInfo,
8333  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8334  VkBuffer* pBuffer,
8335  VmaAllocation* pAllocation,
8336  VmaAllocationInfo* pAllocationInfo)
8337 {
8338  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8339 
8340  VMA_DEBUG_LOG("vmaCreateBuffer");
8341 
8342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8343 
8344  *pBuffer = VK_NULL_HANDLE;
8345  *pAllocation = VK_NULL_HANDLE;
8346 
8347  // 1. Create VkBuffer.
8348  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8349  allocator->m_hDevice,
8350  pBufferCreateInfo,
8351  allocator->GetAllocationCallbacks(),
8352  pBuffer);
8353  if(res >= 0)
8354  {
8355  // 2. vkGetBufferMemoryRequirements.
8356  VkMemoryRequirements vkMemReq = {};
8357  bool requiresDedicatedAllocation = false;
8358  bool prefersDedicatedAllocation = false;
8359  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8360  requiresDedicatedAllocation, prefersDedicatedAllocation);
8361 
8362  // 3. Allocate memory using allocator.
8363  res = allocator->AllocateMemory(
8364  vkMemReq,
8365  requiresDedicatedAllocation,
8366  prefersDedicatedAllocation,
8367  *pBuffer, // dedicatedBuffer
8368  VK_NULL_HANDLE, // dedicatedImage
8369  *pAllocationCreateInfo,
8370  VMA_SUBALLOCATION_TYPE_BUFFER,
8371  pAllocation);
8372  if(res >= 0)
8373  {
8374  // 4. Bind buffer with memory.
8375  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8376  allocator->m_hDevice,
8377  *pBuffer,
8378  (*pAllocation)->GetMemory(),
8379  (*pAllocation)->GetOffset());
8380  if(res >= 0)
8381  {
8382  // All steps succeeded.
8383  if(pAllocationInfo != VMA_NULL)
8384  {
8385  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8386  }
8387  return VK_SUCCESS;
8388  }
8389  allocator->FreeMemory(*pAllocation);
8390  *pAllocation = VK_NULL_HANDLE;
8391  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8392  *pBuffer = VK_NULL_HANDLE;
8393  return res;
8394  }
8395  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8396  *pBuffer = VK_NULL_HANDLE;
8397  return res;
8398  }
8399  return res;
8400 }
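/*
Typical call (buffer parameters are illustrative assumptions):

  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
  bufCreateInfo.size = 65536;
  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

  VmaAllocationCreateInfo allocCreateInfo = {};
  allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
  allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped

  VkBuffer buf = VK_NULL_HANDLE;
  VmaAllocation alloc = VK_NULL_HANDLE;
  VmaAllocationInfo allocInfo = {};
  VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
      &buf, &alloc, &allocInfo);
  // On success allocInfo.pMappedData is ready to use;
  // clean up with vmaDestroyBuffer(allocator, buf, alloc).
*/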
8401 
8402 void vmaDestroyBuffer(
8403  VmaAllocator allocator,
8404  VkBuffer buffer,
8405  VmaAllocation allocation)
8406 {
8407  if(buffer != VK_NULL_HANDLE)
8408  {
8409  VMA_ASSERT(allocator);
8410 
8411  VMA_DEBUG_LOG("vmaDestroyBuffer");
8412 
8413  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8414 
8415  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8416 
8417  allocator->FreeMemory(allocation);
8418  }
8419 }
8420 
8421 VkResult vmaCreateImage(
8422  VmaAllocator allocator,
8423  const VkImageCreateInfo* pImageCreateInfo,
8424  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8425  VkImage* pImage,
8426  VmaAllocation* pAllocation,
8427  VmaAllocationInfo* pAllocationInfo)
8428 {
8429  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8430 
8431  VMA_DEBUG_LOG("vmaCreateImage");
8432 
8433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8434 
8435  *pImage = VK_NULL_HANDLE;
8436  *pAllocation = VK_NULL_HANDLE;
8437 
8438  // 1. Create VkImage.
8439  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8440  allocator->m_hDevice,
8441  pImageCreateInfo,
8442  allocator->GetAllocationCallbacks(),
8443  pImage);
8444  if(res >= 0)
8445  {
8446  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8447  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8448  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8449 
8450  // 2. Allocate memory using allocator.
8451  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8452  if(res >= 0)
8453  {
8454  // 3. Bind image with memory.
8455  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8456  allocator->m_hDevice,
8457  *pImage,
8458  (*pAllocation)->GetMemory(),
8459  (*pAllocation)->GetOffset());
8460  if(res >= 0)
8461  {
8462  // All steps succeeded.
8463  if(pAllocationInfo != VMA_NULL)
8464  {
8465  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8466  }
8467  return VK_SUCCESS;
8468  }
8469  allocator->FreeMemory(*pAllocation);
8470  *pAllocation = VK_NULL_HANDLE;
8471  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8472  *pImage = VK_NULL_HANDLE;
8473  return res;
8474  }
8475  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8476  *pImage = VK_NULL_HANDLE;
8477  return res;
8478  }
8479  return res;
8480 }
8481 
8482 void vmaDestroyImage(
8483  VmaAllocator allocator,
8484  VkImage image,
8485  VmaAllocation allocation)
8486 {
8487  if(image != VK_NULL_HANDLE)
8488  {
8489  VMA_ASSERT(allocator);
8490 
8491  VMA_DEBUG_LOG("vmaDestroyImage");
8492 
8493  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8494 
8495  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8496 
8497  allocator->FreeMemory(allocation);
8498  }
8499 }
8500 
8501 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:764
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1011
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:789
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:774
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:974
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:768
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1279
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:786
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1445
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1149
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1203
Definition: vk_mem_alloc.h:1048
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:757
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1086
Definition: vk_mem_alloc.h:995
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:801
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:854
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:783
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:798
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:999
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:919
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:771
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:918
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:779
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1449
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:818
VmaStatInfo total
Definition: vk_mem_alloc.h:928
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1457
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1070
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1440
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:772
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:693
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:792
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1157
Definition: vk_mem_alloc.h:1151
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1289
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:769
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1107
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1173
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1209
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:755
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1160
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:956
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1435
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1453
Definition: vk_mem_alloc.h:989
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1094
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:770
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:924
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:699
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:720
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:725
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1455
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1081
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1219
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:765
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:907
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1168
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:712
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1055
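A sketch of creating a buffer together with its memory in one vmaCreateBuffer() call; size and usage flags are illustrative:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                                   &buffer, &allocation, NULL);
    /* Free both with vmaDestroyBuffer(allocator, buffer, allocation). */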
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:920
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:716
VmaPoolCreateFlags flags
Use a combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1163
Definition: vk_mem_alloc.h:994
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
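Analogously, a sketch for a sampled 2D image; format and extent are illustrative:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 512;
    imgCreateInfo.extent.height = 512;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
                                  &image, &allocation, NULL);
    /* Free both with vmaDestroyImage(allocator, image, allocation). */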
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1076
Definition: vk_mem_alloc.h:1067
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:910
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:767
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1181
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:804
size_t unusedRangeCount
Number of contiguous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1212
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1065
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1100
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on the maximum number of bytes that can be allocated out of a particular Vulkan memory heap.
Definition: vk_mem_alloc.h:842
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:926
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
Definition: vk_mem_alloc.h:1035
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:919
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:776
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:714
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:775
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by the given allocation and returns a pointer to it.
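A sketch of a map/write/unmap sequence; vmaUnmapMemory() is the matching call. The allocation must reside in host-visible memory, and myData/myDataSize are hypothetical:

    void* pData;
    if (vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, myData, myDataSize);   /* needs <string.h> */
        vmaUnmapMemory(allocator, allocation);
    }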
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:1195
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1303
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
Definition: vk_mem_alloc.h:795
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:919
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:916
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1200
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1284
Definition: vk_mem_alloc.h:1063
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1451
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:763
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
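A minimal creation sketch, assuming physicalDevice and device come from normal Vulkan initialization; zeroed members fall back to library defaults:

    VmaAllocatorCreateInfo allocatorInfo = {0};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    /* Pair with vmaDestroyAllocator(allocator) at shutdown. */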
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:778
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:914
Definition: vk_mem_alloc.h:961
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1153
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:912
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:773
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:777
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:1022
Definition: vk_mem_alloc.h:983
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1298
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:753
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by the function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:766
Parameters of a VmaAllocation object, which can be retrieved using the function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1265
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General-purpose memory allocation.
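A sketch of the general-purpose path, where the VkMemoryRequirements come from the application (device and buffer are assumed to exist already):

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo createInfo = {0};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo,
                                     &allocation, &allocInfo);
    /* allocInfo.deviceMemory + allocInfo.offset can then be passed to vkBindBufferMemory(). */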
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets the index of the current frame.
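A sketch of advancing the frame index once per rendered frame, which drives the frameInUseCount / lost-allocation machinery:

    uint32_t frameIndex = 0;
    for (;;)   /* hypothetical render loop */
    {
        vmaSetCurrentFrameIndex(allocator, frameIndex);
        /* record and submit the frame here */
        ++frameIndex;
    }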
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1131
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:920
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:927
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
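A sketch of teardown order: resources first, then custom pools, then the allocator itself:

    vmaDestroyBuffer(allocator, buffer, allocation);
    vmaDestroyPool(allocator, pool);        /* only if a custom pool was created */
    vmaDestroyAllocator(allocator);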
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1206
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:920
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1270