//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;
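
/*
Example (an illustrative sketch added here, not part of the original API
documentation): counting live VkDeviceMemory blocks via these callbacks. The
struct and function-pointer types are real; the counter and handlers below are
hypothetical, and the plain counter is not thread-safe.

    static uint32_t g_DeviceMemoryBlockCount = 0;

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        ++g_DeviceMemoryBlockCount;
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        --g_DeviceMemoryBlockCount;
    }

    VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
    deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
    deviceMemoryCallbacks.pfnFree = MyFreeCallback;
    // Assign to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks before vmaCreateAllocator().
*/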

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
} VmaVulkanFunctions;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice physicalDevice;

    VkDevice device;

    VkDeviceSize preferredLargeHeapBlockSize;

    VkDeviceSize preferredSmallHeapBlockSize;

    const VkAllocationCallbacks* pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* pHeapSizeLimit;

    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

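/*
Usage sketch (illustrative; `physicalDevice` and `device` are hypothetical
handles created elsewhere with the Vulkan API):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
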
void vmaDestroyAllocator(
    VmaAllocator allocator);

void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    void* pUserData;
    VmaPool pool;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

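/*
Usage sketch (illustrative): find a memory type for a staging buffer that must
be host-visible. `memReq` is a hypothetical VkMemoryRequirements obtained from
vkGetBufferMemoryRequirements.

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    createInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
*/
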
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* pMappedData;
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

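/*
Usage sketch (illustrative; `buf` is a hypothetical VkBuffer created with
vkCreateBuffer):

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buf, &memReq);

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VmaAllocationInfo allocationInfo;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &allocation, &allocationInfo);
*/
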
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

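/*
Usage sketch (illustrative; assumes `allocation` lives in a host-visible memory
type and `myData` is a hypothetical source array):

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    memcpy(pData, myData, sizeof(myData));
    vmaUnmapMemory(allocator, allocation);
*/
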
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

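/*
Usage sketch (illustrative; `allocations`/`allocationCount` are a hypothetical
array of VmaAllocation handles; the null pointers request default behavior):

    VmaDefragmentationStats stats;
    VkResult res = vmaDefragment(allocator, allocations, allocationCount, NULL, NULL, &stats);
*/
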
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

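/*
Usage sketch (illustrative): create a buffer together with its memory in one
call, then destroy both.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
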
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#ifdef __INTELLISENSE__
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <new> // for placement new, used by vma_new and vma_new_array

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#ifndef VMA_STATIC_VULKAN_FUNCTIONS
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

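/*
For example (an illustrative sketch), when loading Vulkan entry points
dynamically with vkGetDeviceProcAddr, you could fill VmaVulkanFunctions
yourself and define VMA_STATIC_VULKAN_FUNCTIONS to 0:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory =
        (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
    // ... fill the remaining members the same way ...
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/
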
// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

1578 
1579 #ifndef VMA_NULL
1580  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1581  #define VMA_NULL nullptr
1582 #endif
1583 
1584 #ifndef VMA_ALIGN_OF
1585  #define VMA_ALIGN_OF(type) (__alignof(type))
1586 #endif
1587 
1588 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1589  #if defined(_WIN32)
1590  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1591  #else
1592  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1593  #endif
1594 #endif
1595 
1596 #ifndef VMA_SYSTEM_FREE
1597  #if defined(_WIN32)
1598  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1599  #else
1600  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1601  #endif
1602 #endif
1603 
1604 #ifndef VMA_MIN
1605  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1606 #endif
1607 
1608 #ifndef VMA_MAX
1609  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1610 #endif
1611 
1612 #ifndef VMA_SWAP
1613  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1614 #endif
1615 
1616 #ifndef VMA_SORT
1617  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1618 #endif
1619 
1620 #ifndef VMA_DEBUG_LOG
1621  #define VMA_DEBUG_LOG(format, ...)
1622  /*
1623  #define VMA_DEBUG_LOG(format, ...) do { \
1624  printf(format, __VA_ARGS__); \
1625  printf("\n"); \
1626  } while(false)
1627  */
1628 #endif
1629 
1630 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1631 #if VMA_STATS_STRING_ENABLED
1632  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1633  {
1634  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1635  }
1636  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1637  {
1638  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1639  }
1640  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1641  {
1642  snprintf(outStr, strLen, "%p", ptr);
1643  }
1644 #endif
1645 
#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

/*
If providing your own implementation, you need to implement a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_BEST_FIT
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

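// Example (illustrative): CountBitsSet(0x0000000B) == 3, since 0xB is binary 1011.
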
// Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

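// More worked cases (illustrative): VmaAlignUp(8, 8) = 8 (already aligned),
// VmaAlignUp(9, 8) = 16, VmaAlignUp(0, 8) = 0.
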
// Division with mathematical rounding to the nearest integer.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

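/*
Worked example (illustrative): with pageSize = 4096, resource A at offset 0 and
size 4000 ends at byte 3999, which lies on page 0. Resource B at offset 4096
starts on page 1, so the function returns false (no page is shared). With B at
offset 4000 instead, both fall on page 0 and the function returns true.
*/
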
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

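// Example (illustrative): for a sorted range {1, 3, 5} and key 4, the function
// returns an iterator to 5, i.e. the position where 4 would be inserted.
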
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return size_t(it - vector.data());
    }
    else
    {
        return vector.size();
    }
}

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. The number of elements that can be allocated is not bounded because
the allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has a free item: Create a new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
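
// Example (illustrative): a pool handing out fixed-size items in blocks of 32.
//
//     VmaPoolAllocator<int> intPool(pAllocationCallbacks, 32);
//     int* p = intPool.Alloc();
//     // ... use *p ...
//     intPool.Free(p);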

////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computation just to return all items to m_ItemAllocator as free.
}

2541 
2542 template<typename T>
2543 void VmaRawList<T>::Clear()
2544 {
2545  if(IsEmpty() == false)
2546  {
2547  ItemType* pItem = m_pBack;
2548  while(pItem != VMA_NULL)
2549  {
2550  ItemType* const pPrevItem = pItem->pPrev;
2551  m_ItemAllocator.Free(pItem);
2552  pItem = pPrevItem;
2553  }
2554  m_pFront = VMA_NULL;
2555  m_pBack = VMA_NULL;
2556  m_Count = 0;
2557  }
2558 }
2559 
2560 template<typename T>
2561 VmaListItem<T>* VmaRawList<T>::PushBack()
2562 {
2563  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2564  pNewItem->pNext = VMA_NULL;
2565  if(IsEmpty())
2566  {
2567  pNewItem->pPrev = VMA_NULL;
2568  m_pFront = pNewItem;
2569  m_pBack = pNewItem;
2570  m_Count = 1;
2571  }
2572  else
2573  {
2574  pNewItem->pPrev = m_pBack;
2575  m_pBack->pNext = pNewItem;
2576  m_pBack = pNewItem;
2577  ++m_Count;
2578  }
2579  return pNewItem;
2580 }
2581 
2582 template<typename T>
2583 VmaListItem<T>* VmaRawList<T>::PushFront()
2584 {
2585  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2586  pNewItem->pPrev = VMA_NULL;
2587  if(IsEmpty())
2588  {
2589  pNewItem->pNext = VMA_NULL;
2590  m_pFront = pNewItem;
2591  m_pBack = pNewItem;
2592  m_Count = 1;
2593  }
2594  else
2595  {
2596  pNewItem->pNext = m_pFront;
2597  m_pFront->pPrev = pNewItem;
2598  m_pFront = pNewItem;
2599  ++m_Count;
2600  }
2601  return pNewItem;
2602 }
2603 
2604 template<typename T>
2605 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2606 {
2607  ItemType* const pNewItem = PushBack();
2608  pNewItem->Value = value;
2609  return pNewItem;
2610 }
2611 
2612 template<typename T>
2613 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2614 {
2615  ItemType* const pNewItem = PushFront();
2616  pNewItem->Value = value;
2617  return pNewItem;
2618 }
2619 
2620 template<typename T>
2621 void VmaRawList<T>::PopBack()
2622 {
2623  VMA_HEAVY_ASSERT(m_Count > 0);
2624  ItemType* const pBackItem = m_pBack;
2625  ItemType* const pPrevItem = pBackItem->pPrev;
2626  if(pPrevItem != VMA_NULL)
2627  {
2628  pPrevItem->pNext = VMA_NULL;
2629  }
2630  m_pBack = pPrevItem;
2631  m_ItemAllocator.Free(pBackItem);
2632  --m_Count;
2633 }
2634 
2635 template<typename T>
2636 void VmaRawList<T>::PopFront()
2637 {
2638  VMA_HEAVY_ASSERT(m_Count > 0);
2639  ItemType* const pFrontItem = m_pFront;
2640  ItemType* const pNextItem = pFrontItem->pNext;
2641  if(pNextItem != VMA_NULL)
2642  {
2643  pNextItem->pPrev = VMA_NULL;
2644  }
2645  m_pFront = pNextItem;
2646  m_ItemAllocator.Free(pFrontItem);
2647  --m_Count;
2648 }
2649 
2650 template<typename T>
2651 void VmaRawList<T>::Remove(ItemType* pItem)
2652 {
2653  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2654  VMA_HEAVY_ASSERT(m_Count > 0);
2655 
2656  if(pItem->pPrev != VMA_NULL)
2657  {
2658  pItem->pPrev->pNext = pItem->pNext;
2659  }
2660  else
2661  {
2662  VMA_HEAVY_ASSERT(m_pFront == pItem);
2663  m_pFront = pItem->pNext;
2664  }
2665 
2666  if(pItem->pNext != VMA_NULL)
2667  {
2668  pItem->pNext->pPrev = pItem->pPrev;
2669  }
2670  else
2671  {
2672  VMA_HEAVY_ASSERT(m_pBack == pItem);
2673  m_pBack = pItem->pPrev;
2674  }
2675 
2676  m_ItemAllocator.Free(pItem);
2677  --m_Count;
2678 }
2679 
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

3037 
3038 class VmaDeviceMemoryBlock;
3039 
3040 struct VmaAllocation_T
3041 {
3042 private:
3043  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3044 
3045  enum FLAGS
3046  {
3047  FLAG_USER_DATA_STRING = 0x01,
3048  };
3049 
3050 public:
3051  enum ALLOCATION_TYPE
3052  {
3053  ALLOCATION_TYPE_NONE,
3054  ALLOCATION_TYPE_BLOCK,
3055  ALLOCATION_TYPE_DEDICATED,
3056  };
3057 
3058  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3059  m_Alignment(1),
3060  m_Size(0),
3061  m_pUserData(VMA_NULL),
3062  m_LastUseFrameIndex(currentFrameIndex),
3063  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3064  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3065  m_MapCount(0),
3066  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3067  {
3068  }
3069 
3070  ~VmaAllocation_T()
3071  {
3072  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3073 
3074  // Check if owned string was freed.
3075  VMA_ASSERT(m_pUserData == VMA_NULL);
3076  }
3077 
3078  void InitBlockAllocation(
3079  VmaPool hPool,
3080  VmaDeviceMemoryBlock* block,
3081  VkDeviceSize offset,
3082  VkDeviceSize alignment,
3083  VkDeviceSize size,
3084  VmaSuballocationType suballocationType,
3085  bool mapped,
3086  bool canBecomeLost)
3087  {
3088  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3089  VMA_ASSERT(block != VMA_NULL);
3090  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3091  m_Alignment = alignment;
3092  m_Size = size;
3093  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3094  m_SuballocationType = (uint8_t)suballocationType;
3095  m_BlockAllocation.m_hPool = hPool;
3096  m_BlockAllocation.m_Block = block;
3097  m_BlockAllocation.m_Offset = offset;
3098  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3099  }
3100 
3101  void InitLost()
3102  {
3103  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3104  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3105  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3106  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3107  m_BlockAllocation.m_Block = VMA_NULL;
3108  m_BlockAllocation.m_Offset = 0;
3109  m_BlockAllocation.m_CanBecomeLost = true;
3110  }
3111 
3112  void ChangeBlockAllocation(
3113  VmaDeviceMemoryBlock* block,
3114  VkDeviceSize offset)
3115  {
3116  VMA_ASSERT(block != VMA_NULL);
3117  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3118  m_BlockAllocation.m_Block = block;
3119  m_BlockAllocation.m_Offset = offset;
3120  }
3121 
3122  // pMappedData not null means allocation is created with MAPPED flag.
3123  void InitDedicatedAllocation(
3124  uint32_t memoryTypeIndex,
3125  VkDeviceMemory hMemory,
3126  VmaSuballocationType suballocationType,
3127  void* pMappedData,
3128  VkDeviceSize size)
3129  {
3130  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3131  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3132  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3133  m_Alignment = 0;
3134  m_Size = size;
3135  m_SuballocationType = (uint8_t)suballocationType;
3136  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3137  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3138  m_DedicatedAllocation.m_hMemory = hMemory;
3139  m_DedicatedAllocation.m_pMappedData = pMappedData;
3140  }
3141 
3142  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3143  VkDeviceSize GetAlignment() const { return m_Alignment; }
3144  VkDeviceSize GetSize() const { return m_Size; }
3145  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3146  void* GetUserData() const { return m_pUserData; }
3147  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3148  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3149 
3150  VmaDeviceMemoryBlock* GetBlock() const
3151  {
3152  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3153  return m_BlockAllocation.m_Block;
3154  }
3155  VkDeviceSize GetOffset() const;
3156  VkDeviceMemory GetMemory() const;
3157  uint32_t GetMemoryTypeIndex() const;
3158  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3159  void* GetMappedData() const;
3160  bool CanBecomeLost() const;
3161  VmaPool GetPool() const;
3162 
3163  uint32_t GetLastUseFrameIndex() const
3164  {
3165  return m_LastUseFrameIndex.load();
3166  }
3167  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3168  {
3169  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3170  }
3171  /*
3172  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3173  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3174  - Else, returns false.
3175 
3176  If hAllocation is already lost, assert - you should not call it then.
3177  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3178  */
3179  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3180 
3181  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3182  {
3183  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3184  outInfo.blockCount = 1;
3185  outInfo.allocationCount = 1;
3186  outInfo.unusedRangeCount = 0;
3187  outInfo.usedBytes = m_Size;
3188  outInfo.unusedBytes = 0;
3189  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3190  outInfo.unusedRangeSizeMin = UINT64_MAX;
3191  outInfo.unusedRangeSizeMax = 0;
3192  }
3193 
3194  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3195  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3196 
3197 private:
3198  VkDeviceSize m_Alignment;
3199  VkDeviceSize m_Size;
3200  void* m_pUserData;
3201  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3202  uint8_t m_Type; // ALLOCATION_TYPE
3203  uint8_t m_SuballocationType; // VmaSuballocationType
3204  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3205  // Bits with mask 0x7F, used only when ALLOCATION_TYPE_DEDICATED, are reference counter for vmaMapMemory()/vmaUnmapMemory().
3206  uint8_t m_MapCount;
3207  uint8_t m_Flags; // enum FLAGS
3208 
3209  // Allocation out of VmaDeviceMemoryBlock.
3210  struct BlockAllocation
3211  {
3212  VmaPool m_hPool; // Null if belongs to general memory.
3213  VmaDeviceMemoryBlock* m_Block;
3214  VkDeviceSize m_Offset;
3215  bool m_CanBecomeLost;
3216  };
3217 
3218  // Allocation for an object that has its own private VkDeviceMemory.
3219  struct DedicatedAllocation
3220  {
3221  uint32_t m_MemoryTypeIndex;
3222  VkDeviceMemory m_hMemory;
3223  void* m_pMappedData; // Not null means memory is mapped.
3224  };
3225 
3226  union
3227  {
3228  // Allocation out of VmaDeviceMemoryBlock.
3229  BlockAllocation m_BlockAllocation;
3230  // Allocation for an object that has its own private VkDeviceMemory.
3231  DedicatedAllocation m_DedicatedAllocation;
3232  };
3233 
3234  void FreeUserDataString(VmaAllocator hAllocator);
3235 };
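// A minimal standalone sketch of the lost-allocation mechanism used by
// CompareExchangeLastUseFrameIndex()/MakeLost() above, modeled with
// std::atomic instead of VMA_ATOMIC_UINT32. All names here are hypothetical;
// the block is illustrative only and excluded from compilation.
#if 0
#include <atomic>
#include <cstdint>

static const uint32_t EXAMPLE_FRAME_INDEX_LOST = UINT32_MAX;

struct ExampleAllocation
{
    std::atomic<uint32_t> lastUseFrameIndex{0};

    // Returns true if the allocation could be transitioned to the LOST state.
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    {
        uint32_t localLastUse = lastUseFrameIndex.load();
        for(;;)
        {
            if(localLastUse == EXAMPLE_FRAME_INDEX_LOST)
            {
                return false; // Already lost.
            }
            if(localLastUse + frameInUseCount >= currentFrameIndex)
            {
                return false; // May still be in use by queued GPU work.
            }
            // Another thread may bump the frame index concurrently, hence CAS:
            // on failure localLastUse is reloaded and the checks repeat.
            if(lastUseFrameIndex.compare_exchange_weak(
                localLastUse, EXAMPLE_FRAME_INDEX_LOST))
            {
                return true;
            }
        }
    }
};
#endif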
3236 
3237 /*
3238 Represents a region of a VmaDeviceMemoryBlock that is either assigned to a
3239 VmaAllocation (used) or free.
3240 */
3241 struct VmaSuballocation
3242 {
3243  VkDeviceSize offset;
3244  VkDeviceSize size;
3245  VmaAllocation hAllocation;
3246  VmaSuballocationType type;
3247 };
3248 
3249 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3250 
3251 // Cost of making one additional allocation lost, expressed in bytes.
3252 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3253 
3254 /*
3255 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3256 
3257 If canMakeOtherLost was false:
3258 - item points to a FREE suballocation.
3259 - itemsToMakeLostCount is 0.
3260 
3261 If canMakeOtherLost was true:
3262 - item points to the first of a sequence of suballocations, each of which is
3263  either FREE or points to a VmaAllocation that can become lost.
3264 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3265  the requested allocation to succeed.
3266 */
3267 struct VmaAllocationRequest
3268 {
3269  VkDeviceSize offset;
3270  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3271  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3272  VmaSuballocationList::iterator item;
3273  size_t itemsToMakeLostCount;
3274 
3275  VkDeviceSize CalcCost() const
3276  {
3277  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3278  }
3279 };
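// Worked example (illustrative): a candidate request that would sacrifice two
// lost-able allocations of 256 KiB each has
//   CalcCost() = (262144 + 262144) + 2 * VMA_LOST_ALLOCATION_COST
//              = 524288 + 2097152 = 2621440 bytes,
// while a candidate placed entirely in free space costs 0. The search in
// VmaBlockMetadata::CreateAllocationRequest() keeps the cheapest request, so
// each sacrificed allocation is penalized as if it wasted an extra 1 MiB.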
3280 
3281 /*
3282 Data structure used for bookkeeping of allocations and unused ranges of memory
3283 in a single VkDeviceMemory block.
3284 */
3285 class VmaBlockMetadata
3286 {
3287 public:
3288  VmaBlockMetadata(VmaAllocator hAllocator);
3289  ~VmaBlockMetadata();
3290  void Init(VkDeviceSize size);
3291 
3292  // Validates all data structures inside this object. If not valid, returns false.
3293  bool Validate() const;
3294  VkDeviceSize GetSize() const { return m_Size; }
3295  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3296  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3297  VkDeviceSize GetUnusedRangeSizeMax() const;
3298  // Returns true if this block is empty - contains only a single free suballocation.
3299  bool IsEmpty() const;
3300 
3301  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3302  void AddPoolStats(VmaPoolStats& inoutStats) const;
3303 
3304 #if VMA_STATS_STRING_ENABLED
3305  void PrintDetailedMap(class VmaJsonWriter& json) const;
3306 #endif
3307 
3308  // Creates a trivial request for the case when the block is empty.
3309  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3310 
3311  // Tries to find a place for a suballocation with the given parameters inside this block.
3312  // On success, fills pAllocationRequest and returns true.
3313  // On failure, returns false.
3314  bool CreateAllocationRequest(
3315  uint32_t currentFrameIndex,
3316  uint32_t frameInUseCount,
3317  VkDeviceSize bufferImageGranularity,
3318  VkDeviceSize allocSize,
3319  VkDeviceSize allocAlignment,
3320  VmaSuballocationType allocType,
3321  bool canMakeOtherLost,
3322  VmaAllocationRequest* pAllocationRequest);
3323 
3324  bool MakeRequestedAllocationsLost(
3325  uint32_t currentFrameIndex,
3326  uint32_t frameInUseCount,
3327  VmaAllocationRequest* pAllocationRequest);
3328 
3329  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3330 
3331  // Makes actual allocation based on request. Request must already be checked and valid.
3332  void Alloc(
3333  const VmaAllocationRequest& request,
3334  VmaSuballocationType type,
3335  VkDeviceSize allocSize,
3336  VmaAllocation hAllocation);
3337 
3338  // Frees suballocation assigned to given memory region.
3339  void Free(const VmaAllocation allocation);
3340 
3341 private:
3342  VkDeviceSize m_Size;
3343  uint32_t m_FreeCount;
3344  VkDeviceSize m_SumFreeSize;
3345  VmaSuballocationList m_Suballocations;
3346  // Suballocations that are free and have size greater than a certain threshold
3347  // (VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER). Sorted by size, ascending.
3348  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3349 
3350  bool ValidateFreeSuballocationList() const;
3351 
3352  // Checks whether a suballocation with the given parameters can be placed at the given suballocItem.
3353  // If it can, fills pOffset and returns true; otherwise returns false.
3354  bool CheckAllocation(
3355  uint32_t currentFrameIndex,
3356  uint32_t frameInUseCount,
3357  VkDeviceSize bufferImageGranularity,
3358  VkDeviceSize allocSize,
3359  VkDeviceSize allocAlignment,
3360  VmaSuballocationType allocType,
3361  VmaSuballocationList::const_iterator suballocItem,
3362  bool canMakeOtherLost,
3363  VkDeviceSize* pOffset,
3364  size_t* itemsToMakeLostCount,
3365  VkDeviceSize* pSumFreeSize,
3366  VkDeviceSize* pSumItemSize) const;
3367  // Given a free suballocation, merges it with the following one, which must also be free.
3368  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3369  // Releases given suballocation, making it free.
3370  // Merges it with adjacent free suballocations if applicable.
3371  // Returns iterator to new free suballocation at this place.
3372  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3373  // Given a free suballocation, inserts it into the sorted list
3374  // m_FreeSuballocationsBySize if it is large enough to be registered.
3375  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3376  // Given a free suballocation, removes it from the sorted list
3377  // m_FreeSuballocationsBySize if it was registered there.
3378  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3379 };
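// A standalone sketch of the core invariants that Validate() above enforces,
// restated over a plain std::list. Names are hypothetical; illustrative only.
#if 0
#include <cstdint>
#include <list>

struct ExampleRange { uint64_t offset, size; bool free; };

// Suballocations must be contiguous, cover exactly [0, blockSize), and no two
// adjacent ranges may both be free (FreeSuballocation() would have merged them).
static bool CheckExampleInvariants(const std::list<ExampleRange>& ranges,
                                   uint64_t blockSize)
{
    uint64_t expectedOffset = 0;
    bool prevFree = false;
    for(const ExampleRange& r : ranges)
    {
        if(r.offset != expectedOffset) return false; // Gap or overlap.
        if(prevFree && r.free) return false;         // Unmerged free neighbors.
        prevFree = r.free;
        expectedOffset += r.size;
    }
    return expectedOffset == blockSize;              // Full coverage.
}
#endif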
3380 
3381 // Helper class that represents mapped memory. Synchronized internally.
3382 class VmaDeviceMemoryMapping
3383 {
3384 public:
3385  VmaDeviceMemoryMapping();
3386  ~VmaDeviceMemoryMapping();
3387 
3388  void* GetMappedData() const { return m_pMappedData; }
3389 
3390  // ppData can be null.
3391  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData);
3392  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory);
3393 
3394 private:
3395  VMA_MUTEX m_Mutex;
3396  uint32_t m_MapCount;
3397  void* m_pMappedData;
3398 };
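// A minimal sketch of the reference-counted mapping idea behind
// VmaDeviceMemoryMapping: vkMapMemory is called only on the 0 -> 1 transition
// and vkUnmapMemory only on 1 -> 0, so multiple allocations in one block can
// share a single mapping. PlatformMap()/PlatformUnmap() are hypothetical
// stand-ins for the Vulkan calls; illustrative only.
#if 0
#include <mutex>

void* PlatformMap();   // Hypothetical: wraps vkMapMemory.
void PlatformUnmap();  // Hypothetical: wraps vkUnmapMemory.

struct ExampleMapping
{
    std::mutex mutex;
    uint32_t mapCount = 0;
    void* pMappedData = nullptr;

    void* Map()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(mapCount++ == 0)
        {
            pMappedData = PlatformMap(); // First user performs the real map.
        }
        return pMappedData;
    }
    void Unmap()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(--mapCount == 0)
        {
            PlatformUnmap();             // Last user performs the real unmap.
            pMappedData = nullptr;
        }
    }
};
#endif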
3399 
3400 /*
3401 Represents a single block of device memory (`VkDeviceMemory`) with all the
3402 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3403 
3404 Thread-safety: This class must be externally synchronized.
3405 */
3406 class VmaDeviceMemoryBlock
3407 {
3408 public:
3409  uint32_t m_MemoryTypeIndex;
3410  VkDeviceMemory m_hMemory;
3411  VmaDeviceMemoryMapping m_Mapping;
3412  VmaBlockMetadata m_Metadata;
3413 
3414  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3415 
3416  ~VmaDeviceMemoryBlock()
3417  {
3418  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3419  }
3420 
3421  // Always call after construction.
3422  void Init(
3423  uint32_t newMemoryTypeIndex,
3424  VkDeviceMemory newMemory,
3425  VkDeviceSize newSize);
3426  // Always call before destruction.
3427  void Destroy(VmaAllocator allocator);
3428 
3429  // Validates all data structures inside this object. If not valid, returns false.
3430  bool Validate() const;
3431 
3432  // ppData can be null.
3433  VkResult Map(VmaAllocator hAllocator, void** ppData);
3434  void Unmap(VmaAllocator hAllocator);
3435 };
3436 
3437 struct VmaPointerLess
3438 {
3439  bool operator()(const void* lhs, const void* rhs) const
3440  {
3441  return lhs < rhs;
3442  }
3443 };
3444 
3445 class VmaDefragmentator;
3446 
3447 /*
3448 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3449 Vulkan memory type.
3450 
3451 Synchronized internally with a mutex.
3452 */
3453 struct VmaBlockVector
3454 {
3455  VmaBlockVector(
3456  VmaAllocator hAllocator,
3457  uint32_t memoryTypeIndex,
3458  VkDeviceSize preferredBlockSize,
3459  size_t minBlockCount,
3460  size_t maxBlockCount,
3461  VkDeviceSize bufferImageGranularity,
3462  uint32_t frameInUseCount,
3463  bool isCustomPool);
3464  ~VmaBlockVector();
3465 
3466  VkResult CreateMinBlocks();
3467 
3468  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3469  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3470  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3471  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3472 
3473  void GetPoolStats(VmaPoolStats* pStats);
3474 
3475  bool IsEmpty() const { return m_Blocks.empty(); }
3476 
3477  VkResult Allocate(
3478  VmaPool hCurrentPool,
3479  uint32_t currentFrameIndex,
3480  const VkMemoryRequirements& vkMemReq,
3481  const VmaAllocationCreateInfo& createInfo,
3482  VmaSuballocationType suballocType,
3483  VmaAllocation* pAllocation);
3484 
3485  void Free(
3486  VmaAllocation hAllocation);
3487 
3488  // Adds statistics of this BlockVector to pStats.
3489  void AddStats(VmaStats* pStats);
3490 
3491 #if VMA_STATS_STRING_ENABLED
3492  void PrintDetailedMap(class VmaJsonWriter& json);
3493 #endif
3494 
3495  void MakePoolAllocationsLost(
3496  uint32_t currentFrameIndex,
3497  size_t* pLostAllocationCount);
3498 
3499  VmaDefragmentator* EnsureDefragmentator(
3500  VmaAllocator hAllocator,
3501  uint32_t currentFrameIndex);
3502 
3503  VkResult Defragment(
3504  VmaDefragmentationStats* pDefragmentationStats,
3505  VkDeviceSize& maxBytesToMove,
3506  uint32_t& maxAllocationsToMove);
3507 
3508  void DestroyDefragmentator();
3509 
3510 private:
3511  friend class VmaDefragmentator;
3512 
3513  const VmaAllocator m_hAllocator;
3514  const uint32_t m_MemoryTypeIndex;
3515  const VkDeviceSize m_PreferredBlockSize;
3516  const size_t m_MinBlockCount;
3517  const size_t m_MaxBlockCount;
3518  const VkDeviceSize m_BufferImageGranularity;
3519  const uint32_t m_FrameInUseCount;
3520  const bool m_IsCustomPool;
3521  VMA_MUTEX m_Mutex;
3522  // Incrementally sorted by sumFreeSize, ascending.
3523  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3524  /* There can be at most one block that is completely empty - a
3525  hysteresis to avoid the pessimistic case of alternating creation and
3526  destruction of a VkDeviceMemory. */
3527  bool m_HasEmptyBlock;
3528  VmaDefragmentator* m_pDefragmentator;
3529 
3530  // Finds and removes given block from vector.
3531  void Remove(VmaDeviceMemoryBlock* pBlock);
3532 
3533  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3534  // after this call.
3535  void IncrementallySortBlocks();
3536 
3537  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3538 };
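// A sketch of what a single incremental sorting step may look like: one
// bubble-sort pass with at most one swap keeps m_Blocks nearly sorted by
// GetSumFreeSize() without paying for a full sort on every allocation.
// Standalone model with hypothetical names; illustrative only.
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

static void ExampleIncrementalSortStep(std::vector<uint64_t>& sumFreeSizes)
{
    for(size_t i = 1; i < sumFreeSizes.size(); ++i)
    {
        if(sumFreeSizes[i - 1] > sumFreeSizes[i])
        {
            std::swap(sumFreeSizes[i - 1], sumFreeSizes[i]);
            return; // At most one swap per call keeps the cost amortized.
        }
    }
}
#endif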
3539 
3540 struct VmaPool_T
3541 {
3542 public:
3543  VmaBlockVector m_BlockVector;
3544 
3545  // Takes ownership.
3546  VmaPool_T(
3547  VmaAllocator hAllocator,
3548  const VmaPoolCreateInfo& createInfo);
3549  ~VmaPool_T();
3550 
3551  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3552 
3553 #if VMA_STATS_STRING_ENABLED
3554  //void PrintDetailedMap(class VmaStringBuilder& sb);
3555 #endif
3556 };
3557 
3558 class VmaDefragmentator
3559 {
3560  const VmaAllocator m_hAllocator;
3561  VmaBlockVector* const m_pBlockVector;
3562  uint32_t m_CurrentFrameIndex;
3563  VkDeviceSize m_BytesMoved;
3564  uint32_t m_AllocationsMoved;
3565 
3566  struct AllocationInfo
3567  {
3568  VmaAllocation m_hAllocation;
3569  VkBool32* m_pChanged;
3570 
3571  AllocationInfo() :
3572  m_hAllocation(VK_NULL_HANDLE),
3573  m_pChanged(VMA_NULL)
3574  {
3575  }
3576  };
3577 
3578  struct AllocationInfoSizeGreater
3579  {
3580  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3581  {
3582  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3583  }
3584  };
3585 
3586  // Used between AddAllocation and Defragment.
3587  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3588 
3589  struct BlockInfo
3590  {
3591  VmaDeviceMemoryBlock* m_pBlock;
3592  bool m_HasNonMovableAllocations;
3593  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3594 
3595  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3596  m_pBlock(VMA_NULL),
3597  m_HasNonMovableAllocations(true),
3598  m_Allocations(pAllocationCallbacks),
3599  m_pMappedDataForDefragmentation(VMA_NULL)
3600  {
3601  }
3602 
3603  void CalcHasNonMovableAllocations()
3604  {
3605  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3606  const size_t defragmentAllocCount = m_Allocations.size();
3607  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3608  }
3609 
3610  void SortAllocationsBySizeDescecnding()
3611  {
3612  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3613  }
3614 
3615  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3616  void Unmap(VmaAllocator hAllocator);
3617 
3618  private:
3619  // Not null if mapped for defragmentation only, not originally mapped.
3620  void* m_pMappedDataForDefragmentation;
3621  };
3622 
3623  struct BlockPointerLess
3624  {
3625  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3626  {
3627  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3628  }
3629  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3630  {
3631  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3632  }
3633  };
3634 
3635  // 1. Blocks with some non-movable allocations go first.
3636  // 2. Blocks with smaller sumFreeSize go first.
3637  struct BlockInfoCompareMoveDestination
3638  {
3639  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3640  {
3641  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3642  {
3643  return true;
3644  }
3645  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3646  {
3647  return false;
3648  }
3649  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3650  {
3651  return true;
3652  }
3653  return false;
3654  }
3655  };
3656 
3657  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3658  BlockInfoVector m_Blocks;
3659 
3660  VkResult DefragmentRound(
3661  VkDeviceSize maxBytesToMove,
3662  uint32_t maxAllocationsToMove);
3663 
3664  static bool MoveMakesSense(
3665  size_t dstBlockIndex, VkDeviceSize dstOffset,
3666  size_t srcBlockIndex, VkDeviceSize srcOffset);
3667 
3668 public:
3669  VmaDefragmentator(
3670  VmaAllocator hAllocator,
3671  VmaBlockVector* pBlockVector,
3672  uint32_t currentFrameIndex);
3673 
3674  ~VmaDefragmentator();
3675 
3676  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3677  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3678 
3679  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3680 
3681  VkResult Defragment(
3682  VkDeviceSize maxBytesToMove,
3683  uint32_t maxAllocationsToMove);
3684 };
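// An illustrative usage sketch of the public entry point that drives this
// class, vmaDefragment(), declared earlier in this header. The allocations
// array and error handling are application-specific; illustrative only.
#if 0
#include <vector>

static void ExampleDefragment(VmaAllocator allocator,
                              VmaAllocation* allocs, size_t allocCount)
{
    std::vector<VkBool32> changed(allocCount, VK_FALSE);

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // No byte limit.
    defragInfo.maxAllocationsToMove = UINT32_MAX; // No count limit.

    VmaDefragmentationStats stats = {};
    VkResult res = vmaDefragment(allocator, allocs, allocCount,
                                 changed.data(), &defragInfo, &stats);
    (void)res;
    // For every allocation with changed[i] == VK_TRUE, the application must
    // recreate buffers/images bound to it and restore their contents.
}
#endif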
3685 
3686 // Main allocator object.
3687 struct VmaAllocator_T
3688 {
3689  bool m_UseMutex;
3690  bool m_UseKhrDedicatedAllocation;
3691  VkDevice m_hDevice;
3692  bool m_AllocationCallbacksSpecified;
3693  VkAllocationCallbacks m_AllocationCallbacks;
3694  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3695 
3696  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3697  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3698  VMA_MUTEX m_HeapSizeLimitMutex;
3699 
3700  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3701  VkPhysicalDeviceMemoryProperties m_MemProps;
3702 
3703  // Default pools.
3704  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3705 
3706  // Each vector is sorted by memory (handle value).
3707  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
3708  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
3709  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
3710 
3711  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
3712  ~VmaAllocator_T();
3713 
3714  const VkAllocationCallbacks* GetAllocationCallbacks() const
3715  {
3716  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
3717  }
3718  const VmaVulkanFunctions& GetVulkanFunctions() const
3719  {
3720  return m_VulkanFunctions;
3721  }
3722 
3723  VkDeviceSize GetBufferImageGranularity() const
3724  {
3725  return VMA_MAX(
3726  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
3727  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
3728  }
3729 
3730  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
3731  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
3732 
3733  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
3734  {
3735  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
3736  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3737  }
3738 
3739  void GetBufferMemoryRequirements(
3740  VkBuffer hBuffer,
3741  VkMemoryRequirements& memReq,
3742  bool& requiresDedicatedAllocation,
3743  bool& prefersDedicatedAllocation) const;
3744  void GetImageMemoryRequirements(
3745  VkImage hImage,
3746  VkMemoryRequirements& memReq,
3747  bool& requiresDedicatedAllocation,
3748  bool& prefersDedicatedAllocation) const;
3749 
3750  // Main allocation function.
3751  VkResult AllocateMemory(
3752  const VkMemoryRequirements& vkMemReq,
3753  bool requiresDedicatedAllocation,
3754  bool prefersDedicatedAllocation,
3755  VkBuffer dedicatedBuffer,
3756  VkImage dedicatedImage,
3757  const VmaAllocationCreateInfo& createInfo,
3758  VmaSuballocationType suballocType,
3759  VmaAllocation* pAllocation);
3760 
3761  // Main deallocation function.
3762  void FreeMemory(const VmaAllocation allocation);
3763 
3764  void CalculateStats(VmaStats* pStats);
3765 
3766 #if VMA_STATS_STRING_ENABLED
3767  void PrintDetailedMap(class VmaJsonWriter& json);
3768 #endif
3769 
3770  VkResult Defragment(
3771  VmaAllocation* pAllocations,
3772  size_t allocationCount,
3773  VkBool32* pAllocationsChanged,
3774  const VmaDefragmentationInfo* pDefragmentationInfo,
3775  VmaDefragmentationStats* pDefragmentationStats);
3776 
3777  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
3778 
3779  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
3780  void DestroyPool(VmaPool pool);
3781  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
3782 
3783  void SetCurrentFrameIndex(uint32_t frameIndex);
3784 
3785  void MakePoolAllocationsLost(
3786  VmaPool hPool,
3787  size_t* pLostAllocationCount);
3788 
3789  void CreateLostAllocation(VmaAllocation* pAllocation);
3790 
3791  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
3792  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
3793 
3794  VkResult Map(VmaAllocation hAllocation, void** ppData);
3795  void Unmap(VmaAllocation hAllocation);
3796 
3797 private:
3798  VkDeviceSize m_PreferredLargeHeapBlockSize;
3799  VkDeviceSize m_PreferredSmallHeapBlockSize;
3800 
3801  VkPhysicalDevice m_PhysicalDevice;
3802  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
3803 
3804  VMA_MUTEX m_PoolsMutex;
3805  // Protected by m_PoolsMutex. Sorted by pointer value.
3806  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
3807 
3808  VmaVulkanFunctions m_VulkanFunctions;
3809 
3810  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
3811 
3812  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
3813 
3814  VkResult AllocateMemoryOfType(
3815  const VkMemoryRequirements& vkMemReq,
3816  bool dedicatedAllocation,
3817  VkBuffer dedicatedBuffer,
3818  VkImage dedicatedImage,
3819  const VmaAllocationCreateInfo& createInfo,
3820  uint32_t memTypeIndex,
3821  VmaSuballocationType suballocType,
3822  VmaAllocation* pAllocation);
3823 
3824  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
3825  VkResult AllocateDedicatedMemory(
3826  VkDeviceSize size,
3827  VmaSuballocationType suballocType,
3828  uint32_t memTypeIndex,
3829  bool map,
3830  bool isUserDataString,
3831  void* pUserData,
3832  VkBuffer dedicatedBuffer,
3833  VkImage dedicatedImage,
3834  VmaAllocation* pAllocation);
3835 
3836  // Frees the given allocation's dedicated VkDeviceMemory and unregisters it.
3837  void FreeDedicatedMemory(VmaAllocation allocation);
3838 };
3839 
3841 // Memory allocation #2 after VmaAllocator_T definition
3842 
3843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
3844 {
3845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
3846 }
3847 
3848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
3849 {
3850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
3851 }
3852 
3853 template<typename T>
3854 static T* VmaAllocate(VmaAllocator hAllocator)
3855 {
3856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
3857 }
3858 
3859 template<typename T>
3860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
3861 {
3862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
3863 }
3864 
3865 template<typename T>
3866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
3867 {
3868  if(ptr != VMA_NULL)
3869  {
3870  ptr->~T();
3871  VmaFree(hAllocator, ptr);
3872  }
3873 }
3874 
3875 template<typename T>
3876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
3877 {
3878  if(ptr != VMA_NULL)
3879  {
3880  for(size_t i = count; i--; )
3881  ptr[i].~T();
3882  VmaFree(hAllocator, ptr);
3883  }
3884 }
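// Illustrative usage of the helpers above. Note that VmaAllocate()/
// VmaAllocateArray() return raw, unconstructed storage; non-trivial types
// must be constructed with placement new before use and destroyed through
// vma_delete()/vma_delete_array(), which run the destructors. The example
// function name is hypothetical; illustrative only.
#if 0
static void ExampleAllocationHelpers(VmaAllocator hAllocator)
{
    int* p = VmaAllocate<int>(hAllocator); // Raw storage for one int.
    *p = 42;                               // Trivial type - no ctor needed.
    vma_delete(hAllocator, p);             // ~int() is a no-op, then frees.

    int* arr = VmaAllocateArray<int>(hAllocator, 8);
    vma_delete_array(hAllocator, arr, 8);  // Destroys all 8, then frees.
}
#endif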
3885 
3887 // VmaStringBuilder
3888 
3889 #if VMA_STATS_STRING_ENABLED
3890 
3891 class VmaStringBuilder
3892 {
3893 public:
3894  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
3895  size_t GetLength() const { return m_Data.size(); }
3896  const char* GetData() const { return m_Data.data(); }
3897 
3898  void Add(char ch) { m_Data.push_back(ch); }
3899  void Add(const char* pStr);
3900  void AddNewLine() { Add('\n'); }
3901  void AddNumber(uint32_t num);
3902  void AddNumber(uint64_t num);
3903  void AddPointer(const void* ptr);
3904 
3905 private:
3906  VmaVector< char, VmaStlAllocator<char> > m_Data;
3907 };
3908 
3909 void VmaStringBuilder::Add(const char* pStr)
3910 {
3911  const size_t strLen = strlen(pStr);
3912  if(strLen > 0)
3913  {
3914  const size_t oldCount = m_Data.size();
3915  m_Data.resize(oldCount + strLen);
3916  memcpy(m_Data.data() + oldCount, pStr, strLen);
3917  }
3918 }
3919 
3920 void VmaStringBuilder::AddNumber(uint32_t num)
3921 {
3922  char buf[11];
3923  VmaUint32ToStr(buf, sizeof(buf), num);
3924  Add(buf);
3925 }
3926 
3927 void VmaStringBuilder::AddNumber(uint64_t num)
3928 {
3929  char buf[21];
3930  VmaUint64ToStr(buf, sizeof(buf), num);
3931  Add(buf);
3932 }
3933 
3934 void VmaStringBuilder::AddPointer(const void* ptr)
3935 {
3936  char buf[21];
3937  VmaPtrToStr(buf, sizeof(buf), ptr);
3938  Add(buf);
3939 }
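// Illustrative usage of VmaStringBuilder. GetData() is not null-terminated
// (m_Data stores no trailing '\0'), so it must be paired with GetLength().
// The example function name is hypothetical; illustrative only.
#if 0
#include <cstdio>

static void ExampleStringBuilder(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    sb.Add("Memory heaps: ");
    sb.AddNumber(static_cast<uint32_t>(2)); // Selects the uint32_t overload.
    sb.AddNewLine();
    printf("%.*s", (int)sb.GetLength(), sb.GetData());
}
#endif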
3940 
3941 #endif // #if VMA_STATS_STRING_ENABLED
3942 
3944 // VmaJsonWriter
3945 
3946 #if VMA_STATS_STRING_ENABLED
3947 
3948 class VmaJsonWriter
3949 {
3950 public:
3951  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
3952  ~VmaJsonWriter();
3953 
3954  void BeginObject(bool singleLine = false);
3955  void EndObject();
3956 
3957  void BeginArray(bool singleLine = false);
3958  void EndArray();
3959 
3960  void WriteString(const char* pStr);
3961  void BeginString(const char* pStr = VMA_NULL);
3962  void ContinueString(const char* pStr);
3963  void ContinueString(uint32_t n);
3964  void ContinueString(uint64_t n);
3965  void ContinueString_Pointer(const void* ptr);
3966  void EndString(const char* pStr = VMA_NULL);
3967 
3968  void WriteNumber(uint32_t n);
3969  void WriteNumber(uint64_t n);
3970  void WriteBool(bool b);
3971  void WriteNull();
3972 
3973 private:
3974  static const char* const INDENT;
3975 
3976  enum COLLECTION_TYPE
3977  {
3978  COLLECTION_TYPE_OBJECT,
3979  COLLECTION_TYPE_ARRAY,
3980  };
3981  struct StackItem
3982  {
3983  COLLECTION_TYPE type;
3984  uint32_t valueCount;
3985  bool singleLineMode;
3986  };
3987 
3988  VmaStringBuilder& m_SB;
3989  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
3990  bool m_InsideString;
3991 
3992  void BeginValue(bool isString);
3993  void WriteIndent(bool oneLess = false);
3994 };
3995 
3996 const char* const VmaJsonWriter::INDENT = " ";
3997 
3998 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
3999  m_SB(sb),
4000  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4001  m_InsideString(false)
4002 {
4003 }
4004 
4005 VmaJsonWriter::~VmaJsonWriter()
4006 {
4007  VMA_ASSERT(!m_InsideString);
4008  VMA_ASSERT(m_Stack.empty());
4009 }
4010 
4011 void VmaJsonWriter::BeginObject(bool singleLine)
4012 {
4013  VMA_ASSERT(!m_InsideString);
4014 
4015  BeginValue(false);
4016  m_SB.Add('{');
4017 
4018  StackItem item;
4019  item.type = COLLECTION_TYPE_OBJECT;
4020  item.valueCount = 0;
4021  item.singleLineMode = singleLine;
4022  m_Stack.push_back(item);
4023 }
4024 
4025 void VmaJsonWriter::EndObject()
4026 {
4027  VMA_ASSERT(!m_InsideString);
4028 
4029  WriteIndent(true);
4030  m_SB.Add('}');
4031 
4032  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4033  m_Stack.pop_back();
4034 }
4035 
4036 void VmaJsonWriter::BeginArray(bool singleLine)
4037 {
4038  VMA_ASSERT(!m_InsideString);
4039 
4040  BeginValue(false);
4041  m_SB.Add('[');
4042 
4043  StackItem item;
4044  item.type = COLLECTION_TYPE_ARRAY;
4045  item.valueCount = 0;
4046  item.singleLineMode = singleLine;
4047  m_Stack.push_back(item);
4048 }
4049 
4050 void VmaJsonWriter::EndArray()
4051 {
4052  VMA_ASSERT(!m_InsideString);
4053 
4054  WriteIndent(true);
4055  m_SB.Add(']');
4056 
4057  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4058  m_Stack.pop_back();
4059 }
4060 
4061 void VmaJsonWriter::WriteString(const char* pStr)
4062 {
4063  BeginString(pStr);
4064  EndString();
4065 }
4066 
4067 void VmaJsonWriter::BeginString(const char* pStr)
4068 {
4069  VMA_ASSERT(!m_InsideString);
4070 
4071  BeginValue(true);
4072  m_SB.Add('"');
4073  m_InsideString = true;
4074  if(pStr != VMA_NULL && pStr[0] != '\0')
4075  {
4076  ContinueString(pStr);
4077  }
4078 }
4079 
4080 void VmaJsonWriter::ContinueString(const char* pStr)
4081 {
4082  VMA_ASSERT(m_InsideString);
4083 
4084  const size_t strLen = strlen(pStr);
4085  for(size_t i = 0; i < strLen; ++i)
4086  {
4087  char ch = pStr[i];
4088  if(ch == '\\')
4089  {
4090  m_SB.Add("\\\\");
4091  }
4092  else if(ch == '"')
4093  {
4094  m_SB.Add("\\\"");
4095  }
4096  else if(ch >= 32)
4097  {
4098  m_SB.Add(ch);
4099  }
4100  else switch(ch)
4101  {
4102  case '\b':
4103  m_SB.Add("\\b");
4104  break;
4105  case '\f':
4106  m_SB.Add("\\f");
4107  break;
4108  case '\n':
4109  m_SB.Add("\\n");
4110  break;
4111  case '\r':
4112  m_SB.Add("\\r");
4113  break;
4114  case '\t':
4115  m_SB.Add("\\t");
4116  break;
4117  default:
4118  VMA_ASSERT(0 && "Character not currently supported.");
4119  break;
4120  }
4121  }
4122 }
4123 
4124 void VmaJsonWriter::ContinueString(uint32_t n)
4125 {
4126  VMA_ASSERT(m_InsideString);
4127  m_SB.AddNumber(n);
4128 }
4129 
4130 void VmaJsonWriter::ContinueString(uint64_t n)
4131 {
4132  VMA_ASSERT(m_InsideString);
4133  m_SB.AddNumber(n);
4134 }
4135 
4136 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4137 {
4138  VMA_ASSERT(m_InsideString);
4139  m_SB.AddPointer(ptr);
4140 }
4141 
4142 void VmaJsonWriter::EndString(const char* pStr)
4143 {
4144  VMA_ASSERT(m_InsideString);
4145  if(pStr != VMA_NULL && pStr[0] != '\0')
4146  {
4147  ContinueString(pStr);
4148  }
4149  m_SB.Add('"');
4150  m_InsideString = false;
4151 }
4152 
4153 void VmaJsonWriter::WriteNumber(uint32_t n)
4154 {
4155  VMA_ASSERT(!m_InsideString);
4156  BeginValue(false);
4157  m_SB.AddNumber(n);
4158 }
4159 
4160 void VmaJsonWriter::WriteNumber(uint64_t n)
4161 {
4162  VMA_ASSERT(!m_InsideString);
4163  BeginValue(false);
4164  m_SB.AddNumber(n);
4165 }
4166 
4167 void VmaJsonWriter::WriteBool(bool b)
4168 {
4169  VMA_ASSERT(!m_InsideString);
4170  BeginValue(false);
4171  m_SB.Add(b ? "true" : "false");
4172 }
4173 
4174 void VmaJsonWriter::WriteNull()
4175 {
4176  VMA_ASSERT(!m_InsideString);
4177  BeginValue(false);
4178  m_SB.Add("null");
4179 }
4180 
4181 void VmaJsonWriter::BeginValue(bool isString)
4182 {
4183  if(!m_Stack.empty())
4184  {
4185  StackItem& currItem = m_Stack.back();
4186  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4187  currItem.valueCount % 2 == 0)
4188  {
4189  VMA_ASSERT(isString);
4190  }
4191 
4192  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4193  currItem.valueCount % 2 != 0)
4194  {
4195  m_SB.Add(": ");
4196  }
4197  else if(currItem.valueCount > 0)
4198  {
4199  m_SB.Add(", ");
4200  WriteIndent();
4201  }
4202  else
4203  {
4204  WriteIndent();
4205  }
4206  ++currItem.valueCount;
4207  }
4208 }
4209 
4210 void VmaJsonWriter::WriteIndent(bool oneLess)
4211 {
4212  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4213  {
4214  m_SB.AddNewLine();
4215 
4216  size_t count = m_Stack.size();
4217  if(count > 0 && oneLess)
4218  {
4219  --count;
4220  }
4221  for(size_t i = 0; i < count; ++i)
4222  {
4223  m_SB.Add(INDENT);
4224  }
4225  }
4226 }
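// Illustrative usage of VmaJsonWriter. Inside an object, keys and values must
// alternate and every key must be a string, which BeginValue() above asserts.
// The example function name is hypothetical; illustrative only.
#if 0
static void ExampleJsonWriter(VmaAllocator allocator)
{
    VmaStringBuilder sb(allocator);
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);

    json.BeginObject();
    json.WriteString("Name");    // Key.
    json.WriteString("Block 0"); // Value.
    json.WriteString("Sizes");   // Key.
    json.BeginArray(true);       // Value: single-line array.
    json.WriteNumber(static_cast<uint32_t>(256));
    json.WriteNumber(static_cast<uint32_t>(1024));
    json.EndArray();
    json.EndObject();
    // sb now holds: {\n "Name": "Block 0",\n "Sizes": [256, 1024]\n}
}
#endif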
4227 
4228 #endif // #if VMA_STATS_STRING_ENABLED
4229 
4231 
4232 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4233 {
4234  if(IsUserDataString())
4235  {
4236  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4237 
4238  FreeUserDataString(hAllocator);
4239 
4240  if(pUserData != VMA_NULL)
4241  {
4242  const char* const newStrSrc = (char*)pUserData;
4243  const size_t newStrLen = strlen(newStrSrc);
4244  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4245  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4246  m_pUserData = newStrDst;
4247  }
4248  }
4249  else
4250  {
4251  m_pUserData = pUserData;
4252  }
4253 }
4254 
4255 VkDeviceSize VmaAllocation_T::GetOffset() const
4256 {
4257  switch(m_Type)
4258  {
4259  case ALLOCATION_TYPE_BLOCK:
4260  return m_BlockAllocation.m_Offset;
4261  case ALLOCATION_TYPE_DEDICATED:
4262  return 0;
4263  default:
4264  VMA_ASSERT(0);
4265  return 0;
4266  }
4267 }
4268 
4269 VkDeviceMemory VmaAllocation_T::GetMemory() const
4270 {
4271  switch(m_Type)
4272  {
4273  case ALLOCATION_TYPE_BLOCK:
4274  return m_BlockAllocation.m_Block->m_hMemory;
4275  case ALLOCATION_TYPE_DEDICATED:
4276  return m_DedicatedAllocation.m_hMemory;
4277  default:
4278  VMA_ASSERT(0);
4279  return VK_NULL_HANDLE;
4280  }
4281 }
4282 
4283 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4284 {
4285  switch(m_Type)
4286  {
4287  case ALLOCATION_TYPE_BLOCK:
4288  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4289  case ALLOCATION_TYPE_DEDICATED:
4290  return m_DedicatedAllocation.m_MemoryTypeIndex;
4291  default:
4292  VMA_ASSERT(0);
4293  return UINT32_MAX;
4294  }
4295 }
4296 
4297 void* VmaAllocation_T::GetMappedData() const
4298 {
4299  switch(m_Type)
4300  {
4301  case ALLOCATION_TYPE_BLOCK:
4302  if(m_MapCount != 0)
4303  {
4304  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4305  VMA_ASSERT(pBlockData != VMA_NULL);
4306  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4307  }
4308  else
4309  {
4310  return VMA_NULL;
4311  }
4312  break;
4313  case ALLOCATION_TYPE_DEDICATED:
4314  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4315  return m_DedicatedAllocation.m_pMappedData;
4316  default:
4317  VMA_ASSERT(0);
4318  return VMA_NULL;
4319  }
4320 }
4321 
4322 bool VmaAllocation_T::CanBecomeLost() const
4323 {
4324  switch(m_Type)
4325  {
4326  case ALLOCATION_TYPE_BLOCK:
4327  return m_BlockAllocation.m_CanBecomeLost;
4328  case ALLOCATION_TYPE_DEDICATED:
4329  return false;
4330  default:
4331  VMA_ASSERT(0);
4332  return false;
4333  }
4334 }
4335 
4336 VmaPool VmaAllocation_T::GetPool() const
4337 {
4338  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4339  return m_BlockAllocation.m_hPool;
4340 }
4341 
4342 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4343 {
4344  VMA_ASSERT(CanBecomeLost());
4345 
4346  /*
4347  Warning: This is a carefully designed algorithm.
4348  Do not modify unless you really know what you're doing :)
4349  */
4350  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4351  for(;;)
4352  {
4353  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4354  {
4355  VMA_ASSERT(0);
4356  return false;
4357  }
4358  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4359  {
4360  return false;
4361  }
4362  else // Last use time earlier than current time.
4363  {
4364  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4365  {
4366  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4367  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4368  return true;
4369  }
4370  }
4371  }
4372 }
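// Worked example (illustrative): with currentFrameIndex = 10 and
// frameInUseCount = 2, an allocation last used in frame 7 satisfies
// 7 + 2 = 9 < 10, so the loop above retires it and returns true. One last
// used in frame 8 satisfies 8 + 2 >= 10 and is kept, because frames 8..10
// may still be referenced by GPU work in flight.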
4373 
4374 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4375 {
4376  VMA_ASSERT(IsUserDataString());
4377  if(m_pUserData != VMA_NULL)
4378  {
4379  char* const oldStr = (char*)m_pUserData;
4380  const size_t oldStrLen = strlen(oldStr);
4381  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4382  m_pUserData = VMA_NULL;
4383  }
4384 }
4385 
4386 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4387 {
4388  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4389 
4390  if(m_MapCount != 0)
4391  {
4392  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4393  {
4394  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4395  *ppData = m_DedicatedAllocation.m_pMappedData;
4396  ++m_MapCount;
4397  return VK_SUCCESS;
4398  }
4399  else
4400  {
4401  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4402  return VK_ERROR_MEMORY_MAP_FAILED;
4403  }
4404  }
4405  else
4406  {
4407  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4408  hAllocator->m_hDevice,
4409  m_DedicatedAllocation.m_hMemory,
4410  0, // offset
4411  VK_WHOLE_SIZE,
4412  0, // flags
4413  ppData);
4414  if(result == VK_SUCCESS)
4415  {
4416  m_DedicatedAllocation.m_pMappedData = *ppData;
4417  m_MapCount = 1;
4418  }
4419  return result;
4420  }
4421 }
4422 
4423 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4424 {
4425  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4426 
4427  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4428  {
4429  --m_MapCount;
4430  if(m_MapCount == 0)
4431  {
4432  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4433  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4434  hAllocator->m_hDevice,
4435  m_DedicatedAllocation.m_hMemory);
4436  }
4437  }
4438  else
4439  {
4440  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4441  }
4442 }
4443 
4444 #if VMA_STATS_STRING_ENABLED
4445 
4446 // Correspond to values of enum VmaSuballocationType.
4447 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4448  "FREE",
4449  "UNKNOWN",
4450  "BUFFER",
4451  "IMAGE_UNKNOWN",
4452  "IMAGE_LINEAR",
4453  "IMAGE_OPTIMAL",
4454 };
4455 
4456 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4457 {
4458  json.BeginObject();
4459 
4460  json.WriteString("Blocks");
4461  json.WriteNumber(stat.blockCount);
4462 
4463  json.WriteString("Allocations");
4464  json.WriteNumber(stat.allocationCount);
4465 
4466  json.WriteString("UnusedRanges");
4467  json.WriteNumber(stat.unusedRangeCount);
4468 
4469  json.WriteString("UsedBytes");
4470  json.WriteNumber(stat.usedBytes);
4471 
4472  json.WriteString("UnusedBytes");
4473  json.WriteNumber(stat.unusedBytes);
4474 
4475  if(stat.allocationCount > 1)
4476  {
4477  json.WriteString("AllocationSize");
4478  json.BeginObject(true);
4479  json.WriteString("Min");
4480  json.WriteNumber(stat.allocationSizeMin);
4481  json.WriteString("Avg");
4482  json.WriteNumber(stat.allocationSizeAvg);
4483  json.WriteString("Max");
4484  json.WriteNumber(stat.allocationSizeMax);
4485  json.EndObject();
4486  }
4487 
4488  if(stat.unusedRangeCount > 1)
4489  {
4490  json.WriteString("UnusedRangeSize");
4491  json.BeginObject(true);
4492  json.WriteString("Min");
4493  json.WriteNumber(stat.unusedRangeSizeMin);
4494  json.WriteString("Avg");
4495  json.WriteNumber(stat.unusedRangeSizeAvg);
4496  json.WriteString("Max");
4497  json.WriteNumber(stat.unusedRangeSizeMax);
4498  json.EndObject();
4499  }
4500 
4501  json.EndObject();
4502 }
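// Example of the output shape produced by VmaPrintStatInfo (values are
// illustrative only; actual indentation follows VmaJsonWriter):
//   {
//    "Blocks": 1, "Allocations": 3, "UnusedRanges": 2,
//    "UsedBytes": 1310720, "UnusedBytes": 786432,
//    "AllocationSize": { "Min": 262144, "Avg": 436906, "Max": 786432 },
//    "UnusedRangeSize": { "Min": 131072, "Avg": 393216, "Max": 655360 }
//   }
// The "AllocationSize"/"UnusedRangeSize" sub-objects are emitted only when
// the corresponding count is greater than 1, as guarded above.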
4503 
4504 #endif // #if VMA_STATS_STRING_ENABLED
4505 
4506 struct VmaSuballocationItemSizeLess
4507 {
4508  bool operator()(
4509  const VmaSuballocationList::iterator lhs,
4510  const VmaSuballocationList::iterator rhs) const
4511  {
4512  return lhs->size < rhs->size;
4513  }
4514  bool operator()(
4515  const VmaSuballocationList::iterator lhs,
4516  VkDeviceSize rhsSize) const
4517  {
4518  return lhs->size < rhsSize;
4519  }
4520 };
4521 
4523 // class VmaBlockMetadata
4524 
4525 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4526  m_Size(0),
4527  m_FreeCount(0),
4528  m_SumFreeSize(0),
4529  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4530  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4531 {
4532 }
4533 
4534 VmaBlockMetadata::~VmaBlockMetadata()
4535 {
4536 }
4537 
4538 void VmaBlockMetadata::Init(VkDeviceSize size)
4539 {
4540  m_Size = size;
4541  m_FreeCount = 1;
4542  m_SumFreeSize = size;
4543 
4544  VmaSuballocation suballoc = {};
4545  suballoc.offset = 0;
4546  suballoc.size = size;
4547  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4548  suballoc.hAllocation = VK_NULL_HANDLE;
4549 
4550  m_Suballocations.push_back(suballoc);
4551  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4552  --suballocItem;
4553  m_FreeSuballocationsBySize.push_back(suballocItem);
4554 }
4555 
4556 bool VmaBlockMetadata::Validate() const
4557 {
4558  if(m_Suballocations.empty())
4559  {
4560  return false;
4561  }
4562 
4563  // Expected offset of the next suballocation, as calculated from the previous ones.
4564  VkDeviceSize calculatedOffset = 0;
4565  // Expected number of free suballocations as calculated from traversing their list.
4566  uint32_t calculatedFreeCount = 0;
4567  // Expected sum size of free suballocations as calculated from traversing their list.
4568  VkDeviceSize calculatedSumFreeSize = 0;
4569  // Expected number of free suballocations that should be registered in
4570  // m_FreeSuballocationsBySize calculated from traversing their list.
4571  size_t freeSuballocationsToRegister = 0;
4572  // True if the previously visited suballocation was free.
4573  bool prevFree = false;
4574 
4575  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4576  suballocItem != m_Suballocations.cend();
4577  ++suballocItem)
4578  {
4579  const VmaSuballocation& subAlloc = *suballocItem;
4580 
4581  // Actual offset of this suballocation doesn't match expected one.
4582  if(subAlloc.offset != calculatedOffset)
4583  {
4584  return false;
4585  }
4586 
4587  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4588  // Two adjacent free suballocations are invalid. They should be merged.
4589  if(prevFree && currFree)
4590  {
4591  return false;
4592  }
4593  prevFree = currFree;
4594 
4595  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4596  {
4597  return false;
4598  }
4599 
4600  if(currFree)
4601  {
4602  calculatedSumFreeSize += subAlloc.size;
4603  ++calculatedFreeCount;
4604  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4605  {
4606  ++freeSuballocationsToRegister;
4607  }
4608  }
4609 
4610  calculatedOffset += subAlloc.size;
4611  }
4612 
4613  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4614  // match expected one.
4615  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4616  {
4617  return false;
4618  }
4619 
4620  VkDeviceSize lastSize = 0;
4621  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4622  {
4623  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4624 
4625  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4626  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4627  {
4628  return false;
4629  }
4630  // They must be sorted by size ascending.
4631  if(suballocItem->size < lastSize)
4632  {
4633  return false;
4634  }
4635 
4636  lastSize = suballocItem->size;
4637  }
4638 
4639  // Check if totals match the calculated values.
4640  return
4641  ValidateFreeSuballocationList() &&
4642  (calculatedOffset == m_Size) &&
4643  (calculatedSumFreeSize == m_SumFreeSize) &&
4644  (calculatedFreeCount == m_FreeCount);
4645 }
4646 
4647 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
4648 {
4649  if(!m_FreeSuballocationsBySize.empty())
4650  {
4651  return m_FreeSuballocationsBySize.back()->size;
4652  }
4653  else
4654  {
4655  return 0;
4656  }
4657 }
4658 
4659 bool VmaBlockMetadata::IsEmpty() const
4660 {
4661  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
4662 }
4663 
4664 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
4665 {
4666  outInfo.blockCount = 1;
4667 
4668  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4669  outInfo.allocationCount = rangeCount - m_FreeCount;
4670  outInfo.unusedRangeCount = m_FreeCount;
4671 
4672  outInfo.unusedBytes = m_SumFreeSize;
4673  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
4674 
4675  outInfo.allocationSizeMin = UINT64_MAX;
4676  outInfo.allocationSizeMax = 0;
4677  outInfo.unusedRangeSizeMin = UINT64_MAX;
4678  outInfo.unusedRangeSizeMax = 0;
4679 
4680  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4681  suballocItem != m_Suballocations.cend();
4682  ++suballocItem)
4683  {
4684  const VmaSuballocation& suballoc = *suballocItem;
4685  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
4686  {
4687  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
4688  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
4689  }
4690  else
4691  {
4692  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
4693  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
4694  }
4695  }
4696 }
4697 
4698 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
4699 {
4700  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
4701 
4702  inoutStats.size += m_Size;
4703  inoutStats.unusedSize += m_SumFreeSize;
4704  inoutStats.allocationCount += rangeCount - m_FreeCount;
4705  inoutStats.unusedRangeCount += m_FreeCount;
4706  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
4707 }
4708 
4709 #if VMA_STATS_STRING_ENABLED
4710 
4711 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
4712 {
4713  json.BeginObject();
4714 
4715  json.WriteString("TotalBytes");
4716  json.WriteNumber(m_Size);
4717 
4718  json.WriteString("UnusedBytes");
4719  json.WriteNumber(m_SumFreeSize);
4720 
4721  json.WriteString("Allocations");
4722  json.WriteNumber(m_Suballocations.size() - m_FreeCount);
4723 
4724  json.WriteString("UnusedRanges");
4725  json.WriteNumber(m_FreeCount);
4726 
4727  json.WriteString("Suballocations");
4728  json.BeginArray();
4729  size_t i = 0;
4730  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4731  suballocItem != m_Suballocations.cend();
4732  ++suballocItem, ++i)
4733  {
4734  json.BeginObject(true);
4735 
4736  json.WriteString("Type");
4737  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
4738 
4739  json.WriteString("Size");
4740  json.WriteNumber(suballocItem->size);
4741 
4742  json.WriteString("Offset");
4743  json.WriteNumber(suballocItem->offset);
4744 
4745  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4746  {
4747  const void* pUserData = suballocItem->hAllocation->GetUserData();
4748  if(pUserData != VMA_NULL)
4749  {
4750  json.WriteString("UserData");
4751  if(suballocItem->hAllocation->IsUserDataString())
4752  {
4753  json.WriteString((const char*)pUserData);
4754  }
4755  else
4756  {
4757  json.BeginString();
4758  json.ContinueString_Pointer(pUserData);
4759  json.EndString();
4760  }
4761  }
4762  }
4763 
4764  json.EndObject();
4765  }
4766  json.EndArray();
4767 
4768  json.EndObject();
4769 }
4770 
4771 #endif // #if VMA_STATS_STRING_ENABLED
4772 
4773 /*
4774 How many suitable free suballocations to analyze before choosing the best one.
4775 - Set to 1 to use the First-Fit algorithm - the first suitable free suballocation
4776  will be chosen.
4777 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable free
4778  suballocations will be analyzed and the best one will be chosen.
4779 - Any other value is also acceptable.
4780 */
4781 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
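// Worked example (illustrative): for registered free sizes {64, 192, 256}
// (sorted ascending in m_FreeSuballocationsBySize) and allocSize = 128:
// - The Best-Fit branch below binary-searches to 192, the smallest free
//   range that fits, leaving 64 bytes of fragmentation in that range.
// - The Worst-Fit branch scans from the biggest range and tries 256 first,
//   leaving the larger 128-byte remainder available for future requests.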
4782 
4783 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
4784 {
4785  VMA_ASSERT(IsEmpty());
4786  pAllocationRequest->offset = 0;
4787  pAllocationRequest->sumFreeSize = m_SumFreeSize;
4788  pAllocationRequest->sumItemSize = 0;
4789  pAllocationRequest->item = m_Suballocations.begin();
4790  pAllocationRequest->itemsToMakeLostCount = 0;
4791 }
4792 
4793 bool VmaBlockMetadata::CreateAllocationRequest(
4794  uint32_t currentFrameIndex,
4795  uint32_t frameInUseCount,
4796  VkDeviceSize bufferImageGranularity,
4797  VkDeviceSize allocSize,
4798  VkDeviceSize allocAlignment,
4799  VmaSuballocationType allocType,
4800  bool canMakeOtherLost,
4801  VmaAllocationRequest* pAllocationRequest)
4802 {
4803  VMA_ASSERT(allocSize > 0);
4804  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
4805  VMA_ASSERT(pAllocationRequest != VMA_NULL);
4806  VMA_HEAVY_ASSERT(Validate());
4807 
4808  // There is not enough total free space in this block to fulfill the request: early return.
4809  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
4810  {
4811  return false;
4812  }
4813 
4814  // Efficient algorithm, searching m_FreeSuballocationsBySize.
4815  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
4816  if(freeSuballocCount > 0)
4817  {
4818  if(VMA_BEST_FIT)
4819  {
4820  // Find first free suballocation with size not less than allocSize.
4821  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
4822  m_FreeSuballocationsBySize.data(),
4823  m_FreeSuballocationsBySize.data() + freeSuballocCount,
4824  allocSize,
4825  VmaSuballocationItemSizeLess());
4826  size_t index = it - m_FreeSuballocationsBySize.data();
4827  for(; index < freeSuballocCount; ++index)
4828  {
4829  if(CheckAllocation(
4830  currentFrameIndex,
4831  frameInUseCount,
4832  bufferImageGranularity,
4833  allocSize,
4834  allocAlignment,
4835  allocType,
4836  m_FreeSuballocationsBySize[index],
4837  false, // canMakeOtherLost
4838  &pAllocationRequest->offset,
4839  &pAllocationRequest->itemsToMakeLostCount,
4840  &pAllocationRequest->sumFreeSize,
4841  &pAllocationRequest->sumItemSize))
4842  {
4843  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4844  return true;
4845  }
4846  }
4847  }
4848  else
4849  {
4850  // Search starting from the biggest suballocations.
4851  for(size_t index = freeSuballocCount; index--; )
4852  {
4853  if(CheckAllocation(
4854  currentFrameIndex,
4855  frameInUseCount,
4856  bufferImageGranularity,
4857  allocSize,
4858  allocAlignment,
4859  allocType,
4860  m_FreeSuballocationsBySize[index],
4861  false, // canMakeOtherLost
4862  &pAllocationRequest->offset,
4863  &pAllocationRequest->itemsToMakeLostCount,
4864  &pAllocationRequest->sumFreeSize,
4865  &pAllocationRequest->sumItemSize))
4866  {
4867  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
4868  return true;
4869  }
4870  }
4871  }
4872  }
4873 
4874  if(canMakeOtherLost)
4875  {
4876  // Brute-force algorithm. TODO: Come up with something better.
4877 
4878  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
4879  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
4880 
4881  VmaAllocationRequest tmpAllocRequest = {};
4882  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
4883  suballocIt != m_Suballocations.end();
4884  ++suballocIt)
4885  {
4886  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
4887  suballocIt->hAllocation->CanBecomeLost())
4888  {
4889  if(CheckAllocation(
4890  currentFrameIndex,
4891  frameInUseCount,
4892  bufferImageGranularity,
4893  allocSize,
4894  allocAlignment,
4895  allocType,
4896  suballocIt,
4897  canMakeOtherLost,
4898  &tmpAllocRequest.offset,
4899  &tmpAllocRequest.itemsToMakeLostCount,
4900  &tmpAllocRequest.sumFreeSize,
4901  &tmpAllocRequest.sumItemSize))
4902  {
4903  tmpAllocRequest.item = suballocIt;
4904 
4905  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
4906  {
4907  *pAllocationRequest = tmpAllocRequest;
4908  }
4909  }
4910  }
4911  }
4912 
4913  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
4914  {
4915  return true;
4916  }
4917  }
4918 
4919  return false;
4920 }
4921 
4922 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
4923  uint32_t currentFrameIndex,
4924  uint32_t frameInUseCount,
4925  VmaAllocationRequest* pAllocationRequest)
4926 {
4927  while(pAllocationRequest->itemsToMakeLostCount > 0)
4928  {
4929  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
4930  {
4931  ++pAllocationRequest->item;
4932  }
4933  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4934  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
4935  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
4936  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4937  {
4938  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
4939  --pAllocationRequest->itemsToMakeLostCount;
4940  }
4941  else
4942  {
4943  return false;
4944  }
4945  }
4946 
4947  VMA_HEAVY_ASSERT(Validate());
4948  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
4949  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
4950 
4951  return true;
4952 }
4953 
4954 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4955 {
4956  uint32_t lostAllocationCount = 0;
4957  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
4958  it != m_Suballocations.end();
4959  ++it)
4960  {
4961  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
4962  it->hAllocation->CanBecomeLost() &&
4963  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
4964  {
4965  it = FreeSuballocation(it);
4966  ++lostAllocationCount;
4967  }
4968  }
4969  return lostAllocationCount;
4970 }
4971 
4972 void VmaBlockMetadata::Alloc(
4973  const VmaAllocationRequest& request,
4974  VmaSuballocationType type,
4975  VkDeviceSize allocSize,
4976  VmaAllocation hAllocation)
4977 {
4978  VMA_ASSERT(request.item != m_Suballocations.end());
4979  VmaSuballocation& suballoc = *request.item;
4980  // Given suballocation is a free block.
4981  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
4982  // Given offset is inside this suballocation.
4983  VMA_ASSERT(request.offset >= suballoc.offset);
4984  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
4985  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
4986  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
4987 
4988  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
4989  // it to become used.
4990  UnregisterFreeSuballocation(request.item);
4991 
4992  suballoc.offset = request.offset;
4993  suballoc.size = allocSize;
4994  suballoc.type = type;
4995  suballoc.hAllocation = hAllocation;
4996 
4997  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
4998  if(paddingEnd)
4999  {
5000  VmaSuballocation paddingSuballoc = {};
5001  paddingSuballoc.offset = request.offset + allocSize;
5002  paddingSuballoc.size = paddingEnd;
5003  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5004  VmaSuballocationList::iterator next = request.item;
5005  ++next;
5006  const VmaSuballocationList::iterator paddingEndItem =
5007  m_Suballocations.insert(next, paddingSuballoc);
5008  RegisterFreeSuballocation(paddingEndItem);
5009  }
5010 
5011  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5012  if(paddingBegin)
5013  {
5014  VmaSuballocation paddingSuballoc = {};
5015  paddingSuballoc.offset = request.offset - paddingBegin;
5016  paddingSuballoc.size = paddingBegin;
5017  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5018  const VmaSuballocationList::iterator paddingBeginItem =
5019  m_Suballocations.insert(request.item, paddingSuballoc);
5020  RegisterFreeSuballocation(paddingBeginItem);
5021  }
5022 
5023  // Update totals.
5024  m_FreeCount = m_FreeCount - 1;
5025  if(paddingBegin > 0)
5026  {
5027  ++m_FreeCount;
5028  }
5029  if(paddingEnd > 0)
5030  {
5031  ++m_FreeCount;
5032  }
5033  m_SumFreeSize -= allocSize;
5034 }
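// Worked example (illustrative): a 1024-byte free suballocation at offset
// 4096, an aligned request.offset of 4160 and allocSize = 512 give
//   paddingBegin = 4160 - 4096 = 64,
//   paddingEnd   = 1024 - 64 - 512 = 448,
// so the range splits into FREE [4096..4160), used [4160..4672) and
// FREE [4672..5120), and m_FreeCount goes from 1 to 2 (-1 consumed,
// +1 per non-zero padding).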
5035 
5036 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5037 {
5038  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5039  suballocItem != m_Suballocations.end();
5040  ++suballocItem)
5041  {
5042  VmaSuballocation& suballoc = *suballocItem;
5043  if(suballoc.hAllocation == allocation)
5044  {
5045  FreeSuballocation(suballocItem);
5046  VMA_HEAVY_ASSERT(Validate());
5047  return;
5048  }
5049  }
5050  VMA_ASSERT(0 && "Not found!");
5051 }
5052 
5053 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5054 {
5055  VkDeviceSize lastSize = 0;
5056  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5057  {
5058  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5059 
5060  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5061  {
5062  VMA_ASSERT(0);
5063  return false;
5064  }
5065  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5066  {
5067  VMA_ASSERT(0);
5068  return false;
5069  }
5070  if(it->size < lastSize)
5071  {
5072  VMA_ASSERT(0);
5073  return false;
5074  }
5075 
5076  lastSize = it->size;
5077  }
5078  return true;
5079 }
5080 
5081 bool VmaBlockMetadata::CheckAllocation(
5082  uint32_t currentFrameIndex,
5083  uint32_t frameInUseCount,
5084  VkDeviceSize bufferImageGranularity,
5085  VkDeviceSize allocSize,
5086  VkDeviceSize allocAlignment,
5087  VmaSuballocationType allocType,
5088  VmaSuballocationList::const_iterator suballocItem,
5089  bool canMakeOtherLost,
5090  VkDeviceSize* pOffset,
5091  size_t* itemsToMakeLostCount,
5092  VkDeviceSize* pSumFreeSize,
5093  VkDeviceSize* pSumItemSize) const
5094 {
5095  VMA_ASSERT(allocSize > 0);
5096  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5097  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5098  VMA_ASSERT(pOffset != VMA_NULL);
5099 
5100  *itemsToMakeLostCount = 0;
5101  *pSumFreeSize = 0;
5102  *pSumItemSize = 0;
5103 
5104  if(canMakeOtherLost)
5105  {
5106  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5107  {
5108  *pSumFreeSize = suballocItem->size;
5109  }
5110  else
5111  {
5112  if(suballocItem->hAllocation->CanBecomeLost() &&
5113  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5114  {
5115  ++*itemsToMakeLostCount;
5116  *pSumItemSize = suballocItem->size;
5117  }
5118  else
5119  {
5120  return false;
5121  }
5122  }
5123 
5124  // Remaining size is too small for this request: Early return.
5125  if(m_Size - suballocItem->offset < allocSize)
5126  {
5127  return false;
5128  }
5129 
5130  // Start from offset equal to beginning of this suballocation.
5131  *pOffset = suballocItem->offset;
5132 
5133  // Apply VMA_DEBUG_MARGIN at the beginning.
5134  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5135  {
5136  *pOffset += VMA_DEBUG_MARGIN;
5137  }
5138 
5139  // Apply alignment.
5140  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5141  *pOffset = VmaAlignUp(*pOffset, alignment);
5142 
5143  // Check previous suballocations for BufferImageGranularity conflicts.
5144  // Make bigger alignment if necessary.
5145  if(bufferImageGranularity > 1)
5146  {
5147  bool bufferImageGranularityConflict = false;
5148  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5149  while(prevSuballocItem != m_Suballocations.cbegin())
5150  {
5151  --prevSuballocItem;
5152  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5153  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5154  {
5155  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5156  {
5157  bufferImageGranularityConflict = true;
5158  break;
5159  }
5160  }
5161  else
5162  // Already on previous page.
5163  break;
5164  }
5165  if(bufferImageGranularityConflict)
5166  {
5167  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5168  }
5169  }
5170 
5171  // Now that we have final *pOffset, check if we are past suballocItem.
5172  // If yes, return false - this function should be called for another suballocItem as starting point.
5173  if(*pOffset >= suballocItem->offset + suballocItem->size)
5174  {
5175  return false;
5176  }
5177 
5178  // Calculate padding at the beginning based on current offset.
5179  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5180 
5181  // Calculate required margin at the end if this is not last suballocation.
5182  VmaSuballocationList::const_iterator next = suballocItem;
5183  ++next;
5184  const VkDeviceSize requiredEndMargin =
5185  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5186 
5187  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5188  // Another early return check.
5189  if(suballocItem->offset + totalSize > m_Size)
5190  {
5191  return false;
5192  }
5193 
5194  // Advance lastSuballocItem until desired size is reached.
5195  // Update itemsToMakeLostCount.
5196  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5197  if(totalSize > suballocItem->size)
5198  {
5199  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5200  while(remainingSize > 0)
5201  {
5202  ++lastSuballocItem;
5203  if(lastSuballocItem == m_Suballocations.cend())
5204  {
5205  return false;
5206  }
5207  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5208  {
5209  *pSumFreeSize += lastSuballocItem->size;
5210  }
5211  else
5212  {
5213  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5214  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5215  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5216  {
5217  ++*itemsToMakeLostCount;
5218  *pSumItemSize += lastSuballocItem->size;
5219  }
5220  else
5221  {
5222  return false;
5223  }
5224  }
5225  remainingSize = (lastSuballocItem->size < remainingSize) ?
5226  remainingSize - lastSuballocItem->size : 0;
5227  }
5228  }
5229 
5230  // Check next suballocations for BufferImageGranularity conflicts.
5231  // If conflict exists, we must mark more allocations lost or fail.
5232  if(bufferImageGranularity > 1)
5233  {
5234  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5235  ++nextSuballocItem;
5236  while(nextSuballocItem != m_Suballocations.cend())
5237  {
5238  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5239  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5240  {
5241  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5242  {
5243  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5244  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5245  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5246  {
5247  ++*itemsToMakeLostCount;
5248  }
5249  else
5250  {
5251  return false;
5252  }
5253  }
5254  }
5255  else
5256  {
5257  // Already on next page.
5258  break;
5259  }
5260  ++nextSuballocItem;
5261  }
5262  }
5263  }
5264  else
5265  {
5266  const VmaSuballocation& suballoc = *suballocItem;
5267  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5268 
5269  *pSumFreeSize = suballoc.size;
5270 
5271  // Size of this suballocation is too small for this request: Early return.
5272  if(suballoc.size < allocSize)
5273  {
5274  return false;
5275  }
5276 
5277  // Start from offset equal to beginning of this suballocation.
5278  *pOffset = suballoc.offset;
5279 
5280  // Apply VMA_DEBUG_MARGIN at the beginning.
5281  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5282  {
5283  *pOffset += VMA_DEBUG_MARGIN;
5284  }
5285 
5286  // Apply alignment.
5287  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5288  *pOffset = VmaAlignUp(*pOffset, alignment);
5289 
5290  // Check previous suballocations for BufferImageGranularity conflicts.
5291  // Make bigger alignment if necessary.
5292  if(bufferImageGranularity > 1)
5293  {
5294  bool bufferImageGranularityConflict = false;
5295  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5296  while(prevSuballocItem != m_Suballocations.cbegin())
5297  {
5298  --prevSuballocItem;
5299  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5300  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5301  {
5302  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5303  {
5304  bufferImageGranularityConflict = true;
5305  break;
5306  }
5307  }
5308  else
5309  // Already on previous page.
5310  break;
5311  }
5312  if(bufferImageGranularityConflict)
5313  {
5314  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5315  }
5316  }
5317 
5318  // Calculate padding at the beginning based on current offset.
5319  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5320 
5321  // Calculate required margin at the end if this is not last suballocation.
5322  VmaSuballocationList::const_iterator next = suballocItem;
5323  ++next;
5324  const VkDeviceSize requiredEndMargin =
5325  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5326 
5327  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5328  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5329  {
5330  return false;
5331  }
5332 
5333  // Check next suballocations for BufferImageGranularity conflicts.
5334  // If conflict exists, allocation cannot be made here.
5335  if(bufferImageGranularity > 1)
5336  {
5337  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5338  ++nextSuballocItem;
5339  while(nextSuballocItem != m_Suballocations.cend())
5340  {
5341  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5342  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5343  {
5344  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5345  {
5346  return false;
5347  }
5348  }
5349  else
5350  {
5351  // Already on next page.
5352  break;
5353  }
5354  ++nextSuballocItem;
5355  }
5356  }
5357  }
5358 
5359  // All tests passed: Success. pOffset is already filled.
5360  return true;
5361 }
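// Illustrative sketch (not part of the library): the core offset arithmetic of
// CheckAllocation, condensed into a hypothetical standalone helper for the
// simple case of no debug margin and no granularity conflict. VmaAlignUp,
// defined earlier in this file, rounds a value up to a multiple of alignment.
static VkDeviceSize VmaExample_ComputeAlignedOffset(
    VkDeviceSize suballocOffset,
    VkDeviceSize allocAlignment)
{
    // E.g. suballocOffset = 1000, allocAlignment = 256 gives offset = 1024,
    // leaving paddingBegin = 24 free bytes before the new allocation.
    const VkDeviceSize offset = VmaAlignUp(suballocOffset, allocAlignment);
    const VkDeviceSize paddingBegin = offset - suballocOffset;
    (void)paddingBegin; // The caller also checks offset + allocSize against the suballocation end.
    return offset;
}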
5362 
5363 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5364 {
5365  VMA_ASSERT(item != m_Suballocations.end());
5366  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5367 
5368  VmaSuballocationList::iterator nextItem = item;
5369  ++nextItem;
5370  VMA_ASSERT(nextItem != m_Suballocations.end());
5371  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5372 
5373  item->size += nextItem->size;
5374  --m_FreeCount;
5375  m_Suballocations.erase(nextItem);
5376 }
5377 
5378 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5379 {
5380  // Change this suballocation to be marked as free.
5381  VmaSuballocation& suballoc = *suballocItem;
5382  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5383  suballoc.hAllocation = VK_NULL_HANDLE;
5384 
5385  // Update totals.
5386  ++m_FreeCount;
5387  m_SumFreeSize += suballoc.size;
5388 
5389  // Merge with previous and/or next suballocation if it's also free.
5390  bool mergeWithNext = false;
5391  bool mergeWithPrev = false;
5392 
5393  VmaSuballocationList::iterator nextItem = suballocItem;
5394  ++nextItem;
5395  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5396  {
5397  mergeWithNext = true;
5398  }
5399 
5400  VmaSuballocationList::iterator prevItem = suballocItem;
5401  if(suballocItem != m_Suballocations.begin())
5402  {
5403  --prevItem;
5404  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5405  {
5406  mergeWithPrev = true;
5407  }
5408  }
5409 
5410  if(mergeWithNext)
5411  {
5412  UnregisterFreeSuballocation(nextItem);
5413  MergeFreeWithNext(suballocItem);
5414  }
5415 
5416  if(mergeWithPrev)
5417  {
5418  UnregisterFreeSuballocation(prevItem);
5419  MergeFreeWithNext(prevItem);
5420  RegisterFreeSuballocation(prevItem);
5421  return prevItem;
5422  }
5423  else
5424  {
5425  RegisterFreeSuballocation(suballocItem);
5426  return suballocItem;
5427  }
5428 }
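// Illustrative note (not part of the library): freeing coalesces with free
// neighbors so the list never contains two adjacent free ranges, e.g.:
//
//     before Free(B):  [ A: free 64 ][ B: used 128 ][ C: free 32 ]
//     after  Free(B):  [ A: free 224 ]
//
// Free neighbors are first unregistered from m_FreeSuballocationsBySize, merged,
// and only the single surviving range is re-registered with its new size.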
5429 
5430 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5431 {
5432  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5433  VMA_ASSERT(item->size > 0);
5434 
5435  // You may want to enable this validation at the beginning or at the end of
5436  // this function, depending on what you want to check.
5437  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5438 
5439  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5440  {
5441  if(m_FreeSuballocationsBySize.empty())
5442  {
5443  m_FreeSuballocationsBySize.push_back(item);
5444  }
5445  else
5446  {
5447  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5448  }
5449  }
5450 
5451  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5452 }
5453 
5454 
5455 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5456 {
5457  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5458  VMA_ASSERT(item->size > 0);
5459 
5460  // You may want to enable this validation at the beginning or at the end of
5461  // this function, depending on what you want to check.
5462  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5463 
5464  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5465  {
5466  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5467  m_FreeSuballocationsBySize.data(),
5468  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5469  item,
5470  VmaSuballocationItemSizeLess());
5471  for(size_t index = it - m_FreeSuballocationsBySize.data();
5472  index < m_FreeSuballocationsBySize.size();
5473  ++index)
5474  {
5475  if(m_FreeSuballocationsBySize[index] == item)
5476  {
5477  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5478  return;
5479  }
5480  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5481  }
5482  VMA_ASSERT(0 && "Not found.");
5483  }
5484 
5485  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5486 }
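// Illustrative sketch (not part of the library): because several free
// suballocations can share the same size, the binary search above only finds
// the start of the run of equal-sized entries; the exact iterator is then
// located by a short linear scan. A hypothetical equivalent with the standard
// library:
//
//     auto it = std::lower_bound(vec.begin(), vec.end(), item, VmaSuballocationItemSizeLess());
//     for(; it != vec.end() && (*it)->size == item->size; ++it)
//     {
//         if(*it == item) { vec.erase(it); return; }
//     }
//     assert(0 && "Not found.");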
5487 
5489 // class VmaDeviceMemoryMapping
5490 
5491 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5492  m_MapCount(0),
5493  m_pMappedData(VMA_NULL)
5494 {
5495 }
5496 
5497 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5498 {
5499  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5500 }
5501 
5502 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, void **ppData)
5503 {
5504  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5505  if(m_MapCount != 0)
5506  {
5507  ++m_MapCount;
5508  VMA_ASSERT(m_pMappedData != VMA_NULL);
5509  if(ppData != VMA_NULL)
5510  {
5511  *ppData = m_pMappedData;
5512  }
5513  return VK_SUCCESS;
5514  }
5515  else
5516  {
5517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5518  hAllocator->m_hDevice,
5519  hMemory,
5520  0, // offset
5521  VK_WHOLE_SIZE,
5522  0, // flags
5523  &m_pMappedData);
5524  if(result == VK_SUCCESS)
5525  {
5526  if(ppData != VMA_NULL)
5527  {
5528  *ppData = m_pMappedData;
5529  }
5530  m_MapCount = 1;
5531  }
5532  return result;
5533  }
5534 }
5535 
5536 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory)
5537 {
5538  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5539  if(m_MapCount != 0)
5540  {
5541  if(--m_MapCount == 0)
5542  {
5543  m_pMappedData = VMA_NULL;
5544  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5545  }
5546  }
5547  else
5548  {
5549  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5550  }
5551 }
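// Illustrative usage (not part of the library): the map count makes nested
// mapping safe. Assuming a valid allocator and memory handle, vkMapMemory is
// called only on the first Map and vkUnmapMemory only on the last Unmap:
//
//     void* p1 = VMA_NULL;
//     void* p2 = VMA_NULL;
//     mapping.Map(hAllocator, hMemory, &p1);   // calls vkMapMemory, m_MapCount == 1
//     mapping.Map(hAllocator, hMemory, &p2);   // only increments; p2 == p1
//     mapping.Unmap(hAllocator, hMemory);      // m_MapCount back to 1, stays mapped
//     mapping.Unmap(hAllocator, hMemory);      // calls vkUnmapMemory, m_MapCount == 0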
5552 
5554 // class VmaDeviceMemoryBlock
5555 
5556 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5557  m_MemoryTypeIndex(UINT32_MAX),
5558  m_hMemory(VK_NULL_HANDLE),
5559  m_Metadata(hAllocator)
5560 {
5561 }
5562 
5563 void VmaDeviceMemoryBlock::Init(
5564  uint32_t newMemoryTypeIndex,
5565  VkDeviceMemory newMemory,
5566  VkDeviceSize newSize)
5567 {
5568  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5569 
5570  m_MemoryTypeIndex = newMemoryTypeIndex;
5571  m_hMemory = newMemory;
5572 
5573  m_Metadata.Init(newSize);
5574 }
5575 
5576 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5577 {
5578  // This is the most important assert in the entire library.
5579  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5580  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5581 
5582  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5583  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5584  m_hMemory = VK_NULL_HANDLE;
5585 }
5586 
5587 bool VmaDeviceMemoryBlock::Validate() const
5588 {
5589  if((m_hMemory == VK_NULL_HANDLE) ||
5590  (m_Metadata.GetSize() == 0))
5591  {
5592  return false;
5593  }
5594 
5595  return m_Metadata.Validate();
5596 }
5597 
5598 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, void** ppData)
5599 {
5600  return m_Mapping.Map(hAllocator, m_hMemory, ppData);
5601 }
5602 
5603 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator)
5604 {
5605  m_Mapping.Unmap(hAllocator, m_hMemory);
5606 }
5607 
5608 static void InitStatInfo(VmaStatInfo& outInfo)
5609 {
5610  memset(&outInfo, 0, sizeof(outInfo));
5611  outInfo.allocationSizeMin = UINT64_MAX;
5612  outInfo.unusedRangeSizeMin = UINT64_MAX;
5613 }
5614 
5615 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
5616 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
5617 {
5618  inoutInfo.blockCount += srcInfo.blockCount;
5619  inoutInfo.allocationCount += srcInfo.allocationCount;
5620  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
5621  inoutInfo.usedBytes += srcInfo.usedBytes;
5622  inoutInfo.unusedBytes += srcInfo.unusedBytes;
5623  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
5624  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
5625  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
5626  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
5627 }
5628 
5629 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
5630 {
5631  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
5632  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
5633  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
5634  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
5635 }
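// VmaRoundDiv, used above, divides integers rounding to nearest rather than
// truncating, so the reported averages are not biased low. A minimal
// equivalent sketch:
//
//     template<typename T> T RoundDiv(T x, T y) { return (x + y / (T)2) / y; }
//     // e.g. RoundDiv(10, 4) == 3, whereas plain integer 10 / 4 == 2.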
5636 
5637 VmaPool_T::VmaPool_T(
5638  VmaAllocator hAllocator,
5639  const VmaPoolCreateInfo& createInfo) :
5640  m_BlockVector(
5641  hAllocator,
5642  createInfo.memoryTypeIndex,
5643  createInfo.blockSize,
5644  createInfo.minBlockCount,
5645  createInfo.maxBlockCount,
5646  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
5647  createInfo.frameInUseCount,
5648  true) // isCustomPool
5649 {
5650 }
5651 
5652 VmaPool_T::~VmaPool_T()
5653 {
5654 }
5655 
5656 #if VMA_STATS_STRING_ENABLED
5657 
5658 #endif // #if VMA_STATS_STRING_ENABLED
5659 
5660 VmaBlockVector::VmaBlockVector(
5661  VmaAllocator hAllocator,
5662  uint32_t memoryTypeIndex,
5663  VkDeviceSize preferredBlockSize,
5664  size_t minBlockCount,
5665  size_t maxBlockCount,
5666  VkDeviceSize bufferImageGranularity,
5667  uint32_t frameInUseCount,
5668  bool isCustomPool) :
5669  m_hAllocator(hAllocator),
5670  m_MemoryTypeIndex(memoryTypeIndex),
5671  m_PreferredBlockSize(preferredBlockSize),
5672  m_MinBlockCount(minBlockCount),
5673  m_MaxBlockCount(maxBlockCount),
5674  m_BufferImageGranularity(bufferImageGranularity),
5675  m_FrameInUseCount(frameInUseCount),
5676  m_IsCustomPool(isCustomPool),
5677  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
5678  m_HasEmptyBlock(false),
5679  m_pDefragmentator(VMA_NULL)
5680 {
5681 }
5682 
5683 VmaBlockVector::~VmaBlockVector()
5684 {
5685  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
5686 
5687  for(size_t i = m_Blocks.size(); i--; )
5688  {
5689  m_Blocks[i]->Destroy(m_hAllocator);
5690  vma_delete(m_hAllocator, m_Blocks[i]);
5691  }
5692 }
5693 
5694 VkResult VmaBlockVector::CreateMinBlocks()
5695 {
5696  for(size_t i = 0; i < m_MinBlockCount; ++i)
5697  {
5698  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
5699  if(res != VK_SUCCESS)
5700  {
5701  return res;
5702  }
5703  }
5704  return VK_SUCCESS;
5705 }
5706 
5707 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
5708 {
5709  pStats->size = 0;
5710  pStats->unusedSize = 0;
5711  pStats->allocationCount = 0;
5712  pStats->unusedRangeCount = 0;
5713  pStats->unusedRangeSizeMax = 0;
5714 
5715  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5716 
5717  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
5718  {
5719  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
5720  VMA_ASSERT(pBlock);
5721  VMA_HEAVY_ASSERT(pBlock->Validate());
5722  pBlock->m_Metadata.AddPoolStats(*pStats);
5723  }
5724 }
5725 
5726 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
5727 
5728 VkResult VmaBlockVector::Allocate(
5729  VmaPool hCurrentPool,
5730  uint32_t currentFrameIndex,
5731  const VkMemoryRequirements& vkMemReq,
5732  const VmaAllocationCreateInfo& createInfo,
5733  VmaSuballocationType suballocType,
5734  VmaAllocation* pAllocation)
5735 {
5736  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
5737  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
5738 
5739  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5740 
5741  // 1. Search existing allocations. Try to allocate without making other allocations lost.
5742  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5743  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5744  {
5745  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5746  VMA_ASSERT(pCurrBlock);
5747  VmaAllocationRequest currRequest = {};
5748  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5749  currentFrameIndex,
5750  m_FrameInUseCount,
5751  m_BufferImageGranularity,
5752  vkMemReq.size,
5753  vkMemReq.alignment,
5754  suballocType,
5755  false, // canMakeOtherLost
5756  &currRequest))
5757  {
5758  // Allocate from pCurrBlock.
5759  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
5760 
5761  if(mapped)
5762  {
5763  VkResult res = pCurrBlock->Map(m_hAllocator, nullptr);
5764  if(res != VK_SUCCESS)
5765  {
5766  return res;
5767  }
5768  }
5769 
5770  // We no longer have an empty block.
5771  if(pCurrBlock->m_Metadata.IsEmpty())
5772  {
5773  m_HasEmptyBlock = false;
5774  }
5775 
5776  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5777  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
5778  (*pAllocation)->InitBlockAllocation(
5779  hCurrentPool,
5780  pCurrBlock,
5781  currRequest.offset,
5782  vkMemReq.alignment,
5783  vkMemReq.size,
5784  suballocType,
5785  mapped,
5786  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5787  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
5788  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
5789  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5790  return VK_SUCCESS;
5791  }
5792  }
5793 
5794  const bool canCreateNewBlock =
5795  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
5796  (m_Blocks.size() < m_MaxBlockCount);
5797 
5798  // 2. Try to create new block.
5799  if(canCreateNewBlock)
5800  {
5801  // 2.1. Start with full preferredBlockSize.
5802  VkDeviceSize blockSize = m_PreferredBlockSize;
5803  size_t newBlockIndex = 0;
5804  VkResult res = CreateBlock(blockSize, &newBlockIndex);
5805  // Allocating blocks of other sizes is allowed only in default pools.
5806  // In custom pools block size is fixed.
5807  if(res < 0 && m_IsCustomPool == false)
5808  {
5809  // 2.2. Try half the size.
5810  blockSize /= 2;
5811  if(blockSize >= vkMemReq.size)
5812  {
5813  res = CreateBlock(blockSize, &newBlockIndex);
5814  if(res < 0)
5815  {
5816  // 2.3. Try quarter the size.
5817  blockSize /= 2;
5818  if(blockSize >= vkMemReq.size)
5819  {
5820  res = CreateBlock(blockSize, &newBlockIndex);
5821  }
5822  }
5823  }
5824  }
5825  if(res == VK_SUCCESS)
5826  {
5827  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
5828  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
5829 
5830  if(mapped)
5831  {
5832  res = pBlock->Map(m_hAllocator, nullptr);
5833  if(res != VK_SUCCESS)
5834  {
5835  return res;
5836  }
5837  }
5838 
5839  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
5840  VmaAllocationRequest allocRequest;
5841  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
5842  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5843  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
5844  (*pAllocation)->InitBlockAllocation(
5845  hCurrentPool,
5846  pBlock,
5847  allocRequest.offset,
5848  vkMemReq.alignment,
5849  vkMemReq.size,
5850  suballocType,
5851  mapped,
5852  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5853  VMA_HEAVY_ASSERT(pBlock->Validate());
5854  VMA_DEBUG_LOG(" Created new block Size=%llu", blockSize);
5855  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5856  return VK_SUCCESS;
5857  }
5858  }
5859 
5860  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
5861 
5862  // 3. Try to allocate from existing blocks with making other allocations lost.
5863  if(canMakeOtherLost)
5864  {
5865  uint32_t tryIndex = 0;
5866  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
5867  {
5868  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
5869  VmaAllocationRequest bestRequest = {};
5870  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
5871 
5872  // 1. Search existing allocations.
5873  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
5874  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
5875  {
5876  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
5877  VMA_ASSERT(pCurrBlock);
5878  VmaAllocationRequest currRequest = {};
5879  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
5880  currentFrameIndex,
5881  m_FrameInUseCount,
5882  m_BufferImageGranularity,
5883  vkMemReq.size,
5884  vkMemReq.alignment,
5885  suballocType,
5886  canMakeOtherLost,
5887  &currRequest))
5888  {
5889  const VkDeviceSize currRequestCost = currRequest.CalcCost();
5890  if(pBestRequestBlock == VMA_NULL ||
5891  currRequestCost < bestRequestCost)
5892  {
5893  pBestRequestBlock = pCurrBlock;
5894  bestRequest = currRequest;
5895  bestRequestCost = currRequestCost;
5896 
5897  if(bestRequestCost == 0)
5898  {
5899  break;
5900  }
5901  }
5902  }
5903  }
5904 
5905  if(pBestRequestBlock != VMA_NULL)
5906  {
5907  if(mapped)
5908  {
5909  VkResult res = pBestRequestBlock->Map(m_hAllocator, nullptr);
5910  if(res != VK_SUCCESS)
5911  {
5912  return res;
5913  }
5914  }
5915 
5916  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
5917  currentFrameIndex,
5918  m_FrameInUseCount,
5919  &bestRequest))
5920  {
5921  // We no longer have an empty block.
5922  if(pBestRequestBlock->m_Metadata.IsEmpty())
5923  {
5924  m_HasEmptyBlock = false;
5925  }
5926  // Allocate from this pBlock.
5927  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
5928  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
5929  (*pAllocation)->InitBlockAllocation(
5930  hCurrentPool,
5931  pBestRequestBlock,
5932  bestRequest.offset,
5933  vkMemReq.alignment,
5934  vkMemReq.size,
5935  suballocType,
5936  mapped,
5937  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
5938  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
5939  VMA_DEBUG_LOG(" Returned from existing block");
5940  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
5941  return VK_SUCCESS;
5942  }
5943  // else: Some allocations must have been touched while we are here. Next try.
5944  }
5945  else
5946  {
5947  // Could not find place in any of the blocks - break outer loop.
5948  break;
5949  }
5950  }
5951  /* Maximum number of tries exceeded - a very unlikely event when many other
5952  threads are simultaneously touching allocations, making it impossible to make
5953  them lost at the same time as we try to allocate. */
5954  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
5955  {
5956  return VK_ERROR_TOO_MANY_OBJECTS;
5957  }
5958  }
5959 
5960  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5961 }
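// Illustrative usage (not part of the library's implementation): the staged
// strategy above (1. reuse free space in existing blocks, 2. create a new
// block, 3. make other allocations lost) is selected by the flags the caller
// passes. A minimal sketch, assuming a valid allocator and bufCreateInfo:
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
//         VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; // enables stage 3
//
//     VkBuffer buf = VK_NULL_HANDLE;
//     VmaAllocation alloc = VK_NULL_HANDLE;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buf, &alloc, nullptr);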
5962 
5963 void VmaBlockVector::Free(
5964  VmaAllocation hAllocation)
5965 {
5966  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
5967 
5968  // Scope for lock.
5969  {
5970  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
5971 
5972  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
5973 
5974  if(hAllocation->IsPersistentMap())
5975  {
5976  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory);
5977  }
5978 
5979  pBlock->m_Metadata.Free(hAllocation);
5980  VMA_HEAVY_ASSERT(pBlock->Validate());
5981 
5982  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
5983 
5984  // pBlock became empty after this deallocation.
5985  if(pBlock->m_Metadata.IsEmpty())
5986  {
5987  // We already have an empty block. We don't want two, so delete this one.
5988  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
5989  {
5990  pBlockToDelete = pBlock;
5991  Remove(pBlock);
5992  }
5993  // We now have our first empty block.
5994  else
5995  {
5996  m_HasEmptyBlock = true;
5997  }
5998  }
5999  // pBlock didn't become empty, but we have another empty block - find and free that one.
6000  // (This is optional - a heuristic.)
6001  else if(m_HasEmptyBlock)
6002  {
6003  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6004  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6005  {
6006  pBlockToDelete = pLastBlock;
6007  m_Blocks.pop_back();
6008  m_HasEmptyBlock = false;
6009  }
6010  }
6011 
6012  IncrementallySortBlocks();
6013  }
6014 
6015  // Destruction of an empty block. Deferred until this point, outside of the mutex
6016  // lock, for performance reasons.
6017  if(pBlockToDelete != VMA_NULL)
6018  {
6019  VMA_DEBUG_LOG(" Deleted empty block");
6020  pBlockToDelete->Destroy(m_hAllocator);
6021  vma_delete(m_hAllocator, pBlockToDelete);
6022  }
6023 }
6024 
6025 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6026 {
6027  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6028  {
6029  if(m_Blocks[blockIndex] == pBlock)
6030  {
6031  VmaVectorRemove(m_Blocks, blockIndex);
6032  return;
6033  }
6034  }
6035  VMA_ASSERT(0);
6036 }
6037 
6038 void VmaBlockVector::IncrementallySortBlocks()
6039 {
6040  // Bubble sort only until first swap.
6041  for(size_t i = 1; i < m_Blocks.size(); ++i)
6042  {
6043  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6044  {
6045  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6046  return;
6047  }
6048  }
6049 }
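// Illustrative note (not part of the library): performing at most one swap per
// call amortizes sorting across many Allocate/Free calls. Repeated calls
// converge to m_Blocks ordered ascending by free space, so the forward scan in
// Allocate() fills the fullest blocks first, e.g. by free size:
//
//     [8, 2, 5]  -> one call swaps the first unordered pair -> [2, 8, 5]
//     [2, 8, 5]  -> next call                                -> [2, 5, 8]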
6050 
6051 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6052 {
6053  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6054  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6055  allocInfo.allocationSize = blockSize;
6056  VkDeviceMemory mem = VK_NULL_HANDLE;
6057  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6058  if(res < 0)
6059  {
6060  return res;
6061  }
6062 
6063  // New VkDeviceMemory successfully created.
6064 
6065  // Create a new block object for it.
6066  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6067  pBlock->Init(
6068  m_MemoryTypeIndex,
6069  mem,
6070  allocInfo.allocationSize);
6071 
6072  m_Blocks.push_back(pBlock);
6073  if(pNewBlockIndex != VMA_NULL)
6074  {
6075  *pNewBlockIndex = m_Blocks.size() - 1;
6076  }
6077 
6078  return VK_SUCCESS;
6079 }
6080 
6081 #if VMA_STATS_STRING_ENABLED
6082 
6083 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6084 {
6085  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6086 
6087  json.BeginObject();
6088 
6089  if(m_IsCustomPool)
6090  {
6091  json.WriteString("MemoryTypeIndex");
6092  json.WriteNumber(m_MemoryTypeIndex);
6093 
6094  json.WriteString("BlockSize");
6095  json.WriteNumber(m_PreferredBlockSize);
6096 
6097  json.WriteString("BlockCount");
6098  json.BeginObject(true);
6099  if(m_MinBlockCount > 0)
6100  {
6101  json.WriteString("Min");
6102  json.WriteNumber(m_MinBlockCount);
6103  }
6104  if(m_MaxBlockCount < SIZE_MAX)
6105  {
6106  json.WriteString("Max");
6107  json.WriteNumber(m_MaxBlockCount);
6108  }
6109  json.WriteString("Cur");
6110  json.WriteNumber(m_Blocks.size());
6111  json.EndObject();
6112 
6113  if(m_FrameInUseCount > 0)
6114  {
6115  json.WriteString("FrameInUseCount");
6116  json.WriteNumber(m_FrameInUseCount);
6117  }
6118  }
6119  else
6120  {
6121  json.WriteString("PreferredBlockSize");
6122  json.WriteNumber(m_PreferredBlockSize);
6123  }
6124 
6125  json.WriteString("Blocks");
6126  json.BeginArray();
6127  for(size_t i = 0; i < m_Blocks.size(); ++i)
6128  {
6129  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6130  }
6131  json.EndArray();
6132 
6133  json.EndObject();
6134 }
6135 
6136 #endif // #if VMA_STATS_STRING_ENABLED
6137 
6138 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6139  VmaAllocator hAllocator,
6140  uint32_t currentFrameIndex)
6141 {
6142  if(m_pDefragmentator == VMA_NULL)
6143  {
6144  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6145  hAllocator,
6146  this,
6147  currentFrameIndex);
6148  }
6149 
6150  return m_pDefragmentator;
6151 }
6152 
6153 VkResult VmaBlockVector::Defragment(
6154  VmaDefragmentationStats* pDefragmentationStats,
6155  VkDeviceSize& maxBytesToMove,
6156  uint32_t& maxAllocationsToMove)
6157 {
6158  if(m_pDefragmentator == VMA_NULL)
6159  {
6160  return VK_SUCCESS;
6161  }
6162 
6163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6164 
6165  // Defragment.
6166  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6167 
6168  // Accumulate statistics.
6169  if(pDefragmentationStats != VMA_NULL)
6170  {
6171  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6172  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6173  pDefragmentationStats->bytesMoved += bytesMoved;
6174  pDefragmentationStats->allocationsMoved += allocationsMoved;
6175  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6176  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6177  maxBytesToMove -= bytesMoved;
6178  maxAllocationsToMove -= allocationsMoved;
6179  }
6180 
6181  // Free empty blocks.
6182  m_HasEmptyBlock = false;
6183  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6184  {
6185  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6186  if(pBlock->m_Metadata.IsEmpty())
6187  {
6188  if(m_Blocks.size() > m_MinBlockCount)
6189  {
6190  if(pDefragmentationStats != VMA_NULL)
6191  {
6192  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6193  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6194  }
6195 
6196  VmaVectorRemove(m_Blocks, blockIndex);
6197  pBlock->Destroy(m_hAllocator);
6198  vma_delete(m_hAllocator, pBlock);
6199  }
6200  else
6201  {
6202  m_HasEmptyBlock = true;
6203  }
6204  }
6205  }
6206 
6207  return result;
6208 }
6209 
6210 void VmaBlockVector::DestroyDefragmentator()
6211 {
6212  if(m_pDefragmentator != VMA_NULL)
6213  {
6214  vma_delete(m_hAllocator, m_pDefragmentator);
6215  m_pDefragmentator = VMA_NULL;
6216  }
6217 }
6218 
6219 void VmaBlockVector::MakePoolAllocationsLost(
6220  uint32_t currentFrameIndex,
6221  size_t* pLostAllocationCount)
6222 {
6223  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6224 
6225  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6226  {
6227  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6228  VMA_ASSERT(pBlock);
6229  pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6230  }
6231 }
6232 
6233 void VmaBlockVector::AddStats(VmaStats* pStats)
6234 {
6235  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6236  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6237 
6238  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6239 
6240  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6241  {
6242  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6243  VMA_ASSERT(pBlock);
6244  VMA_HEAVY_ASSERT(pBlock->Validate());
6245  VmaStatInfo allocationStatInfo;
6246  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6247  VmaAddStatInfo(pStats->total, allocationStatInfo);
6248  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6249  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6250  }
6251 }
6252 
6254 // VmaDefragmentator members definition
6255 
6256 VmaDefragmentator::VmaDefragmentator(
6257  VmaAllocator hAllocator,
6258  VmaBlockVector* pBlockVector,
6259  uint32_t currentFrameIndex) :
6260  m_hAllocator(hAllocator),
6261  m_pBlockVector(pBlockVector),
6262  m_CurrentFrameIndex(currentFrameIndex),
6263  m_BytesMoved(0),
6264  m_AllocationsMoved(0),
6265  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6266  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6267 {
6268 }
6269 
6270 VmaDefragmentator::~VmaDefragmentator()
6271 {
6272  for(size_t i = m_Blocks.size(); i--; )
6273  {
6274  vma_delete(m_hAllocator, m_Blocks[i]);
6275  }
6276 }
6277 
6278 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6279 {
6280  AllocationInfo allocInfo;
6281  allocInfo.m_hAllocation = hAlloc;
6282  allocInfo.m_pChanged = pChanged;
6283  m_Allocations.push_back(allocInfo);
6284 }
6285 
6286 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6287 {
6288  // It has already been mapped for defragmentation.
6289  if(m_pMappedDataForDefragmentation)
6290  {
6291  *ppMappedData = m_pMappedDataForDefragmentation;
6292  return VK_SUCCESS;
6293  }
6294 
6295  // It is originally mapped.
6296  if(m_pBlock->m_Mapping.GetMappedData())
6297  {
6298  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6299  return VK_SUCCESS;
6300  }
6301 
6302  // Map on first usage.
6303  VkResult res = m_pBlock->Map(hAllocator, &m_pMappedDataForDefragmentation);
6304  *ppMappedData = m_pMappedDataForDefragmentation;
6305  return res;
6306 }
6307 
6308 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6309 {
6310  if(m_pMappedDataForDefragmentation != VMA_NULL)
6311  {
6312  m_pBlock->Unmap(hAllocator);
6313  }
6314 }
6315 
6316 VkResult VmaDefragmentator::DefragmentRound(
6317  VkDeviceSize maxBytesToMove,
6318  uint32_t maxAllocationsToMove)
6319 {
6320  if(m_Blocks.empty())
6321  {
6322  return VK_SUCCESS;
6323  }
6324 
6325  size_t srcBlockIndex = m_Blocks.size() - 1;
6326  size_t srcAllocIndex = SIZE_MAX;
6327  for(;;)
6328  {
6329  // 1. Find next allocation to move.
6330  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6331  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6332  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6333  {
6334  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6335  {
6336  // Finished: no more allocations to process.
6337  if(srcBlockIndex == 0)
6338  {
6339  return VK_SUCCESS;
6340  }
6341  else
6342  {
6343  --srcBlockIndex;
6344  srcAllocIndex = SIZE_MAX;
6345  }
6346  }
6347  else
6348  {
6349  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6350  }
6351  }
6352 
6353  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6354  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6355 
6356  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6357  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6358  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6359  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6360 
6361  // 2. Try to find new place for this allocation in preceding or current block.
6362  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6363  {
6364  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6365  VmaAllocationRequest dstAllocRequest;
6366  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6367  m_CurrentFrameIndex,
6368  m_pBlockVector->GetFrameInUseCount(),
6369  m_pBlockVector->GetBufferImageGranularity(),
6370  size,
6371  alignment,
6372  suballocType,
6373  false, // canMakeOtherLost
6374  &dstAllocRequest) &&
6375  MoveMakesSense(
6376  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6377  {
6378  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6379 
6380  // Reached limit on number of allocations or bytes to move.
6381  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6382  (m_BytesMoved + size > maxBytesToMove))
6383  {
6384  return VK_INCOMPLETE;
6385  }
6386 
6387  void* pDstMappedData = VMA_NULL;
6388  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6389  if(res != VK_SUCCESS)
6390  {
6391  return res;
6392  }
6393 
6394  void* pSrcMappedData = VMA_NULL;
6395  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6396  if(res != VK_SUCCESS)
6397  {
6398  return res;
6399  }
6400 
6401  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6402  memcpy(
6403  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6404  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6405  static_cast<size_t>(size));
6406 
6407  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6408  pSrcBlockInfo->m_pBlock->m_Metadata.Free(allocInfo.m_hAllocation);
6409 
6410  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6411 
6412  if(allocInfo.m_pChanged != VMA_NULL)
6413  {
6414  *allocInfo.m_pChanged = VK_TRUE;
6415  }
6416 
6417  ++m_AllocationsMoved;
6418  m_BytesMoved += size;
6419 
6420  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6421 
6422  break;
6423  }
6424  }
6425 
6426  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for next round.
6427 
6428  if(srcAllocIndex > 0)
6429  {
6430  --srcAllocIndex;
6431  }
6432  else
6433  {
6434  if(srcBlockIndex > 0)
6435  {
6436  --srcBlockIndex;
6437  srcAllocIndex = SIZE_MAX;
6438  }
6439  else
6440  {
6441  return VK_SUCCESS;
6442  }
6443  }
6444  }
6445 }
6446 
6447 VkResult VmaDefragmentator::Defragment(
6448  VkDeviceSize maxBytesToMove,
6449  uint32_t maxAllocationsToMove)
6450 {
6451  if(m_Allocations.empty())
6452  {
6453  return VK_SUCCESS;
6454  }
6455 
6456  // Create block info for each block.
6457  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6458  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6459  {
6460  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6461  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6462  m_Blocks.push_back(pBlockInfo);
6463  }
6464 
6465  // Sort them by m_pBlock pointer value.
6466  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6467 
6468  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
6469  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6470  {
6471  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6472  // Now, as we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
6473  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6474  {
6475  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6476  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6477  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6478  {
6479  (*it)->m_Allocations.push_back(allocInfo);
6480  }
6481  else
6482  {
6483  VMA_ASSERT(0);
6484  }
6485  }
6486  }
6487  m_Allocations.clear();
6488 
6489  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6490  {
6491  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6492  pBlockInfo->CalcHasNonMovableAllocations();
6493  pBlockInfo->SortAllocationsBySizeDescecnding();
6494  }
6495 
6496  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6497  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6498 
6499  // Execute defragmentation rounds (the main part).
6500  VkResult result = VK_SUCCESS;
6501  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6502  {
6503  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6504  }
6505 
6506  // Unmap blocks that were mapped for defragmentation.
6507  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6508  {
6509  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6510  }
6511 
6512  return result;
6513 }
6514 
6515 bool VmaDefragmentator::MoveMakesSense(
6516  size_t dstBlockIndex, VkDeviceSize dstOffset,
6517  size_t srcBlockIndex, VkDeviceSize srcOffset)
6518 {
6519  if(dstBlockIndex < srcBlockIndex)
6520  {
6521  return true;
6522  }
6523  if(dstBlockIndex > srcBlockIndex)
6524  {
6525  return false;
6526  }
6527  if(dstOffset < srcOffset)
6528  {
6529  return true;
6530  }
6531  return false;
6532 }
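// Illustrative note (not part of the library): a move is accepted only when the
// destination (blockIndex, offset) is lexicographically smaller than the
// source, so data only ever migrates toward the front of the block list and
// each round makes monotonic progress:
//
//     MoveMakesSense(0, 4096, 1,   0) == true   // earlier block always wins
//     MoveMakesSense(1,    0, 1, 256) == true   // same block, lower offset
//     MoveMakesSense(1,  256, 1, 256) == false  // no-op move is rejected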
6533 
6535 // VmaAllocator_T
6536 
6537 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6538  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6539  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6540  m_PhysicalDevice(pCreateInfo->physicalDevice),
6541  m_hDevice(pCreateInfo->device),
6542  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6543  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6544  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6545  m_PreferredLargeHeapBlockSize(0),
6546  m_PreferredSmallHeapBlockSize(0),
6547  m_CurrentFrameIndex(0),
6548  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6549 {
6550  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6551 
6552  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
6553  memset(&m_MemProps, 0, sizeof(m_MemProps));
6554  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6555 
6556  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6557  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6558 
6559  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6560  {
6561  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6562  }
6563 
6564  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6565  {
6566  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6567  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6568  }
6569 
6570  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6571 
6572  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6573  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6574 
6575  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
6576  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
6577  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
6578  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
6579 
6580  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
6581  {
6582  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
6583  {
6584  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
6585  if(limit != VK_WHOLE_SIZE)
6586  {
6587  m_HeapSizeLimit[heapIndex] = limit;
6588  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
6589  {
6590  m_MemProps.memoryHeaps[heapIndex].size = limit;
6591  }
6592  }
6593  }
6594  }
6595 
6596  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
6597  {
6598  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
6599 
6600  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
6601  this,
6602  memTypeIndex,
6603  preferredBlockSize,
6604  0,
6605  SIZE_MAX,
6606  GetBufferImageGranularity(),
6607  pCreateInfo->frameInUseCount,
6608  false); // isCustomPool
6609  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
6610  // because minBlockCount is 0.
6611  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
6612  }
6613 }
6614 
6615 VmaAllocator_T::~VmaAllocator_T()
6616 {
6617  VMA_ASSERT(m_Pools.empty());
6618 
6619  for(size_t i = GetMemoryTypeCount(); i--; )
6620  {
6621  vma_delete(this, m_pDedicatedAllocations[i]);
6622  vma_delete(this, m_pBlockVectors[i]);
6623  }
6624 }
6625 
6626 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
6627 {
6628 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6629  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
6630  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
6631  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
6632  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
6633  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
6634  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
6635  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
6636  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
6637  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
6638  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
6639  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
6640  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
6641  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
6642  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
6643  // Ignoring vkGetBufferMemoryRequirements2KHR.
6644  // Ignoring vkGetImageMemoryRequirements2KHR.
6645 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
6646 
6647 #define VMA_COPY_IF_NOT_NULL(funcName) \
6648  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
6649 
6650  if(pVulkanFunctions != VMA_NULL)
6651  {
6652  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
6653  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
6654  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
6655  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
6656  VMA_COPY_IF_NOT_NULL(vkMapMemory);
6657  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
6658  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
6659  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
6660  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
6661  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
6662  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
6663  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
6664  VMA_COPY_IF_NOT_NULL(vkCreateImage);
6665  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
6666  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
6667  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
6668  }
6669 
6670 #undef VMA_COPY_IF_NOT_NULL
6671 
6672  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
6673  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
6674  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
6675  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
6676  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
6677  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
6678  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
6679  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
6680  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
6681  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
6682  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
6683  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
6684  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
6685  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
6686  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
6687  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
6688  if(m_UseKhrDedicatedAllocation)
6689  {
6690  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
6691  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
6692  }
6693 }
6694 
6695 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
6696 {
6697  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
6698  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
6699  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
6700  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
6701 }
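// Illustrative note (not part of the library): with the defaults defined
// earlier in this header (VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and preferred
// small/large block sizes of 64 MiB / 256 MiB, unless overridden through
// VmaAllocatorCreateInfo), a 256 MiB heap gets 64 MiB blocks while an 8 GiB
// device-local heap gets 256 MiB blocks.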
6702 
6703 VkResult VmaAllocator_T::AllocateMemoryOfType(
6704  const VkMemoryRequirements& vkMemReq,
6705  bool dedicatedAllocation,
6706  VkBuffer dedicatedBuffer,
6707  VkImage dedicatedImage,
6708  const VmaAllocationCreateInfo& createInfo,
6709  uint32_t memTypeIndex,
6710  VmaSuballocationType suballocType,
6711  VmaAllocation* pAllocation)
6712 {
6713  VMA_ASSERT(pAllocation != VMA_NULL);
6714  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
6715 
6716  VmaAllocationCreateInfo finalCreateInfo = createInfo;
6717 
6718  // If memory type is not HOST_VISIBLE, disable MAPPED.
6719  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6720  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
6721  {
6722  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
6723  }
6724 
6725  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
6726  VMA_ASSERT(blockVector);
6727 
6728  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
6729  bool preferDedicatedMemory =
6730  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
6731  dedicatedAllocation ||
6732  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
6733  vkMemReq.size > preferredBlockSize / 2;
6734 
6735  if(preferDedicatedMemory &&
6736  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
6737  finalCreateInfo.pool == VK_NULL_HANDLE)
6738  {
6739  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
6740  }
6741 
6742  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
6743  {
6744  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6745  {
6746  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6747  }
6748  else
6749  {
6750  return AllocateDedicatedMemory(
6751  vkMemReq.size,
6752  suballocType,
6753  memTypeIndex,
6754  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6755  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6756  finalCreateInfo.pUserData,
6757  dedicatedBuffer,
6758  dedicatedImage,
6759  pAllocation);
6760  }
6761  }
6762  else
6763  {
6764  VkResult res = blockVector->Allocate(
6765  VK_NULL_HANDLE, // hCurrentPool
6766  m_CurrentFrameIndex.load(),
6767  vkMemReq,
6768  finalCreateInfo,
6769  suballocType,
6770  pAllocation);
6771  if(res == VK_SUCCESS)
6772  {
6773  return res;
6774  }
6775 
6776  // Block allocation failed. Try dedicated memory.
6777  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6778  {
6779  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6780  }
6781  else
6782  {
6783  res = AllocateDedicatedMemory(
6784  vkMemReq.size,
6785  suballocType,
6786  memTypeIndex,
6787  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
6788  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
6789  finalCreateInfo.pUserData,
6790  dedicatedBuffer,
6791  dedicatedImage,
6792  pAllocation);
6793  if(res == VK_SUCCESS)
6794  {
6795  // Succeeded: AllocateDedicatedMemory function already filled *pAllocation, nothing more to do here.
6796  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
6797  return VK_SUCCESS;
6798  }
6799  else
6800  {
6801  // Everything failed: Return error code.
6802  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6803  return res;
6804  }
6805  }
6806  }
6807 }
6808 
6809 VkResult VmaAllocator_T::AllocateDedicatedMemory(
6810  VkDeviceSize size,
6811  VmaSuballocationType suballocType,
6812  uint32_t memTypeIndex,
6813  bool map,
6814  bool isUserDataString,
6815  void* pUserData,
6816  VkBuffer dedicatedBuffer,
6817  VkImage dedicatedImage,
6818  VmaAllocation* pAllocation)
6819 {
6820  VMA_ASSERT(pAllocation);
6821 
6822  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6823  allocInfo.memoryTypeIndex = memTypeIndex;
6824  allocInfo.allocationSize = size;
6825 
6826  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
6827  if(m_UseKhrDedicatedAllocation)
6828  {
6829  if(dedicatedBuffer != VK_NULL_HANDLE)
6830  {
6831  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
6832  dedicatedAllocInfo.buffer = dedicatedBuffer;
6833  allocInfo.pNext = &dedicatedAllocInfo;
6834  }
6835  else if(dedicatedImage != VK_NULL_HANDLE)
6836  {
6837  dedicatedAllocInfo.image = dedicatedImage;
6838  allocInfo.pNext = &dedicatedAllocInfo;
6839  }
6840  }
6841 
6842  // Allocate VkDeviceMemory.
6843  VkDeviceMemory hMemory = VK_NULL_HANDLE;
6844  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
6845  if(res < 0)
6846  {
6847  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
6848  return res;
6849  }
6850 
6851  void* pMappedData = nullptr;
6852  if(map)
6853  {
6854  res = (*m_VulkanFunctions.vkMapMemory)(
6855  m_hDevice,
6856  hMemory,
6857  0,
6858  VK_WHOLE_SIZE,
6859  0,
6860  &pMappedData);
6861  if(res < 0)
6862  {
6863  VMA_DEBUG_LOG(" vkMapMemory FAILED");
6864  FreeVulkanMemory(memTypeIndex, size, hMemory);
6865  return res;
6866  }
6867  }
6868 
6869  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
6870  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
6871  (*pAllocation)->SetUserData(this, pUserData);
6872 
6873  // Register it in m_pDedicatedAllocations.
6874  {
6875  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
6876  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
6877  VMA_ASSERT(pDedicatedAllocations);
6878  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
6879  }
6880 
6881  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
6882 
6883  return VK_SUCCESS;
6884 }
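
For orientation, a minimal caller-side sketch of how an allocation is steered into this dedicated path via creation flags. Assumes a valid VmaAllocator `allocator` and a VkMemoryRequirements `memReq` queried from a real buffer or image; both are placeholders, not names from this file:

    VmaAllocationCreateInfo createInfo = {};
    // DEDICATED_MEMORY_BIT forces the AllocateDedicatedMemory() path above;
    // MAPPED_BIT makes it call vkMapMemory and keep pMappedData valid.
    createInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT;
    createInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; // guarantees HOST_VISIBLE

    VmaAllocation alloc = VK_NULL_HANDLE;
    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &alloc, &allocInfo);
    if(res == VK_SUCCESS)
    {
        // allocInfo.pMappedData now points at the persistently mapped memory.
    }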
6885 
6886 void VmaAllocator_T::GetBufferMemoryRequirements(
6887  VkBuffer hBuffer,
6888  VkMemoryRequirements& memReq,
6889  bool& requiresDedicatedAllocation,
6890  bool& prefersDedicatedAllocation) const
6891 {
6892  if(m_UseKhrDedicatedAllocation)
6893  {
6894  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
6895  memReqInfo.buffer = hBuffer;
6896 
6897  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6898 
6899  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6900  memReq2.pNext = &memDedicatedReq;
6901 
6902  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6903 
6904  memReq = memReq2.memoryRequirements;
6905  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6906  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6907  }
6908  else
6909  {
6910  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
6911  requiresDedicatedAllocation = false;
6912  prefersDedicatedAllocation = false;
6913  }
6914 }
6915 
6916 void VmaAllocator_T::GetImageMemoryRequirements(
6917  VkImage hImage,
6918  VkMemoryRequirements& memReq,
6919  bool& requiresDedicatedAllocation,
6920  bool& prefersDedicatedAllocation) const
6921 {
6922  if(m_UseKhrDedicatedAllocation)
6923  {
6924  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
6925  memReqInfo.image = hImage;
6926 
6927  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
6928 
6929  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
6930  memReq2.pNext = &memDedicatedReq;
6931 
6932  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
6933 
6934  memReq = memReq2.memoryRequirements;
6935  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
6936  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
6937  }
6938  else
6939  {
6940  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
6941  requiresDedicatedAllocation = false;
6942  prefersDedicatedAllocation = false;
6943  }
6944 }
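
The KHR branch above only runs when m_UseKhrDedicatedAllocation is set, which the caller opts into at allocator creation. A sketch, assuming valid `physicalDevice` and `device` handles (placeholders) and a device created with VK_KHR_get_memory_requirements2 and VK_KHR_dedicated_allocation enabled; the flag name is the one from the VmaAllocatorCreateFlagBits enum declared earlier in this header:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    // Opt in to the vkGet*MemoryRequirements2KHR queries used above.
    allocatorInfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;

    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);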
6945 
6946 VkResult VmaAllocator_T::AllocateMemory(
6947  const VkMemoryRequirements& vkMemReq,
6948  bool requiresDedicatedAllocation,
6949  bool prefersDedicatedAllocation,
6950  VkBuffer dedicatedBuffer,
6951  VkImage dedicatedImage,
6952  const VmaAllocationCreateInfo& createInfo,
6953  VmaSuballocationType suballocType,
6954  VmaAllocation* pAllocation)
6955 {
6956  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
6957  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6958  {
6959  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
6960  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6961  }
6962  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
6963  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
6964  {
6965  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
6966  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6967  }
6968  if(requiresDedicatedAllocation)
6969  {
6970  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
6971  {
6972  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
6973  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6974  }
6975  if(createInfo.pool != VK_NULL_HANDLE)
6976  {
6977  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
6978  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6979  }
6980  }
6981  if((createInfo.pool != VK_NULL_HANDLE) &&
6982  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
6983  {
6984  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
6985  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6986  }
6987 
6988  if(createInfo.pool != VK_NULL_HANDLE)
6989  {
6990  return createInfo.pool->m_BlockVector.Allocate(
6991  createInfo.pool,
6992  m_CurrentFrameIndex.load(),
6993  vkMemReq,
6994  createInfo,
6995  suballocType,
6996  pAllocation);
6997  }
6998  else
6999  {
7000  // Bit mask of Vulkan memory types acceptable for this allocation.
7001  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7002  uint32_t memTypeIndex = UINT32_MAX;
7003  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7004  if(res == VK_SUCCESS)
7005  {
7006  res = AllocateMemoryOfType(
7007  vkMemReq,
7008  requiresDedicatedAllocation || prefersDedicatedAllocation,
7009  dedicatedBuffer,
7010  dedicatedImage,
7011  createInfo,
7012  memTypeIndex,
7013  suballocType,
7014  pAllocation);
7015  // Succeeded on first try.
7016  if(res == VK_SUCCESS)
7017  {
7018  return res;
7019  }
7020  // Allocation from this memory type failed. Try other compatible memory types.
7021  else
7022  {
7023  for(;;)
7024  {
7025  // Remove old memTypeIndex from list of possibilities.
7026  memoryTypeBits &= ~(1u << memTypeIndex);
7027  // Find alternative memTypeIndex.
7028  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7029  if(res == VK_SUCCESS)
7030  {
7031  res = AllocateMemoryOfType(
7032  vkMemReq,
7033  requiresDedicatedAllocation || prefersDedicatedAllocation,
7034  dedicatedBuffer,
7035  dedicatedImage,
7036  createInfo,
7037  memTypeIndex,
7038  suballocType,
7039  pAllocation);
7040  // Allocation from this alternative memory type succeeded.
7041  if(res == VK_SUCCESS)
7042  {
7043  return res;
7044  }
7045  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7046  }
7047  // No other matching memory type index could be found.
7048  else
7049  {
7050  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7051  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7052  }
7053  }
7054  }
7055  }
7056  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7057  else
7058  return res;
7059  }
7060 }
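
A caller-side sketch of this fallback behavior, assuming a valid `allocator` and a `memReq` from vkGetBufferMemoryRequirements (placeholders). The point: if the best-matching memory type is exhausted, the loop above clears its bit in memoryTypeBits and retries with the next cheapest compatible type:

    VmaAllocationCreateInfo createInfo = {};
    // Readback usage: HOST_VISIBLE becomes required, HOST_CACHED preferred
    // (see vmaFindMemoryTypeIndex below).
    createInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU;

    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &alloc, nullptr);
    // res == VK_ERROR_OUT_OF_DEVICE_MEMORY only after every compatible
    // memory type has been tried.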
7061 
7062 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7063 {
7064  VMA_ASSERT(allocation);
7065 
7066  if(allocation->CanBecomeLost() == false ||
7067  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7068  {
7069  switch(allocation->GetType())
7070  {
7071  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7072  {
7073  VmaBlockVector* pBlockVector = VMA_NULL;
7074  VmaPool hPool = allocation->GetPool();
7075  if(hPool != VK_NULL_HANDLE)
7076  {
7077  pBlockVector = &hPool->m_BlockVector;
7078  }
7079  else
7080  {
7081  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7082  pBlockVector = m_pBlockVectors[memTypeIndex];
7083  }
7084  pBlockVector->Free(allocation);
7085  }
7086  break;
7087  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7088  FreeDedicatedMemory(allocation);
7089  break;
7090  default:
7091  VMA_ASSERT(0);
7092  }
7093  }
7094 
7095  allocation->SetUserData(this, VMA_NULL);
7096  vma_delete(this, allocation);
7097 }
7098 
7099 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7100 {
7101  // Initialize.
7102  InitStatInfo(pStats->total);
7103  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7104  InitStatInfo(pStats->memoryType[i]);
7105  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7106  InitStatInfo(pStats->memoryHeap[i]);
7107 
7108  // Process default pools.
7109  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7110  {
7111  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7112  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7113  VMA_ASSERT(pBlockVector);
7114  pBlockVector->AddStats(pStats);
7115  }
7116 
7117  // Process custom pools.
7118  {
7119  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7120  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7121  {
7122  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7123  }
7124  }
7125 
7126  // Process dedicated allocations.
7127  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7128  {
7129  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7130  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7131  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7132  VMA_ASSERT(pDedicatedAllocVector);
7133  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7134  {
7135  VmaStatInfo allocationStatInfo;
7136  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7137  VmaAddStatInfo(pStats->total, allocationStatInfo);
7138  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7139  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7140  }
7141  }
7142 
7143  // Postprocess.
7144  VmaPostprocessCalcStatInfo(pStats->total);
7145  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7146  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7147  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7148  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7149 }
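
A sketch of the public wrapper that drives this function, assuming a valid `allocator` and that <cstdio> is included:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // total aggregates default pools, custom pools and dedicated allocations.
    printf("blocks: %u, allocations: %u, used: %llu B, unused: %llu B\n",
        stats.total.blockCount,
        stats.total.allocationCount,
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);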
7150 
7151 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7152 
7153 VkResult VmaAllocator_T::Defragment(
7154  VmaAllocation* pAllocations,
7155  size_t allocationCount,
7156  VkBool32* pAllocationsChanged,
7157  const VmaDefragmentationInfo* pDefragmentationInfo,
7158  VmaDefragmentationStats* pDefragmentationStats)
7159 {
7160  if(pAllocationsChanged != VMA_NULL)
7161  {
7162  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
7163  }
7164  if(pDefragmentationStats != VMA_NULL)
7165  {
7166  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7167  }
7168 
7169  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7170 
7171  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7172 
7173  const size_t poolCount = m_Pools.size();
7174 
7175  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7176  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7177  {
7178  VmaAllocation hAlloc = pAllocations[allocIndex];
7179  VMA_ASSERT(hAlloc);
7180  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7181  // DedicatedAlloc cannot be defragmented.
7182  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7183  // Only HOST_VISIBLE memory types can be defragmented.
7184  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7185  // Lost allocation cannot be defragmented.
7186  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7187  {
7188  VmaBlockVector* pAllocBlockVector = nullptr;
7189 
7190  const VmaPool hAllocPool = hAlloc->GetPool();
7191  // This allocation belongs to custom pool.
7192  if(hAllocPool != VK_NULL_HANDLE)
7193  {
7194  pAllocBlockVector = &hAllocPool->GetBlockVector();
7195  }
7196  // This allocation belongs to general pool.
7197  else
7198  {
7199  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7200  }
7201 
7202  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7203 
7204  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7205  &pAllocationsChanged[allocIndex] : VMA_NULL;
7206  pDefragmentator->AddAllocation(hAlloc, pChanged);
7207  }
7208  }
7209 
7210  VkResult result = VK_SUCCESS;
7211 
7212  // ======== Main processing.
7213 
7214  VkDeviceSize maxBytesToMove = UINT64_MAX;
7215  uint32_t maxAllocationsToMove = UINT32_MAX;
7216  if(pDefragmentationInfo != VMA_NULL)
7217  {
7218  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7219  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7220  }
7221 
7222  // Process standard memory.
7223  for(uint32_t memTypeIndex = 0;
7224  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7225  ++memTypeIndex)
7226  {
7227  // Only HOST_VISIBLE memory types can be defragmented.
7228  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7229  {
7230  result = m_pBlockVectors[memTypeIndex]->Defragment(
7231  pDefragmentationStats,
7232  maxBytesToMove,
7233  maxAllocationsToMove);
7234  }
7235  }
7236 
7237  // Process custom pools.
7238  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7239  {
7240  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7241  pDefragmentationStats,
7242  maxBytesToMove,
7243  maxAllocationsToMove);
7244  }
7245 
7246  // ======== Destroy defragmentators.
7247 
7248  // Process custom pools.
7249  for(size_t poolIndex = poolCount; poolIndex--; )
7250  {
7251  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7252  }
7253 
7254  // Process standard memory.
7255  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7256  {
7257  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7258  {
7259  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7260  }
7261  }
7262 
7263  return result;
7264 }
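
A usage sketch for the public vmaDefragment entry point, assuming a valid `allocator`, a `std::vector<VmaAllocation> allocs` of HOST_VISIBLE, non-lost block allocations (placeholder), and <vector> included:

    std::vector<VkBool32> changed(allocs.size());
    VmaDefragmentationStats defragStats = {};
    VkResult res = vmaDefragment(allocator,
        allocs.data(), allocs.size(), changed.data(),
        nullptr, // no VmaDefragmentationInfo: unlimited bytes/moves
        &defragStats);
    // For every allocs[i] with changed[i] == VK_TRUE the allocation was moved:
    // re-query vmaGetAllocationInfo() and recreate/rebind the resources that
    // were bound to it.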
7265 
7266 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7267 {
7268  if(hAllocation->CanBecomeLost())
7269  {
7270  /*
7271  Warning: This is a carefully designed algorithm.
7272  Do not modify unless you really know what you're doing :)
7273  */
7274  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7275  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7276  for(;;)
7277  {
7278  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7279  {
7280  pAllocationInfo->memoryType = UINT32_MAX;
7281  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7282  pAllocationInfo->offset = 0;
7283  pAllocationInfo->size = hAllocation->GetSize();
7284  pAllocationInfo->pMappedData = VMA_NULL;
7285  pAllocationInfo->pUserData = hAllocation->GetUserData();
7286  return;
7287  }
7288  else if(localLastUseFrameIndex == localCurrFrameIndex)
7289  {
7290  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7291  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7292  pAllocationInfo->offset = hAllocation->GetOffset();
7293  pAllocationInfo->size = hAllocation->GetSize();
7294  pAllocationInfo->pMappedData = VMA_NULL;
7295  pAllocationInfo->pUserData = hAllocation->GetUserData();
7296  return;
7297  }
7298  else // Last use time earlier than current time.
7299  {
7300  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7301  {
7302  localLastUseFrameIndex = localCurrFrameIndex;
7303  }
7304  }
7305  }
7306  }
7307  else
7308  {
7309  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7310  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7311  pAllocationInfo->offset = hAllocation->GetOffset();
7312  pAllocationInfo->size = hAllocation->GetSize();
7313  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7314  pAllocationInfo->pUserData = hAllocation->GetUserData();
7315  }
7316 }
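
A sketch of the lost-allocation check this function enables, assuming a valid `allocator` and an `allocation` created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT (placeholders):

    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, allocation, &info);
    if(info.deviceMemory == VK_NULL_HANDLE)
    {
        // Lost: the branch above reported null memory. Free the allocation
        // and recreate the resource.
    }
    else
    {
        // Alive: the compare-exchange loop above also bumped the allocation's
        // last-use frame index, keeping it alive for the current frame.
    }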
7317 
7318 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7319 {
7320  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7321 
7322  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7323 
7324  if(newCreateInfo.maxBlockCount == 0)
7325  {
7326  newCreateInfo.maxBlockCount = SIZE_MAX;
7327  }
7328  if(newCreateInfo.blockSize == 0)
7329  {
7330  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7331  }
7332 
7333  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7334 
7335  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7336  if(res != VK_SUCCESS)
7337  {
7338  vma_delete(this, *pPool);
7339  *pPool = VMA_NULL;
7340  return res;
7341  }
7342 
7343  // Add to m_Pools.
7344  {
7345  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7346  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7347  }
7348 
7349  return VK_SUCCESS;
7350 }
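
A caller-side sketch, assuming a valid `allocator` and a `memTypeIndex` previously chosen with vmaFindMemoryTypeIndex() (placeholders):

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 0;     // 0: use CalcPreferredBlockSize(), as above
    poolInfo.minBlockCount = 1; // keep one block resident via CreateMinBlocks()
    poolInfo.maxBlockCount = 0; // 0: unlimited (becomes SIZE_MAX above)

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
    // Later, after freeing all its allocations: vmaDestroyPool(allocator, pool);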
7351 
7352 void VmaAllocator_T::DestroyPool(VmaPool pool)
7353 {
7354  // Remove from m_Pools.
7355  {
7356  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7357  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7358  VMA_ASSERT(success && "Pool not found in Allocator.");
7359  }
7360 
7361  vma_delete(this, pool);
7362 }
7363 
7364 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7365 {
7366  pool->m_BlockVector.GetPoolStats(pPoolStats);
7367 }
7368 
7369 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7370 {
7371  m_CurrentFrameIndex.store(frameIndex);
7372 }
7373 
7374 void VmaAllocator_T::MakePoolAllocationsLost(
7375  VmaPool hPool,
7376  size_t* pLostAllocationCount)
7377 {
7378  hPool->m_BlockVector.MakePoolAllocationsLost(
7379  m_CurrentFrameIndex.load(),
7380  pLostAllocationCount);
7381 }
7382 
7383 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7384 {
7385  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7386  (*pAllocation)->InitLost();
7387 }
7388 
7389 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7390 {
7391  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7392 
7393  VkResult res;
7394  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7395  {
7396  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7397  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7398  {
7399  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7400  if(res == VK_SUCCESS)
7401  {
7402  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7403  }
7404  }
7405  else
7406  {
7407  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7408  }
7409  }
7410  else
7411  {
7412  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7413  }
7414 
7415  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7416  {
7417  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7418  }
7419 
7420  return res;
7421 }
7422 
7423 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7424 {
7425  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7426  {
7427  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7428  }
7429 
7430  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7431 
7432  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7433  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7434  {
7435  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7436  m_HeapSizeLimit[heapIndex] += size;
7437  }
7438 }
7439 
7440 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7441 {
7442  if(hAllocation->CanBecomeLost())
7443  {
7444  return VK_ERROR_MEMORY_MAP_FAILED;
7445  }
7446 
7447  switch(hAllocation->GetType())
7448  {
7449  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7450  {
7451  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7452  char *pBytes = nullptr;
7453  VkResult res = pBlock->Map(this, (void**)&pBytes);
7454  if(res == VK_SUCCESS)
7455  {
7456  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7457  }
7458  return res;
7459  }
7460  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7461  return hAllocation->DedicatedAllocMap(this, ppData);
7462  default:
7463  VMA_ASSERT(0);
7464  return VK_ERROR_MEMORY_MAP_FAILED;
7465  }
7466 }
7467 
7468 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7469 {
7470  switch(hAllocation->GetType())
7471  {
7472  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7473  {
7474  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7475  pBlock->Unmap(this);
7476  }
7477  break;
7478  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7479  hAllocation->DedicatedAllocUnmap(this);
7480  break;
7481  default:
7482  VMA_ASSERT(0);
7483  }
7484 }
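
A sketch of the matching public map/unmap pair, assuming a valid `allocator`, a HOST_VISIBLE non-lost `allocation`, source data `srcData`/`srcSize` (all placeholders), and <cstring> included:

    void* pData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, allocation);
        // For non-HOST_COHERENT memory types, also flush the written range
        // with vkFlushMappedMemoryRanges before the device reads it.
    }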
7485 
7486 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7487 {
7488  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7489 
7490  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7491  {
7492  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7493  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7494  VMA_ASSERT(pDedicatedAllocations);
7495  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7496  VMA_ASSERT(success);
7497  }
7498 
7499  VkDeviceMemory hMemory = allocation->GetMemory();
7500 
7501  if(allocation->GetMappedData() != VMA_NULL)
7502  {
7503  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7504  }
7505 
7506  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
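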
7507 
7508  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7509 }
7510 
7511 #if VMA_STATS_STRING_ENABLED
7512 
7513 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7514 {
7515  bool dedicatedAllocationsStarted = false;
7516  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7517  {
7518  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7519  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7520  VMA_ASSERT(pDedicatedAllocVector);
7521  if(pDedicatedAllocVector->empty() == false)
7522  {
7523  if(dedicatedAllocationsStarted == false)
7524  {
7525  dedicatedAllocationsStarted = true;
7526  json.WriteString("DedicatedAllocations");
7527  json.BeginObject();
7528  }
7529 
7530  json.BeginString("Type ");
7531  json.ContinueString(memTypeIndex);
7532  json.EndString();
7533 
7534  json.BeginArray();
7535 
7536  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7537  {
7538  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7539  json.BeginObject(true);
7540 
7541  json.WriteString("Size");
7542  json.WriteNumber(hAlloc->GetSize());
7543 
7544  json.WriteString("Type");
7545  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7546 
7547  json.EndObject();
7548  }
7549 
7550  json.EndArray();
7551  }
7552  }
7553  if(dedicatedAllocationsStarted)
7554  {
7555  json.EndObject();
7556  }
7557 
7558  {
7559  bool allocationsStarted = false;
7560  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7561  {
7562  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
7563  {
7564  if(allocationsStarted == false)
7565  {
7566  allocationsStarted = true;
7567  json.WriteString("DefaultPools");
7568  json.BeginObject();
7569  }
7570 
7571  json.BeginString("Type ");
7572  json.ContinueString(memTypeIndex);
7573  json.EndString();
7574 
7575  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
7576  }
7577  }
7578  if(allocationsStarted)
7579  {
7580  json.EndObject();
7581  }
7582  }
7583 
7584  {
7585  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7586  const size_t poolCount = m_Pools.size();
7587  if(poolCount > 0)
7588  {
7589  json.WriteString("Pools");
7590  json.BeginArray();
7591  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
7592  {
7593  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
7594  }
7595  json.EndArray();
7596  }
7597  }
7598 }
7599 
7600 #endif // #if VMA_STATS_STRING_ENABLED
7601 
7602 static VkResult AllocateMemoryForImage(
7603  VmaAllocator allocator,
7604  VkImage image,
7605  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7606  VmaSuballocationType suballocType,
7607  VmaAllocation* pAllocation)
7608 {
7609  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
7610 
7611  VkMemoryRequirements vkMemReq = {};
7612  bool requiresDedicatedAllocation = false;
7613  bool prefersDedicatedAllocation = false;
7614  allocator->GetImageMemoryRequirements(image, vkMemReq,
7615  requiresDedicatedAllocation, prefersDedicatedAllocation);
7616 
7617  return allocator->AllocateMemory(
7618  vkMemReq,
7619  requiresDedicatedAllocation,
7620  prefersDedicatedAllocation,
7621  VK_NULL_HANDLE, // dedicatedBuffer
7622  image, // dedicatedImage
7623  *pAllocationCreateInfo,
7624  suballocType,
7625  pAllocation);
7626 }
7627 
7628 ////////////////////////////////////////////////////////////////////////////////
7629 // Public interface
7630 
7631 VkResult vmaCreateAllocator(
7632  const VmaAllocatorCreateInfo* pCreateInfo,
7633  VmaAllocator* pAllocator)
7634 {
7635  VMA_ASSERT(pCreateInfo && pAllocator);
7636  VMA_DEBUG_LOG("vmaCreateAllocator");
7637  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
7638  return VK_SUCCESS;
7639 }
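
A minimal lifecycle sketch, assuming valid `physicalDevice` and `device` handles (placeholders):

    VmaAllocatorCreateInfo info = {};
    info.physicalDevice = physicalDevice;
    info.device = device;

    VmaAllocator allocator = VK_NULL_HANDLE;
    if(vmaCreateAllocator(&info, &allocator) == VK_SUCCESS)
    {
        // ... create and destroy buffers/images through the allocator ...
        vmaDestroyAllocator(allocator); // only after all allocations are freed
    }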
7640 
7641 void vmaDestroyAllocator(
7642  VmaAllocator allocator)
7643 {
7644  if(allocator != VK_NULL_HANDLE)
7645  {
7646  VMA_DEBUG_LOG("vmaDestroyAllocator");
7647  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
7648  vma_delete(&allocationCallbacks, allocator);
7649  }
7650 }
7651 
7652 void vmaGetPhysicalDeviceProperties(
7653  VmaAllocator allocator,
7654  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
7655 {
7656  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
7657  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
7658 }
7659 
7660 void vmaGetMemoryProperties(
7661  VmaAllocator allocator,
7662  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
7663 {
7664  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
7665  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
7666 }
7667 
7668 void vmaGetMemoryTypeProperties(
7669  VmaAllocator allocator,
7670  uint32_t memoryTypeIndex,
7671  VkMemoryPropertyFlags* pFlags)
7672 {
7673  VMA_ASSERT(allocator && pFlags);
7674  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
7675  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
7676 }
7677 
7678 void vmaSetCurrentFrameIndex(
7679  VmaAllocator allocator,
7680  uint32_t frameIndex)
7681 {
7682  VMA_ASSERT(allocator);
7683  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
7684 
7685  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7686 
7687  allocator->SetCurrentFrameIndex(frameIndex);
7688 }
7689 
7690 void vmaCalculateStats(
7691  VmaAllocator allocator,
7692  VmaStats* pStats)
7693 {
7694  VMA_ASSERT(allocator && pStats);
7695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7696  allocator->CalculateStats(pStats);
7697 }
7698 
7699 #if VMA_STATS_STRING_ENABLED
7700 
7701 void vmaBuildStatsString(
7702  VmaAllocator allocator,
7703  char** ppStatsString,
7704  VkBool32 detailedMap)
7705 {
7706  VMA_ASSERT(allocator && ppStatsString);
7707  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7708 
7709  VmaStringBuilder sb(allocator);
7710  {
7711  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
7712  json.BeginObject();
7713 
7714  VmaStats stats;
7715  allocator->CalculateStats(&stats);
7716 
7717  json.WriteString("Total");
7718  VmaPrintStatInfo(json, stats.total);
7719 
7720  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
7721  {
7722  json.BeginString("Heap ");
7723  json.ContinueString(heapIndex);
7724  json.EndString();
7725  json.BeginObject();
7726 
7727  json.WriteString("Size");
7728  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
7729 
7730  json.WriteString("Flags");
7731  json.BeginArray(true);
7732  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
7733  {
7734  json.WriteString("DEVICE_LOCAL");
7735  }
7736  json.EndArray();
7737 
7738  if(stats.memoryHeap[heapIndex].blockCount > 0)
7739  {
7740  json.WriteString("Stats");
7741  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
7742  }
7743 
7744  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
7745  {
7746  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
7747  {
7748  json.BeginString("Type ");
7749  json.ContinueString(typeIndex);
7750  json.EndString();
7751 
7752  json.BeginObject();
7753 
7754  json.WriteString("Flags");
7755  json.BeginArray(true);
7756  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
7757  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
7758  {
7759  json.WriteString("DEVICE_LOCAL");
7760  }
7761  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7762  {
7763  json.WriteString("HOST_VISIBLE");
7764  }
7765  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
7766  {
7767  json.WriteString("HOST_COHERENT");
7768  }
7769  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
7770  {
7771  json.WriteString("HOST_CACHED");
7772  }
7773  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
7774  {
7775  json.WriteString("LAZILY_ALLOCATED");
7776  }
7777  json.EndArray();
7778 
7779  if(stats.memoryType[typeIndex].blockCount > 0)
7780  {
7781  json.WriteString("Stats");
7782  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
7783  }
7784 
7785  json.EndObject();
7786  }
7787  }
7788 
7789  json.EndObject();
7790  }
7791  if(detailedMap == VK_TRUE)
7792  {
7793  allocator->PrintDetailedMap(json);
7794  }
7795 
7796  json.EndObject();
7797  }
7798 
7799  const size_t len = sb.GetLength();
7800  char* const pChars = vma_new_array(allocator, char, len + 1);
7801  if(len > 0)
7802  {
7803  memcpy(pChars, sb.GetData(), len);
7804  }
7805  pChars[len] = '\0';
7806  *ppStatsString = pChars;
7807 }
7808 
7809 void vmaFreeStatsString(
7810  VmaAllocator allocator,
7811  char* pStatsString)
7812 {
7813  if(pStatsString != VMA_NULL)
7814  {
7815  VMA_ASSERT(allocator);
7816  size_t len = strlen(pStatsString);
7817  vma_delete_array(allocator, pStatsString, len + 1);
7818  }
7819 }
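
A sketch of the intended build/print/free sequence for the JSON produced above, assuming a valid `allocator` and <cstdio> included:

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE /* detailedMap */);
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString); // must use the matching free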
7820 
7821 #endif // #if VMA_STATS_STRING_ENABLED
7822 
7825 VkResult vmaFindMemoryTypeIndex(
7826  VmaAllocator allocator,
7827  uint32_t memoryTypeBits,
7828  const VmaAllocationCreateInfo* pAllocationCreateInfo,
7829  uint32_t* pMemoryTypeIndex)
7830 {
7831  VMA_ASSERT(allocator != VK_NULL_HANDLE);
7832  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
7833  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
7834 
7835  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
7836  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
7837  if(preferredFlags == 0)
7838  {
7839  preferredFlags = requiredFlags;
7840  }
7841  // preferredFlags, if not 0, must be a superset of requiredFlags.
7842  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
7843 
7844  // Convert usage to requiredFlags and preferredFlags.
7845  switch(pAllocationCreateInfo->usage)
7846  {
7847  case VMA_MEMORY_USAGE_UNKNOWN:
7848  break;
7849  case VMA_MEMORY_USAGE_GPU_ONLY:
7850  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7851  break;
7852  case VMA_MEMORY_USAGE_CPU_ONLY:
7853  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
7854  break;
7855  case VMA_MEMORY_USAGE_CPU_TO_GPU:
7856  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7857  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
7858  break;
7859  case VMA_MEMORY_USAGE_GPU_TO_CPU:
7860  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7861  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
7862  break;
7863  default:
7864  break;
7865  }
7866 
7867  *pMemoryTypeIndex = UINT32_MAX;
7868  uint32_t minCost = UINT32_MAX;
7869  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
7870  memTypeIndex < allocator->GetMemoryTypeCount();
7871  ++memTypeIndex, memTypeBit <<= 1)
7872  {
7873  // This memory type is acceptable according to memoryTypeBits bitmask.
7874  if((memTypeBit & memoryTypeBits) != 0)
7875  {
7876  const VkMemoryPropertyFlags currFlags =
7877  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
7878  // This memory type contains requiredFlags.
7879  if((requiredFlags & ~currFlags) == 0)
7880  {
7881  // Calculate cost as number of bits from preferredFlags not present in this memory type.
7882  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
7883  // Remember memory type with lowest cost.
7884  if(currCost < minCost)
7885  {
7886  *pMemoryTypeIndex = memTypeIndex;
7887  if(currCost == 0)
7888  {
7889  return VK_SUCCESS;
7890  }
7891  minCost = currCost;
7892  }
7893  }
7894  }
7895  }
7896  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
7897 }
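
A usage sketch of this cost-based search, assuming a valid `allocator` and a `memoryTypeBits` mask taken from a VkMemoryRequirements (placeholder):

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // staging: HOST_VISIBLE | HOST_COHERENT required

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex);
    // VK_SUCCESS: memTypeIndex is the cheapest compatible type, where cost is
    // the number of preferredFlags bits the type lacks.
    // VK_ERROR_FEATURE_NOT_PRESENT: no memory type satisfies requiredFlags.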
7898 
7899 VkResult vmaCreatePool(
7900  VmaAllocator allocator,
7901  const VmaPoolCreateInfo* pCreateInfo,
7902  VmaPool* pPool)
7903 {
7904  VMA_ASSERT(allocator && pCreateInfo && pPool);
7905 
7906  VMA_DEBUG_LOG("vmaCreatePool");
7907 
7908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7909 
7910  return allocator->CreatePool(pCreateInfo, pPool);
7911 }
7912 
7913 void vmaDestroyPool(
7914  VmaAllocator allocator,
7915  VmaPool pool)
7916 {
7917  VMA_ASSERT(allocator);
7918 
7919  if(pool == VK_NULL_HANDLE)
7920  {
7921  return;
7922  }
7923 
7924  VMA_DEBUG_LOG("vmaDestroyPool");
7925 
7926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7927 
7928  allocator->DestroyPool(pool);
7929 }
7930 
7931 void vmaGetPoolStats(
7932  VmaAllocator allocator,
7933  VmaPool pool,
7934  VmaPoolStats* pPoolStats)
7935 {
7936  VMA_ASSERT(allocator && pool && pPoolStats);
7937 
7938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7939 
7940  allocator->GetPoolStats(pool, pPoolStats);
7941 }
7942 
7943 void vmaMakePoolAllocationsLost(
7944  VmaAllocator allocator,
7945  VmaPool pool,
7946  size_t* pLostAllocationCount)
7947 {
7948  VMA_ASSERT(allocator && pool);
7949 
7950  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7951 
7952  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
7953 }
7954 
7955 VkResult vmaAllocateMemory(
7956  VmaAllocator allocator,
7957  const VkMemoryRequirements* pVkMemoryRequirements,
7958  const VmaAllocationCreateInfo* pCreateInfo,
7959  VmaAllocation* pAllocation,
7960  VmaAllocationInfo* pAllocationInfo)
7961 {
7962  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
7963 
7964  VMA_DEBUG_LOG("vmaAllocateMemory");
7965 
7966  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7967 
7968  VkResult result = allocator->AllocateMemory(
7969  *pVkMemoryRequirements,
7970  false, // requiresDedicatedAllocation
7971  false, // prefersDedicatedAllocation
7972  VK_NULL_HANDLE, // dedicatedBuffer
7973  VK_NULL_HANDLE, // dedicatedImage
7974  *pCreateInfo,
7975  VMA_SUBALLOCATION_TYPE_UNKNOWN,
7976  pAllocation);
7977 
7978  if(pAllocationInfo && result == VK_SUCCESS)
7979  {
7980  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
7981  }
7982 
7983  return result;
7984 }
7985 
7986 VkResult vmaAllocateMemoryForBuffer(
7987  VmaAllocator allocator,
7988  VkBuffer buffer,
7989  const VmaAllocationCreateInfo* pCreateInfo,
7990  VmaAllocation* pAllocation,
7991  VmaAllocationInfo* pAllocationInfo)
7992 {
7993  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
7994 
7995  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
7996 
7997  VMA_DEBUG_GLOBAL_MUTEX_LOCK
7998 
7999  VkMemoryRequirements vkMemReq = {};
8000  bool requiresDedicatedAllocation = false;
8001  bool prefersDedicatedAllocation = false;
8002  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8003  requiresDedicatedAllocation,
8004  prefersDedicatedAllocation);
8005 
8006  VkResult result = allocator->AllocateMemory(
8007  vkMemReq,
8008  requiresDedicatedAllocation,
8009  prefersDedicatedAllocation,
8010  buffer, // dedicatedBuffer
8011  VK_NULL_HANDLE, // dedicatedImage
8012  *pCreateInfo,
8013  VMA_SUBALLOCATION_TYPE_BUFFER,
8014  pAllocation);
8015 
8016  if(pAllocationInfo && result == VK_SUCCESS)
8017  {
8018  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8019  }
8020 
8021  return result;
8022 }
8023 
8024 VkResult vmaAllocateMemoryForImage(
8025  VmaAllocator allocator,
8026  VkImage image,
8027  const VmaAllocationCreateInfo* pCreateInfo,
8028  VmaAllocation* pAllocation,
8029  VmaAllocationInfo* pAllocationInfo)
8030 {
8031  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8032 
8033  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8034 
8035  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8036 
8037  VkResult result = AllocateMemoryForImage(
8038  allocator,
8039  image,
8040  pCreateInfo,
8041  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8042  pAllocation);
8043 
8044  if(pAllocationInfo && result == VK_SUCCESS)
8045  {
8046  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8047  }
8048 
8049  return result;
8050 }
8051 
8052 void vmaFreeMemory(
8053  VmaAllocator allocator,
8054  VmaAllocation allocation)
8055 {
8056  VMA_ASSERT(allocator && allocation);
8057 
8058  VMA_DEBUG_LOG("vmaFreeMemory");
8059 
8060  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8061 
8062  allocator->FreeMemory(allocation);
8063 }
8064 
8065 void vmaGetAllocationInfo(
8066  VmaAllocator allocator,
8067  VmaAllocation allocation,
8068  VmaAllocationInfo* pAllocationInfo)
8069 {
8070  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8071 
8072  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8073 
8074  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8075 }
8076 
8077 void vmaSetAllocationUserData(
8078  VmaAllocator allocator,
8079  VmaAllocation allocation,
8080  void* pUserData)
8081 {
8082  VMA_ASSERT(allocator && allocation);
8083 
8084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8085 
8086  allocation->SetUserData(allocator, pUserData);
8087 }
8088 
8089 void vmaCreateLostAllocation(
8090  VmaAllocator allocator,
8091  VmaAllocation* pAllocation)
8092 {
8093  VMA_ASSERT(allocator && pAllocation);
8094 
8095  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8096 
8097  allocator->CreateLostAllocation(pAllocation);
8098 }
8099 
8100 VkResult vmaMapMemory(
8101  VmaAllocator allocator,
8102  VmaAllocation allocation,
8103  void** ppData)
8104 {
8105  VMA_ASSERT(allocator && allocation && ppData);
8106 
8107  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8108 
8109  return allocator->Map(allocation, ppData);
8110 }
8111 
8112 void vmaUnmapMemory(
8113  VmaAllocator allocator,
8114  VmaAllocation allocation)
8115 {
8116  VMA_ASSERT(allocator && allocation);
8117 
8118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8119 
8120  allocator->Unmap(allocation);
8121 }
8122 
8123 VkResult vmaDefragment(
8124  VmaAllocator allocator,
8125  VmaAllocation* pAllocations,
8126  size_t allocationCount,
8127  VkBool32* pAllocationsChanged,
8128  const VmaDefragmentationInfo *pDefragmentationInfo,
8129  VmaDefragmentationStats* pDefragmentationStats)
8130 {
8131  VMA_ASSERT(allocator && pAllocations);
8132 
8133  VMA_DEBUG_LOG("vmaDefragment");
8134 
8135  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8136 
8137  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8138 }
8139 
8140 VkResult vmaCreateBuffer(
8141  VmaAllocator allocator,
8142  const VkBufferCreateInfo* pBufferCreateInfo,
8143  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8144  VkBuffer* pBuffer,
8145  VmaAllocation* pAllocation,
8146  VmaAllocationInfo* pAllocationInfo)
8147 {
8148  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8149 
8150  VMA_DEBUG_LOG("vmaCreateBuffer");
8151 
8152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8153 
8154  *pBuffer = VK_NULL_HANDLE;
8155  *pAllocation = VK_NULL_HANDLE;
8156 
8157  // 1. Create VkBuffer.
8158  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8159  allocator->m_hDevice,
8160  pBufferCreateInfo,
8161  allocator->GetAllocationCallbacks(),
8162  pBuffer);
8163  if(res >= 0)
8164  {
8165  // 2. vkGetBufferMemoryRequirements.
8166  VkMemoryRequirements vkMemReq = {};
8167  bool requiresDedicatedAllocation = false;
8168  bool prefersDedicatedAllocation = false;
8169  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8170  requiresDedicatedAllocation, prefersDedicatedAllocation);
8171 
8172  // 3. Allocate memory using allocator.
8173  res = allocator->AllocateMemory(
8174  vkMemReq,
8175  requiresDedicatedAllocation,
8176  prefersDedicatedAllocation,
8177  *pBuffer, // dedicatedBuffer
8178  VK_NULL_HANDLE, // dedicatedImage
8179  *pAllocationCreateInfo,
8180  VMA_SUBALLOCATION_TYPE_BUFFER,
8181  pAllocation);
8182  if(res >= 0)
8183  {
8184  // 4. Bind buffer with memory.
8185  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8186  allocator->m_hDevice,
8187  *pBuffer,
8188  (*pAllocation)->GetMemory(),
8189  (*pAllocation)->GetOffset());
8190  if(res >= 0)
8191  {
8192  // All steps succeeded.
8193  if(pAllocationInfo != VMA_NULL)
8194  {
8195  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8196  }
8197  return VK_SUCCESS;
8198  }
8199  allocator->FreeMemory(*pAllocation);
8200  *pAllocation = VK_NULL_HANDLE;
8201  return res;
8202  }
8203  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8204  *pBuffer = VK_NULL_HANDLE;
8205  return res;
8206  }
8207  return res;
8208 }
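
A sketch of the create-query-allocate-bind convenience this function wraps, assuming a valid `allocator`:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo,
        &buffer, &allocation, nullptr);
    // On failure the function has already rolled back the buffer and/or
    // allocation, as the code above shows. Cleanup of both in one call:
    // vmaDestroyBuffer(allocator, buffer, allocation);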
8209 
8210 void vmaDestroyBuffer(
8211  VmaAllocator allocator,
8212  VkBuffer buffer,
8213  VmaAllocation allocation)
8214 {
8215  if(buffer != VK_NULL_HANDLE)
8216  {
8217  VMA_ASSERT(allocator);
8218 
8219  VMA_DEBUG_LOG("vmaDestroyBuffer");
8220 
8221  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8222 
8223  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8224 
8225  allocator->FreeMemory(allocation);
8226  }
8227 }
8228 
8229 VkResult vmaCreateImage(
8230  VmaAllocator allocator,
8231  const VkImageCreateInfo* pImageCreateInfo,
8232  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8233  VkImage* pImage,
8234  VmaAllocation* pAllocation,
8235  VmaAllocationInfo* pAllocationInfo)
8236 {
8237  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8238 
8239  VMA_DEBUG_LOG("vmaCreateImage");
8240 
8241  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8242 
8243  *pImage = VK_NULL_HANDLE;
8244  *pAllocation = VK_NULL_HANDLE;
8245 
8246  // 1. Create VkImage.
8247  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8248  allocator->m_hDevice,
8249  pImageCreateInfo,
8250  allocator->GetAllocationCallbacks(),
8251  pImage);
8252  if(res >= 0)
8253  {
8254  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8255  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8256  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8257 
8258  // 2. Allocate memory using allocator.
8259  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8260  if(res >= 0)
8261  {
8262  // 3. Bind image with memory.
8263  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8264  allocator->m_hDevice,
8265  *pImage,
8266  (*pAllocation)->GetMemory(),
8267  (*pAllocation)->GetOffset());
8268  if(res >= 0)
8269  {
8270  // All steps succeeded.
8271  if(pAllocationInfo != VMA_NULL)
8272  {
8273  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8274  }
8275  return VK_SUCCESS;
8276  }
8277  allocator->FreeMemory(*pAllocation);
8278  *pAllocation = VK_NULL_HANDLE;
8279  return res;
8280  }
8281  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8282  *pImage = VK_NULL_HANDLE;
8283  return res;
8284  }
8285  return res;
8286 }
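
The image variant follows the same pattern; a sketch assuming a valid `allocator`:

    VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgInfo.imageType = VK_IMAGE_TYPE_2D;
    imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgInfo.extent = { 1024, 1024, 1 };
    imgInfo.mipLevels = 1;
    imgInfo.arrayLayers = 1;
    imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // selects IMAGE_OPTIMAL suballocation type above
    imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgInfo, &allocInfo,
        &image, &allocation, nullptr);
    // Cleanup: vmaDestroyImage(allocator, image, allocation);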
8287 
8288 void vmaDestroyImage(
8289  VmaAllocator allocator,
8290  VkImage image,
8291  VmaAllocation allocation)
8292 {
8293  if(image != VK_NULL_HANDLE)
8294  {
8295  VMA_ASSERT(allocator);
8296 
8297  VMA_DEBUG_LOG("vmaDestroyImage");
8298 
8299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8300 
8301  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8302 
8303  allocator->FreeMemory(allocation);
8304  }
8305 }
8306 
8307 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:670
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:887
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:695
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:680
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Memory will be used on device only, so faster access from the device is preferred. No need to be mappable on host.
Definition: vk_mem_alloc.h:861
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:674
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1142
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:692
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1308
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1012
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1066
Definition: vk_mem_alloc.h:924
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:663
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:963
Definition: vk_mem_alloc.h:871
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:707
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:754
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:689
VkDeviceSize preferredSmallHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from small heaps <= 512 MB...
Definition: vk_mem_alloc.h:704
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:875
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation.
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:819
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:677
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:818
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:685
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:1312
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:724
VmaStatInfo total
Definition: vk_mem_alloc.h:828
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1320
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:946
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:1303
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:678
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:599
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:698
Describes parameters of a created VmaPool.
Definition: vk_mem_alloc.h:1020
Definition: vk_mem_alloc.h:1014
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1152
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:675
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:965
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1036
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:1072
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:661
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1023
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
VmaMemoryUsage
Definition: vk_mem_alloc.h:856
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:1298
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameters of a created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:1316
Memory will be used for frequent (dynamic) updates from host and reads on device (upload).
Definition: vk_mem_alloc.h:867
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:676
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:824
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:605
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:626
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:631
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1318
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:957
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:1082
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:671
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:807
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1031
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:618
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:931
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:820
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:622
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1026
Memory will be used for frequent writing on device and readback on host (download).
Definition: vk_mem_alloc.h:870
struct VmaPoolStats VmaPoolStats
Describes parameters of an existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:952
Definition: vk_mem_alloc.h:943
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:810
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:673
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1044
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory.
Definition: vk_mem_alloc.h:710
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1075
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:941
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:970
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either NULL or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
Definition: vk_mem_alloc.h:742
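A sketch of limiting a single heap, assuming (per the full documentation of this member) that the array has VK_MAX_MEMORY_HEAPS elements and that VK_WHOLE_SIZE means no limit:

VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
for (uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    heapSizeLimit[i] = VK_WHOLE_SIZE;      // assumed: no limit on this heap
heapSizeLimit[0] = 1024ull * 1024 * 1024;  // cap heap 0 at 1 GiB (illustrative)
// allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;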
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:826
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. Pointer to the mapped memory is returned through VmaAllocationInfo::pMappedData.
Definition: vk_mem_alloc.h:911
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:819
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:682
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:620
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:681
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
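A sketch of uploading CPU data through a temporary mapping, assuming the allocation lives in host-visible memory and unmapping with the matching vmaUnmapMemory(); srcData and srcSize are hypothetical caller-side variables:

#include <string.h>

void* pData = NULL;
VkResult res = vmaMapMemory(allocator, allocation, &pData);
if (res == VK_SUCCESS)
{
    memcpy(pData, srcData, (size_t)srcSize); // srcData/srcSize: your CPU-side data (assumed)
    vmaUnmapMemory(allocator, allocation);
}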
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as the current frame.
Definition: vk_mem_alloc.h:1058
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
Definition: vk_mem_alloc.h:1166
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps.
Definition: vk_mem_alloc.h:701
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:819
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:816
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Describes parameters of an existing VmaPool.
Definition: vk_mem_alloc.h:1063
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:1147
Definition: vk_mem_alloc.h:939
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1314
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:669
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
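A minimal sketch of allocator setup; physicalDevice and device are assumed to be valid handles, and all other members may stay zero-initialized:

VmaAllocatorCreateInfo allocatorInfo = { 0 };
allocatorInfo.physicalDevice = physicalDevice; // assumed: your VkPhysicalDevice
allocatorInfo.device = device;                 // assumed: your VkDevice

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// At shutdown: vmaDestroyAllocator(allocator);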
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR
Definition: vk_mem_alloc.h:684
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:814
No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
Definition: vk_mem_alloc.h:859
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1016
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:812
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:679
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:683
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new block.
Definition: vk_mem_alloc.h:898
Memory will be mapped on host. Could be used for transfer to/from device.
Definition: vk_mem_alloc.h:864
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1161
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:659
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:672
Parameters of a VmaAllocation object that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1128
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
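A sketch of the general-purpose path, where the caller queries the requirements and binds the returned range itself; buffer and device are assumed to be valid handles:

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = { 0 };
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
    &allocation, &allocInfo);
if (res == VK_SUCCESS)
    res = vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);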
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
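A sketch of calling it once per frame so that frameInUseCount-based lost-allocation accounting can work; quit is a hypothetical loop flag:

uint32_t frameIndex = 0;
while (!quit)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit this frame's work ...
    ++frameIndex;
}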
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
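A sketch that lets the library derive the memory requirements from an existing VkBuffer; binding is assumed to remain the caller's responsibility, as in the general-purpose path above:

VmaAllocationCreateInfo allocCreateInfo = { 0 };
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo,
    &allocation, &allocInfo);
if (res == VK_SUCCESS)
    res = vkBindBufferMemory(device, buffer, allocInfo.deviceMemory, allocInfo.offset);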
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:994
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:820
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:827
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1069
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:820
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1133