Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#include <vulkan/vulkan.h>

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorFlagBits {
    /// Allocator and all objects created from it will not be synchronized internally,
    /// so you must guarantee they are used from only one thread at a time or
    /// synchronized externally by you.
    VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,

    VMA_ALLOCATOR_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorFlagBits;

typedef VkFlags VmaAllocatorFlags;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for the created allocator. Use VmaAllocatorFlagBits enum.
    VmaAllocatorFlags flags;
    /// Vulkan physical device. Must remain valid for the whole lifetime of the created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. Must remain valid for the whole lifetime of the created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block allocated from large heaps. Set to 0 to use the default.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Preferred size of a single VkDeviceMemory block allocated from small heaps. Set to 0 to use the default.
    VkDeviceSize preferredSmallHeapBlockSize;
    /// Custom allocation callbacks or null to use default (system) ones. Optional.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional, can be null.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys Allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

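/*
Example usage - a sketch, not part of the library: creating the allocator at
application startup and destroying it at shutdown. Assumes `physicalDevice`
and `device` were already created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    if(vmaCreateAllocator(&allocatorInfo, &allocator) == VK_SUCCESS)
    {
        // ... create and use allocations ...
        vmaDestroyAllocator(allocator);
    }
*/
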
/// Returns the VkPhysicalDeviceProperties that the allocator fetched from the physical device, so they do not have to be queried again.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// Returns the VkPhysicalDeviceMemoryProperties that the allocator fetched from the physical device, so they do not have to be queried again.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns the property flags of this memory type.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

typedef struct VmaStatInfo
{
    uint32_t AllocationCount;
    uint32_t SuballocationCount;
    uint32_t UnusedRangeCount;
    VkDeviceSize UsedBytes;
    VkDeviceSize UnusedBytes;
    VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
    VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from the current state of the Allocator.
struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
};

void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as a string.
/// ppStatsString must be freed using the vmaFreeStatsString function.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

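/*
Example - a sketch: dumping allocator statistics as a string, e.g. for logging.
The returned string is allocated by the library and must be given back to it.

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
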
typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, no need to be mapped on host.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mapped on host. Could be used for transfer to device.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory will be used for frequent (dynamic) updates from host and reads on device.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory will be used for writing on device and readback on host.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaMemoryRequirementFlagBits {
    /// Set this flag if the allocation should have its own memory block.
    VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT = 0x00000001,
    /// Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create a new block.
    VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT = 0x00000002,
    /// Set this flag to use memory that will be persistently mapped. A pointer to it is returned through VmaAllocationInfo::pMappedData.
    VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT = 0x00000004,

    VMA_MEMORY_REQUIREMENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryRequirementFlagBits;

typedef VkFlags VmaMemoryRequirementFlags;

typedef struct VmaMemoryRequirements
{
    /// Use VmaMemoryRequirementFlagBits enum.
    VmaMemoryRequirementFlags flags;
    /// Intended usage of the memory. Leave VMA_MEMORY_USAGE_UNKNOWN if you specify requiredFlags instead.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for the allocation. Leave 0 if you specify the requirement via usage.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that should preferably be set in a memory type chosen for the allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Custom pointer to be associated with the allocation. It can be fetched or changed later.
    void* pUserData;
} VmaMemoryRequirements;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaMemoryRequirements* pMemoryRequirements,
    uint32_t* pMemoryTypeIndex);

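/*
Example - a sketch: finding the memory type index for a host-visible staging
buffer. `vkMemReq` is assumed to come from vkGetBufferMemoryRequirements.

    VmaMemoryRequirements memReq = {};
    memReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, vkMemReq.memoryTypeBits, &memReq, &memTypeIndex);
*/
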

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to the Vulkan memory object. The same memory object can be shared by multiple allocations.
    VkDeviceMemory deviceMemory;
    /// Offset into the deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data. Null if this allocation is not persistently mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaMemoryRequirements::pUserData.
    void* pUserData;
} VmaAllocationInfo;

VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaMemoryRequirements* pVmaMemoryRequirements,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Allocates memory suitable for the given VkBuffer. Binding the memory is left to the caller.
VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaMemoryRequirements* pMemoryRequirements,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Same as vmaAllocateMemoryForBuffer, but for VkImage.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaMemoryRequirements* pMemoryRequirements,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using one of the vmaAllocateMemory* functions.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about the specified allocation.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Sets pUserData in the given allocation to a new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

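/*
Example - a sketch: uploading data to a host-visible allocation. Assumes
`allocation` was created with usage VMA_MEMORY_USAGE_CPU_ONLY and that
`data`/`dataSize` are provided by the caller.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, data, (size_t)dataSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/
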
void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator);

VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator);

/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to different places.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

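/*
Example - a sketch: compacting a set of allocations the application tracks in
an array. Passing null for pDefragmentationInfo is assumed here to mean "use
defaults". Entries flagged in allocationsChanged were moved, so resources bound
to them must be recreated and rebound by the caller.

    VkBool32 allocationsChanged[ALLOC_COUNT] = {}; // ALLOC_COUNT: hypothetical constant
    VmaDefragmentationStats defragStats = {};
    vmaDefragment(allocator, allocations, ALLOC_COUNT, allocationsChanged,
        VMA_NULL, &defragStats);
*/
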

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pCreateInfo,
    const VmaMemoryRequirements* pMemoryRequirements,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

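/*
Example - a sketch of the typical pattern: creating a buffer together with its
memory in one call. `myBufferSize` is an assumed application-provided size.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = myBufferSize;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaMemoryRequirements memReq = {};
    memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &memReq, &buffer, &allocation, VMA_NULL);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
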
/// Function similar to vmaCreateBuffer().
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pCreateInfo,
    const VmaMemoryRequirements* pMemoryRequirements,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring> // for memcpy, memset, memmove
#include <cstdio>  // for snprintf
#include <new>     // for placement new used by vma_new, vma_new_array

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default for your environment.
*/

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex

#if !defined(_WIN32)
    #include <malloc.h> // for aligned_alloc()
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif


// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
#endif

849 
850 #ifndef VMA_MUTEX
851  class VmaMutex
852  {
853  public:
854  VmaMutex() { }
855  ~VmaMutex() { }
856  void Lock() { m_Mutex.lock(); }
857  void Unlock() { m_Mutex.unlock(); }
858  private:
859  std::mutex m_Mutex;
860  };
861  #define VMA_MUTEX VmaMutex
862 #endif
863 
864 #ifndef VMA_BEST_FIT
865 
877  #define VMA_BEST_FIT (1)
878 #endif

#ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
    /// Every allocation will have its own VkDeviceMemory. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    /// Minimum alignment of all suballocations, in bytes. Set to more than 1 for debugging purposes only; must be a power of two.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    /// Minimum margin between suballocations, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    /// Set to 1 to enable a single mutex protecting all entry calls to the library. Useful for debugging multithreading issues only.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    /// Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only; must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as a single VkDeviceMemory from a "small" heap.
    #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
#endif

/*******************************************************************************
END OF CONFIGURATION
*/


static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t CountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

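// This is the classic parallel ("SWAR") popcount: each step sums bit counts in
// progressively wider groups. For example: CountBitsSet(0xB) == 3 (0xB = 1011 binary).
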
// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

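// For example: VmaRoundDiv(7, 2) == 4 and VmaRoundDiv(5, 2) == 3 (halves round
// up), whereas plain integer division would truncate them to 3 and 2.
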
#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator i = beg; i < centerValue; ++i)
    {
        if(cmp(*i, *centerValue))
        {
            if(insertIndex != i)
            {
                VMA_SWAP(*i, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

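// Worked example with pageSize = 4096: resource A at offset 0 with size 512
// ends on page 0, and resource B starting at offset 1024 also starts on page 0,
// so VmaBlocksOnSamePage(0, 512, 1024, 4096) == true. With resourceBOffset =
// 4096, B starts on the next page and the function returns false.
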
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if the given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
or linear image and the other one is an optimal image. If a type is unknown,
behave conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned iterator points to the found element, if present in the collection,
or to the place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

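// Example - a sketch: for a sorted range {1, 3, 3, 7} with cmp = operator<,
// searching for key 3 returns an iterator to the first 3 (index 1), and
// searching for key 4 returns an iterator to 7 (index 3) - the position where
// 4 would be inserted. Semantics match std::lower_bound.
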
////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

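/*
Example - a sketch: this allocator plugs into the containers below (or into STL
containers when VMA_USE_STL_CONTAINERS is enabled), routing every element
allocation through the given VkAllocationCallbacks:

    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    VmaVector< int, VmaStlAllocator<int> > v(alloc);
    v.push_back(42);
*/
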
#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR
1258 
1259 /* Class with interface compatible with subset of std::vector.
1260 T must be POD because constructors and destructors are not called and memcpy is
1261 used for these objects. */
1262 template<typename T, typename AllocatorT>
1263 class VmaVector
1264 {
1265 public:
1266  VmaVector(const AllocatorT& allocator) :
1267  m_Allocator(allocator),
1268  m_pArray(VMA_NULL),
1269  m_Count(0),
1270  m_Capacity(0)
1271  {
1272  }
1273 
1274  VmaVector(size_t count, const AllocatorT& allocator) :
1275  m_Allocator(allocator),
1276  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, count) : VMA_NULL),
1277  m_Count(count),
1278  m_Capacity(count)
1279  {
1280  }
1281 
1282  VmaVector(const VmaVector<T, AllocatorT>& src) :
1283  m_Allocator(src.m_Allocator),
1284  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src->m_pCallbacks, src.m_Count) : VMA_NULL),
1285  m_Count(src.m_Count),
1286  m_Capacity(src.m_Count)
1287  {
1288  if(m_Count != 0)
1289  {
1290  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
1291  }
1292  }
1293 
1294  ~VmaVector()
1295  {
1296  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1297  }
1298 
    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

1370 
1371  void resize(size_t newCount, bool freeMemory = false)
1372  {
1373  size_t newCapacity = m_Capacity;
1374  if(newCount > m_Capacity)
1375  {
1376  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1377  }
1378  else if(freeMemory)
1379  {
1380  newCapacity = newCount;
1381  }
1382 
1383  if(newCapacity != m_Capacity)
1384  {
1385  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1386  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1387  if(elementsToCopy != 0)
1388  {
1389  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1390  }
1391  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1392  m_Capacity = newCapacity;
1393  m_pArray = newArray;
1394  }
1395 
1396  m_Count = newCount;
1397  }
1398 
1399  void clear(bool freeMemory = false)
1400  {
1401  resize(0, freeMemory);
1402  }
1403 
1404  void insert(size_t index, const T& src)
1405  {
1406  VMA_HEAVY_ASSERT(index <= m_Count);
1407  const size_t oldCount = size();
1408  resize(oldCount + 1);
1409  if(index < oldCount)
1410  {
1411  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1412  }
1413  m_pArray[index] = src;
1414  }
1415 
1416  void remove(size_t index)
1417  {
1418  VMA_HEAVY_ASSERT(index < m_Count);
1419  const size_t oldCount = size();
1420  if(index < oldCount - 1)
1421  {
1422  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1423  }
1424  resize(oldCount - 1);
1425  }
1426 
1427  void push_back(const T& src)
1428  {
1429  const size_t newIndex = size();
1430  resize(newIndex + 1);
1431  m_pArray[newIndex] = src;
1432  }
1433 
1434  void pop_back()
1435  {
1436  VMA_HEAVY_ASSERT(m_Count > 0);
1437  resize(size() - 1);
1438  }
1439 
1440  void push_front(const T& src)
1441  {
1442  insert(0, src);
1443  }
1444 
1445  void pop_front()
1446  {
1447  VMA_HEAVY_ASSERT(m_Count > 0);
1448  remove(0);
1449  }
1450 
1451  typedef T* iterator;
1452 
1453  iterator begin() { return m_pArray; }
1454  iterator end() { return m_pArray + m_Count; }
1455 
1456 private:
1457  AllocatorT m_Allocator;
1458  T* m_pArray;
1459  size_t m_Count;
1460  size_t m_Capacity;
1461 };
1462 
1463 template<typename T, typename allocatorT>
1464 static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
1465 {
1466  vec.insert(index, item);
1467 }
1468 
1469 template<typename T, typename allocatorT>
1470 static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
1471 {
1472  vec.remove(index);
1473 }
1474 
1475 #endif // #if VMA_USE_STL_VECTOR

////////////////////////////////////////////////////////////////////////////////
// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

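/*
Example - a sketch: the pool hands out fixed-size items carved from larger
blocks, so frequent Alloc/Free pairs avoid the system allocator. Free items of
a block form a singly-linked free list threaded through the union:

    VmaPoolAllocator<uint64_t> pool(pAllocationCallbacks, 128); // 128 items per block
    uint64_t* item = pool.Alloc();
    // ... use *item ...
    pool.Free(item);
*/
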
////////////////////////////////////////////////////////////////////////////////
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared but not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
        friend class VmaList<T, AllocatorT>::const_iterator;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

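/*
Example - a sketch: VmaList mirrors the subset of std::list the library needs -
iteration, push_back, insert before an iterator, and erase:

    VmaStlAllocator<int> alloc(pAllocationCallbacks);
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(1);
    list.insert(list.begin(), 0); // list is now {0, 1}
*/
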
#endif // #if VMA_USE_STL_LIST

////////////////////////////////////////////////////////////////////////////////
// class VmaMap

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VectorRemove(m_Vector, it - m_Vector.begin());
}

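/*
Example - a sketch: VmaMap keeps its pairs sorted by key inside a VmaVector and
uses VmaBinaryFindFirstNotLess, so find() is O(log n) while insert()/erase()
additionally pay for shifting vector elements:

    typedef VmaMap<uint32_t, uint64_t> MyMap;
    MyMap map(VmaStlAllocator< VmaPair<uint32_t, uint64_t> >(pAllocationCallbacks));
    map.insert(VmaPair<uint32_t, uint64_t>(1, 100));
    MyMap::iterator it = map.find(1);
    if(it != map.end())
        map.erase(it);
*/
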
#endif // #if VMA_USE_STL_UNORDERED_MAP

////////////////////////////////////////////////////////////////////////////////

class VmaBlock;

enum VMA_BLOCK_VECTOR_TYPE
{
    VMA_BLOCK_VECTOR_TYPE_UNMAPPED,
    VMA_BLOCK_VECTOR_TYPE_MAPPED,
    VMA_BLOCK_VECTOR_TYPE_COUNT
};

static VMA_BLOCK_VECTOR_TYPE VmaMemoryRequirementFlagsToBlockVectorType(VmaMemoryRequirementFlags flags)
{
    return (flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0 ?
        VMA_BLOCK_VECTOR_TYPE_MAPPED :
        VMA_BLOCK_VECTOR_TYPE_UNMAPPED;
}

struct VmaAllocation_T
{
public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_OWN,
    };

    VmaAllocation_T()
    {
        memset(this, 0, sizeof(VmaAllocation_T));
    }

    void InitBlockAllocation(
        VmaBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void ChangeBlockAllocation(
        VmaBlock* block,
        VkDeviceSize offset)
    {
        VMA_ASSERT(block != VMA_NULL);
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
    }

    void InitOwnAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        bool persistentMap,
        void* pMappedData,
        VkDeviceSize size,
        void* pUserData)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = ALLOCATION_TYPE_OWN;
        m_Alignment = 0;
        m_Size = size;
        m_pUserData = pUserData;
        m_SuballocationType = suballocationType;
        m_OwnAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_OwnAllocation.m_hMemory = hMemory;
        m_OwnAllocation.m_PersistentMap = persistentMap;
        m_OwnAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(void* pUserData) { m_pUserData = pUserData; }
    VmaSuballocationType GetSuballocationType() const { return m_SuballocationType; }

    VmaBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const
    {
        return (m_Type == ALLOCATION_TYPE_BLOCK) ? m_BlockAllocation.m_Offset : 0;
    }
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    VMA_BLOCK_VECTOR_TYPE GetBlockVectorType() const;
    void* GetMappedData() const;

    VkResult OwnAllocMapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_PersistentMap)
        {
            return vkMapMemory(hDevice, m_OwnAllocation.m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_OwnAllocation.m_pMappedData);
        }
        return VK_SUCCESS;
    }
    void OwnAllocUnmapPersistentlyMappedMemory(VkDevice hDevice)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_OWN);
        if(m_OwnAllocation.m_pMappedData)
        {
            VMA_ASSERT(m_OwnAllocation.m_PersistentMap);
            vkUnmapMemory(hDevice, m_OwnAllocation.m_hMemory);
            m_OwnAllocation.m_pMappedData = VMA_NULL;
        }
    }

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    ALLOCATION_TYPE m_Type;
    VmaSuballocationType m_SuballocationType;

    // Allocation out of VmaBlock.
    struct BlockAllocation
    {
        VmaBlock* m_Block;
        VkDeviceSize m_Offset;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct OwnAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        bool m_PersistentMap;
        void* m_pMappedData;
    };

    union
    {
        // Allocation out of VmaBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        OwnAllocation m_OwnAllocation;
    };
};

/*
Represents a region of a VmaBlock that is either assigned to a specific
allocation or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaSuballocationType type;
};

typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Parameters of an allocation.
struct VmaAllocationRequest
{
    VmaSuballocationList::iterator freeSuballocationItem;
    VkDeviceSize offset;
};

/* Single block of memory - VkDeviceMemory with all the data about its regions
assigned or free. */
class VmaBlock
{
public:
    uint32_t m_MemoryTypeIndex;
    VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
    VkDeviceMemory m_hMemory;
    VkDeviceSize m_Size;
    bool m_PersistentMap;
    void* m_pMappedData;
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    VmaBlock(VmaAllocator hAllocator);

    ~VmaBlock()
    {
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        bool persistentMap,
        void* pMappedData);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Tries to find a place for a suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);

    // Checks if a requested suballocation with given parameters can be placed in given freeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator freeSuballocItem,
        VkDeviceSize* pOffset) const;

    // Returns true if this block is empty - contains only a single free suballocation.
    bool IsEmpty() const;

    // Makes actual allocation based on request. Request must already be checked
    // and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Merges the given free suballocation with the following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free. Merges it with adjacent free
    // suballocations if applicable.
    void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Inserts the given free suballocation into the sorted list
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Removes the given free suballocation from the sorted list
    // m_FreeSuballocationsBySize if present.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
2437 
2438 struct VmaPointerLess
2439 {
2440  bool operator()(const void* lhs, const void* rhs) const
2441  {
2442  return lhs < rhs;
2443  }
2444 };
2445 
2446 /* Sequence of VmaBlock. Represents memory blocks allocated for a specific
2447 Vulkan memory type. */
2448 struct VmaBlockVector
2449 {
2450  // Incrementally sorted by sumFreeSize, ascending.
2451  VmaVector< VmaBlock*, VmaStlAllocator<VmaBlock*> > m_Blocks;
2452 
2453  VmaBlockVector(VmaAllocator hAllocator);
2454  ~VmaBlockVector();
2455 
2456  bool IsEmpty() const { return m_Blocks.empty(); }
2457 
2458  // Finds and removes given block from vector.
2459  void Remove(VmaBlock* pBlock);
2460 
2461  // Performs single step in sorting m_Blocks. They may not be fully sorted
2462  // after this call.
2463  void IncrementallySortBlocks();
2464 
2465  // Adds statistics of this BlockVector to pStats.
2466  void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;
2467 
2468 #if VMA_STATS_STRING_ENABLED
2469  void PrintDetailedMap(class VmaStringBuilder& sb) const;
2470 #endif
2471 
2472  void UnmapPersistentlyMappedMemory();
2473  VkResult MapPersistentlyMappedMemory();
2474 
2475 private:
2476  VmaAllocator m_hAllocator;
2477 };
2478 
2479 // Main allocator object.
2480 struct VmaAllocator_T
2481 {
2482  bool m_UseMutex;
2483  VkDevice m_hDevice;
2484  bool m_AllocationCallbacksSpecified;
2485  VkAllocationCallbacks m_AllocationCallbacks;
2486  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
2487  VkDeviceSize m_PreferredLargeHeapBlockSize;
2488  VkDeviceSize m_PreferredSmallHeapBlockSize;
2489  // Non-zero when we are inside UnmapPersistentlyMappedMemory...MapPersistentlyMappedMemory.
2490  // Counter to allow nested calls to these functions.
2491  uint32_t m_UnmapPersistentlyMappedMemoryCounter;
2492 
2493  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
2494  VkPhysicalDeviceMemoryProperties m_MemProps;
2495 
2496  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2497  /* There can be at most one allocation that is completely empty - a
2498  hysteresis to avoid pessimistic case of alternating creation and destruction
2499  of a VkDeviceMemory. */
2500  bool m_HasEmptyBlock[VK_MAX_MEMORY_TYPES];
2501  VMA_MUTEX m_BlocksMutex[VK_MAX_MEMORY_TYPES];
2502 
2503  // Each vector is sorted by memory (handle value).
2504  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
2505  AllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
2506  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
2507 
2508  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
2509  ~VmaAllocator_T();
2510 
2511  const VkAllocationCallbacks* GetAllocationCallbacks() const
2512  {
2513  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
2514  }
2515 
2516  VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;
2517 
2518  VkDeviceSize GetBufferImageGranularity() const
2519  {
2520  return VMA_MAX(
2521  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
2522  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
2523  }
2524 
2525  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
2526  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
2527 
2528  // Main allocation function.
2529  VkResult AllocateMemory(
2530  const VkMemoryRequirements& vkMemReq,
2531  const VmaMemoryRequirements& vmaMemReq,
2532  VmaSuballocationType suballocType,
2533  VmaAllocation* pAllocation);
2534 
2535  // Main deallocation function.
2536  void FreeMemory(const VmaAllocation allocation);
2537 
2538  void CalculateStats(VmaStats* pStats);
2539 
2540 #if VMA_STATS_STRING_ENABLED
2541  void PrintDetailedMap(class VmaStringBuilder& sb);
2542 #endif
2543 
2544  void UnmapPersistentlyMappedMemory();
2545  VkResult MapPersistentlyMappedMemory();
2546 
2547  VkResult Defragment(
2548  VmaAllocation* pAllocations,
2549  size_t allocationCount,
2550  VkBool32* pAllocationsChanged,
2551  const VmaDefragmentationInfo* pDefragmentationInfo,
2552  VmaDefragmentationStats* pDefragmentationStats);
2553 
2554  static void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
2555 
2556 private:
2557  VkPhysicalDevice m_PhysicalDevice;
2558 
2559  VkResult AllocateMemoryOfType(
2560  const VkMemoryRequirements& vkMemReq,
2561  const VmaMemoryRequirements& vmaMemReq,
2562  uint32_t memTypeIndex,
2563  VmaSuballocationType suballocType,
2564  VmaAllocation* pAllocation);
2565 
2566  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
2567  VkResult AllocateOwnMemory(
2568  VkDeviceSize size,
2569  VmaSuballocationType suballocType,
2570  uint32_t memTypeIndex,
2571  bool map,
2572  void* pUserData,
2573  VmaAllocation* pAllocation);
2574 
2575  // Frees the given allocation, which must have been created as Own Memory.
2576  void FreeOwnMemory(VmaAllocation allocation);
2577 };
2578 
2580 // Memory allocation #2 after VmaAllocator_T definition
2581 
2582 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
2583 {
2584  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
2585 }
2586 
2587 static void VmaFree(VmaAllocator hAllocator, void* ptr)
2588 {
2589  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
2590 }
2591 
2592 template<typename T>
2593 static T* VmaAllocate(VmaAllocator hAllocator)
2594 {
2595  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
2596 }
2597 
2598 template<typename T>
2599 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
2600 {
2601  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
2602 }
2603 
2604 template<typename T>
2605 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2606 {
2607  if(ptr != VMA_NULL)
2608  {
2609  ptr->~T();
2610  VmaFree(hAllocator, ptr);
2611  }
2612 }
2613 
2614 template<typename T>
2615 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2616 {
2617  if(ptr != VMA_NULL)
2618  {
2619  for(size_t i = count; i--; )
2620  ptr[i].~T();
2621  VmaFree(hAllocator, ptr);
2622  }
2623 }
2624 
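// For illustration, a minimal usage sketch of the helpers above (hypothetical
// caller code, assuming a valid VmaAllocator hAlloc; MyStruct is made up).
// VmaAllocate and VmaAllocateArray return raw memory, so non-trivial objects
// must be constructed with placement-new; vma_delete and vma_delete_array
// invoke the destructors before returning memory to the allocation callbacks:
//
//     MyStruct* p = VmaAllocate<MyStruct>(hAlloc);
//     new(p) MyStruct();                            // construct in place
//     // ... use p ...
//     vma_delete(hAlloc, p);                        // ~MyStruct() + VmaFree
//
//     int* arr = VmaAllocateArray<int>(hAlloc, 16);
//     // ... use arr ...
//     vma_delete_array(hAlloc, arr, 16);            // trivial dtors + VmaFree
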
2626 // VmaStringBuilder
2627 
2628 #if VMA_STATS_STRING_ENABLED
2629 
2630 class VmaStringBuilder
2631 {
2632 public:
2633  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
2634  size_t GetLength() const { return m_Data.size(); }
2635  const char* GetData() const { return m_Data.data(); }
2636 
2637  void Add(char ch) { m_Data.push_back(ch); }
2638  void Add(const char* pStr);
2639  void AddNewLine() { Add('\n'); }
2640  void AddNumber(uint32_t num);
2641  void AddNumber(uint64_t num);
2642  void AddBool(bool b) { Add(b ? "true" : "false"); }
2643  void AddNull() { Add("null"); }
2644  void AddString(const char* pStr);
2645 
2646 private:
2647  VmaVector< char, VmaStlAllocator<char> > m_Data;
2648 };
2649 
2650 void VmaStringBuilder::Add(const char* pStr)
2651 {
2652  const size_t strLen = strlen(pStr);
2653  if(strLen > 0)
2654  {
2655  const size_t oldCount = m_Data.size();
2656  m_Data.resize(oldCount + strLen);
2657  memcpy(m_Data.data() + oldCount, pStr, strLen);
2658  }
2659 }
2660 
2661 void VmaStringBuilder::AddNumber(uint32_t num)
2662 {
2663  char buf[11];
2664  VmaUint32ToStr(buf, sizeof(buf), num);
2665  Add(buf);
2666 }
2667 
2668 void VmaStringBuilder::AddNumber(uint64_t num)
2669 {
2670  char buf[21];
2671  VmaUint64ToStr(buf, sizeof(buf), num);
2672  Add(buf);
2673 }
2674 
2675 void VmaStringBuilder::AddString(const char* pStr)
2676 {
2677  Add('"');
2678  const size_t strLen = strlen(pStr);
2679  for(size_t i = 0; i < strLen; ++i)
2680  {
2681  char ch = pStr[i];
2682  if(ch == '\\')
2683  {
2684  Add("\\\\");
2685  }
2686  else if(ch == '"')
2687  {
2688  Add("\\\"");
2689  }
2690  else if(ch >= 32)
2691  {
2692  Add(ch);
2693  }
2694  else switch(ch)
2695  {
2696  case '\n':
2697  Add("\\n");
2698  break;
2699  case '\r':
2700  Add("\\r");
2701  break;
2702  case '\t':
2703  Add("\\t");
2704  break;
2705  default:
2706  VMA_ASSERT(0 && "Character not currently supported.");
2707  break;
2708  }
2709  }
2710  Add('"');
2711 }
2712 
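// For intuition, a hedged example of the escaping AddString performs (the
// input literal is made up; the shown output is what lands in the stats string):
//
//     sb.AddString("Line1\nLine2\t\"quoted\"");
//     // appends: "Line1\nLine2\t\"quoted\"
//     // i.e. the text surrounded by double quotes, with the newline, tab and
//     // inner quotes replaced by their backslash escape sequences.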
2714 
2715 VkDeviceMemory VmaAllocation_T::GetMemory() const
2716 {
2717  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2718  m_BlockAllocation.m_Block->m_hMemory : m_OwnAllocation.m_hMemory;
2719 }
2720 
2721 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
2722 {
2723  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2724  m_BlockAllocation.m_Block->m_MemoryTypeIndex : m_OwnAllocation.m_MemoryTypeIndex;
2725 }
2726 
2727 VMA_BLOCK_VECTOR_TYPE VmaAllocation_T::GetBlockVectorType() const
2728 {
2729  return (m_Type == ALLOCATION_TYPE_BLOCK) ?
2730  m_BlockAllocation.m_Block->m_BlockVectorType :
2731  (m_OwnAllocation.m_PersistentMap ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED);
2732 }
2733 
2734 void* VmaAllocation_T::GetMappedData() const
2735 {
2736  switch(m_Type)
2737  {
2738  case ALLOCATION_TYPE_BLOCK:
2739  if(m_BlockAllocation.m_Block->m_pMappedData != VMA_NULL)
2740  {
2741  return (char*)m_BlockAllocation.m_Block->m_pMappedData + m_BlockAllocation.m_Offset;
2742  }
2743  else
2744  {
2745  return VMA_NULL;
2746  }
2747  break;
2748  case ALLOCATION_TYPE_OWN:
2749  return m_OwnAllocation.m_pMappedData;
2750  default:
2751  VMA_ASSERT(0);
2752  return VMA_NULL;
2753  }
2754 }
2755 
2756 // Entries correspond to values of enum VmaSuballocationType.
2757 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
2758  "FREE",
2759  "UNKNOWN",
2760  "BUFFER",
2761  "IMAGE_UNKNOWN",
2762  "IMAGE_LINEAR",
2763  "IMAGE_OPTIMAL",
2764 };
2765 
2766 static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
2767 {
2768  sb.Add("{ \"Allocations\": ");
2769  sb.AddNumber(stat.AllocationCount);
2770  sb.Add(", \"Suballocations\": ");
2771  sb.AddNumber(stat.SuballocationCount);
2772  sb.Add(", \"UnusedRanges\": ");
2773  sb.AddNumber(stat.UnusedRangeCount);
2774  sb.Add(", \"UsedBytes\": ");
2775  sb.AddNumber(stat.UsedBytes);
2776  sb.Add(", \"UnusedBytes\": ");
2777  sb.AddNumber(stat.UnusedBytes);
2778  sb.Add(", \"SuballocationSize\": { \"Min\": ");
2779  sb.AddNumber(stat.SuballocationSizeMin);
2780  sb.Add(", \"Avg\": ");
2781  sb.AddNumber(stat.SuballocationSizeAvg);
2782  sb.Add(", \"Max\": ");
2783  sb.AddNumber(stat.SuballocationSizeMax);
2784  sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
2785  sb.AddNumber(stat.UnusedRangeSizeMin);
2786  sb.Add(", \"Avg\": ");
2787  sb.AddNumber(stat.UnusedRangeSizeAvg);
2788  sb.Add(", \"Max\": ");
2789  sb.AddNumber(stat.UnusedRangeSizeMax);
2790  sb.Add(" } }");
2791 }
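
// A sketch of the JSON fragment produced above (the numbers are made up):
//
//     { "Allocations": 2, "Suballocations": 10, "UnusedRanges": 3,
//     "UsedBytes": 1048576, "UnusedBytes": 65536,
//     "SuballocationSize": { "Min": 256, "Avg": 104858, "Max": 524288 },
//     "UnusedRangeSize": { "Min": 4096, "Avg": 21845, "Max": 32768 } }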
2792 
2793 #endif // #if VMA_STATS_STRING_ENABLED
2794 
2795 struct VmaSuballocationItemSizeLess
2796 {
2797  bool operator()(
2798  const VmaSuballocationList::iterator lhs,
2799  const VmaSuballocationList::iterator rhs) const
2800  {
2801  return lhs->size < rhs->size;
2802  }
2803  bool operator()(
2804  const VmaSuballocationList::iterator lhs,
2805  VkDeviceSize rhsSize) const
2806  {
2807  return lhs->size < rhsSize;
2808  }
2809 };
2810 
2811 VmaBlock::VmaBlock(VmaAllocator hAllocator) :
2812  m_MemoryTypeIndex(UINT32_MAX),
2813  m_BlockVectorType(VMA_BLOCK_VECTOR_TYPE_COUNT),
2814  m_hMemory(VK_NULL_HANDLE),
2815  m_Size(0),
2816  m_PersistentMap(false),
2817  m_pMappedData(VMA_NULL),
2818  m_FreeCount(0),
2819  m_SumFreeSize(0),
2820  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
2821  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
2822 {
2823 }
2824 
2825 void VmaBlock::Init(
2826  uint32_t newMemoryTypeIndex,
2827  VMA_BLOCK_VECTOR_TYPE newBlockVectorType,
2828  VkDeviceMemory newMemory,
2829  VkDeviceSize newSize,
2830  bool persistentMap,
2831  void* pMappedData)
2832 {
2833  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2834 
2835  m_MemoryTypeIndex = newMemoryTypeIndex;
2836  m_BlockVectorType = newBlockVectorType;
2837  m_hMemory = newMemory;
2838  m_Size = newSize;
2839  m_PersistentMap = persistentMap;
2840  m_pMappedData = pMappedData;
2841  m_FreeCount = 1;
2842  m_SumFreeSize = newSize;
2843 
2844  m_Suballocations.clear();
2845  m_FreeSuballocationsBySize.clear();
2846 
2847  VmaSuballocation suballoc = {};
2848  suballoc.offset = 0;
2849  suballoc.size = newSize;
2850  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2851 
2852  m_Suballocations.push_back(suballoc);
2853  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2854  --suballocItem;
2855  m_FreeSuballocationsBySize.push_back(suballocItem);
2856 }
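
// After Init() the block's metadata describes a single free range spanning the
// whole VkDeviceMemory - a sketch of the invariants at this point:
//
//     m_Suballocations:           [ { offset = 0, size = newSize, FREE } ]
//     m_FreeSuballocationsBySize: [ iterator to that single suballocation ]
//     m_FreeCount == 1, m_SumFreeSize == newSize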
2857 
2858 void VmaBlock::Destroy(VmaAllocator allocator)
2859 {
2860  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
2861  if(m_pMappedData != VMA_NULL)
2862  {
2863  vkUnmapMemory(allocator->m_hDevice, m_hMemory);
2864  m_pMappedData = VMA_NULL;
2865  }
2866 
2867  // Callback.
2868  if(allocator->m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
2869  {
2870  (*allocator->m_DeviceMemoryCallbacks.pfnFree)(allocator, m_MemoryTypeIndex, m_hMemory, m_Size);
2871  }
2872 
2873  vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
2874  m_hMemory = VK_NULL_HANDLE;
2875 }
2876 
2877 bool VmaBlock::Validate() const
2878 {
2879  if((m_hMemory == VK_NULL_HANDLE) ||
2880  (m_Size == 0) ||
2881  m_Suballocations.empty())
2882  {
2883  return false;
2884  }
2885 
2886  // Expected offset of new suballocation as calculated from previous ones.
2887  VkDeviceSize calculatedOffset = 0;
2888  // Expected number of free suballocations as calculated from traversing their list.
2889  uint32_t calculatedFreeCount = 0;
2890  // Expected sum size of free suballocations as calculated from traversing their list.
2891  VkDeviceSize calculatedSumFreeSize = 0;
2892  // Expected number of free suballocations that should be registered in
2893  // m_FreeSuballocationsBySize, as calculated from traversing their list.
2894  size_t freeSuballocationsToRegister = 0;
2895  // True if previously visited suballocation was free.
2896  bool prevFree = false;
2897 
2898  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2899  suballocItem != m_Suballocations.cend();
2900  ++suballocItem)
2901  {
2902  const VmaSuballocation& subAlloc = *suballocItem;
2903 
2904  // Actual offset of this suballocation doesn't match expected one.
2905  if(subAlloc.offset != calculatedOffset)
2906  {
2907  return false;
2908  }
2909 
2910  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
2911  // Two adjacent free suballocations are invalid. They should be merged.
2912  if(prevFree && currFree)
2913  {
2914  return false;
2915  }
2916  prevFree = currFree;
2917 
2918  if(currFree)
2919  {
2920  calculatedSumFreeSize += subAlloc.size;
2921  ++calculatedFreeCount;
2922  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2923  {
2924  ++freeSuballocationsToRegister;
2925  }
2926  }
2927 
2928  calculatedOffset += subAlloc.size;
2929  }
2930 
2931  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
2932  // match the expected one.
2933  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
2934  {
2935  return false;
2936  }
2937 
2938  VkDeviceSize lastSize = 0;
2939  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
2940  {
2941  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
2942 
2943  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
2944  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
2945  {
2946  return false;
2947  }
2948  // They must be sorted by size ascending.
2949  if(suballocItem->size < lastSize)
2950  {
2951  return false;
2952  }
2953 
2954  lastSize = suballocItem->size;
2955  }
2956 
2957  // Check if totals match calculated values.
2958  return
2959  (calculatedOffset == m_Size) &&
2960  (calculatedSumFreeSize == m_SumFreeSize) &&
2961  (calculatedFreeCount == m_FreeCount);
2962 }
2963 
2964 /*
2965 How many suitable free suballocations to analyze before choosing the best one.
2966 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
2967  will be chosen.
2968 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
2969  suballocations will be analyzed and the best one will be chosen.
2970 - Any other value is also acceptable.
2971 */
2972 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
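// For intuition, suppose the free suballocations sorted by size are
// { 64, 256, 1024 } and allocSize == 200. The function below behaves as
// follows (the sizes are illustrative):
// - VMA_BEST_FIT: binary-search for the first size >= 200, i.e. 256, then
//   take the first candidate from there that also passes the alignment and
//   granularity checks in CheckAllocation.
// - Otherwise: iterate from the largest candidate (1024) downwards.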
2973 
2974 bool VmaBlock::CreateAllocationRequest(
2975  VkDeviceSize bufferImageGranularity,
2976  VkDeviceSize allocSize,
2977  VkDeviceSize allocAlignment,
2978  VmaSuballocationType allocType,
2979  VmaAllocationRequest* pAllocationRequest)
2980 {
2981  VMA_ASSERT(allocSize > 0);
2982  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
2983  VMA_ASSERT(pAllocationRequest != VMA_NULL);
2984  VMA_HEAVY_ASSERT(Validate());
2985 
2986  // There is not enough total free space in this block to fulfill the request: Early return.
2987  if(m_SumFreeSize < allocSize)
2988  {
2989  return false;
2990  }
2991 
2992  // Old brute-force algorithm, linearly searching suballocations.
2993  /*
2994  uint32_t suitableSuballocationsFound = 0;
2995  for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
2996  suballocItem != VMA_NULL &&
2997  suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
2998  suballocItem = suballocItem->Next)
2999  {
3000  if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
3001  {
3002  VkDeviceSize offset = 0, cost = 0;
3003  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
3004  {
3005  ++suitableSuballocationsFound;
3006  if(cost < costLimit)
3007  {
3008  pAllocationRequest->freeSuballocationItem = suballocItem;
3009  pAllocationRequest->offset = offset;
3010  pAllocationRequest->cost = cost;
3011  if(cost == 0)
3012  return true;
3013  costLimit = cost;
3014  betterSuballocationFound = true;
3015  }
3016  }
3017  }
3018  }
3019  */
3020 
3021  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
3022  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
3023  if(freeSuballocCount > 0)
3024  {
3025  if(VMA_BEST_FIT)
3026  {
3027  // Find first free suballocation with size not less than allocSize.
3028  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3029  m_FreeSuballocationsBySize.data(),
3030  m_FreeSuballocationsBySize.data() + freeSuballocCount,
3031  allocSize,
3032  VmaSuballocationItemSizeLess());
3033  size_t index = it - m_FreeSuballocationsBySize.data();
3034  for(; index < freeSuballocCount; ++index)
3035  {
3036  VkDeviceSize offset = 0;
3037  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3038  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3039  {
3040  pAllocationRequest->freeSuballocationItem = suballocItem;
3041  pAllocationRequest->offset = offset;
3042  return true;
3043  }
3044  }
3045  }
3046  else
3047  {
3048  // Search starting from the biggest suballocations.
3049  for(size_t index = freeSuballocCount; index--; )
3050  {
3051  VkDeviceSize offset = 0;
3052  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
3053  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
3054  {
3055  pAllocationRequest->freeSuballocationItem = suballocItem;
3056  pAllocationRequest->offset = offset;
3057  return true;
3058  }
3059  }
3060  }
3061  }
3062 
3063  return false;
3064 }
3065 
3066 bool VmaBlock::CheckAllocation(
3067  VkDeviceSize bufferImageGranularity,
3068  VkDeviceSize allocSize,
3069  VkDeviceSize allocAlignment,
3070  VmaSuballocationType allocType,
3071  VmaSuballocationList::const_iterator freeSuballocItem,
3072  VkDeviceSize* pOffset) const
3073 {
3074  VMA_ASSERT(allocSize > 0);
3075  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
3076  VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
3077  VMA_ASSERT(pOffset != VMA_NULL);
3078 
3079  const VmaSuballocation& suballoc = *freeSuballocItem;
3080  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3081 
3082  // Size of this suballocation is too small for this request: Early return.
3083  if(suballoc.size < allocSize)
3084  {
3085  return false;
3086  }
3087 
3088  // Start from offset equal to beginning of this suballocation.
3089  *pOffset = suballoc.offset;
3090 
3091  // Apply VMA_DEBUG_MARGIN at the beginning.
3092  if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
3093  {
3094  *pOffset += VMA_DEBUG_MARGIN;
3095  }
3096 
3097  // Apply alignment.
3098  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
3099  *pOffset = VmaAlignUp(*pOffset, alignment);
3100 
3101  // Check previous suballocations for BufferImageGranularity conflicts.
3102  // Make bigger alignment if necessary.
3103  if(bufferImageGranularity > 1)
3104  {
3105  bool bufferImageGranularityConflict = false;
3106  VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
3107  while(prevSuballocItem != m_Suballocations.cbegin())
3108  {
3109  --prevSuballocItem;
3110  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
3111  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
3112  {
3113  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
3114  {
3115  bufferImageGranularityConflict = true;
3116  break;
3117  }
3118  }
3119  else
3120  // Already on previous page.
3121  break;
3122  }
3123  if(bufferImageGranularityConflict)
3124  {
3125  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
3126  }
3127  }
3128 
3129  // Calculate padding at the beginning based on current offset.
3130  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
3131 
3132  // Calculate required margin at the end if this is not the last suballocation.
3133  VmaSuballocationList::const_iterator next = freeSuballocItem;
3134  ++next;
3135  const VkDeviceSize requiredEndMargin =
3136  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
3137 
3138  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
3139  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
3140  {
3141  return false;
3142  }
3143 
3144  // Check next suballocations for BufferImageGranularity conflicts.
3145  // If conflict exists, allocation cannot be made here.
3146  if(bufferImageGranularity > 1)
3147  {
3148  VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
3149  ++nextSuballocItem;
3150  while(nextSuballocItem != m_Suballocations.cend())
3151  {
3152  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
3153  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
3154  {
3155  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
3156  {
3157  return false;
3158  }
3159  }
3160  else
3161  {
3162  // Already on next page.
3163  break;
3164  }
3165  ++nextSuballocItem;
3166  }
3167  }
3168 
3169  // All tests passed: Success. pOffset is already filled.
3170  return true;
3171 }
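
// A worked example of the offset computation above, assuming
// VMA_DEBUG_MARGIN == 0 and no granularity conflicts: for a free suballocation
// { offset = 1000, size = 500 } and a request { allocSize = 256,
// allocAlignment = 128 }, *pOffset starts at 1000 and is aligned up to 1024,
// so paddingBegin == 24 and requiredEndMargin == 0; the final check
// 24 + 256 + 0 <= 500 passes, so the allocation fits at offset 1024.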
3172 
3173 bool VmaBlock::IsEmpty() const
3174 {
3175  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
3176 }
3177 
3178 void VmaBlock::Alloc(
3179  const VmaAllocationRequest& request,
3180  VmaSuballocationType type,
3181  VkDeviceSize allocSize)
3182 {
3183  VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
3184  VmaSuballocation& suballoc = *request.freeSuballocationItem;
3185  // Given suballocation is a free block.
3186  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
3187  // Given offset is inside this suballocation.
3188  VMA_ASSERT(request.offset >= suballoc.offset);
3189  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
3190  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
3191  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
3192 
3193  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
3194  // it to become used.
3195  UnregisterFreeSuballocation(request.freeSuballocationItem);
3196 
3197  suballoc.offset = request.offset;
3198  suballoc.size = allocSize;
3199  suballoc.type = type;
3200 
3201  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
3202  if(paddingEnd)
3203  {
3204  VmaSuballocation paddingSuballoc = {};
3205  paddingSuballoc.offset = request.offset + allocSize;
3206  paddingSuballoc.size = paddingEnd;
3207  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3208  VmaSuballocationList::iterator next = request.freeSuballocationItem;
3209  ++next;
3210  const VmaSuballocationList::iterator paddingEndItem =
3211  m_Suballocations.insert(next, paddingSuballoc);
3212  RegisterFreeSuballocation(paddingEndItem);
3213  }
3214 
3215  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
3216  if(paddingBegin)
3217  {
3218  VmaSuballocation paddingSuballoc = {};
3219  paddingSuballoc.offset = request.offset - paddingBegin;
3220  paddingSuballoc.size = paddingBegin;
3221  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3222  const VmaSuballocationList::iterator paddingBeginItem =
3223  m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
3224  RegisterFreeSuballocation(paddingBeginItem);
3225  }
3226 
3227  // Update totals.
3228  --m_FreeCount;
3229  if(paddingBegin > 0)
3230  {
3231  ++m_FreeCount;
3232  }
3233  if(paddingEnd > 0)
3234  {
3235  ++m_FreeCount;
3236  }
3237  m_SumFreeSize -= allocSize;
3238 }
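
// Schematically, Alloc() splits the chosen free range into up to three
// suballocations (either padding piece may be absent):
//
//     before: [................ FREE, size S ................]
//     after:  [FREE paddingBegin][ USED allocSize ][FREE paddingEnd]
//
// where paddingBegin = request.offset - original offset and
// paddingEnd = S - paddingBegin - allocSize; m_FreeCount and m_SumFreeSize
// are updated to match.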
3239 
3240 void VmaBlock::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
3241 {
3242  // Change this suballocation to be marked as free.
3243  VmaSuballocation& suballoc = *suballocItem;
3244  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
3245 
3246  // Update totals.
3247  ++m_FreeCount;
3248  m_SumFreeSize += suballoc.size;
3249 
3250  // Merge with previous and/or next suballocation if it's also free.
3251  bool mergeWithNext = false;
3252  bool mergeWithPrev = false;
3253 
3254  VmaSuballocationList::iterator nextItem = suballocItem;
3255  ++nextItem;
3256  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
3257  {
3258  mergeWithNext = true;
3259  }
3260 
3261  VmaSuballocationList::iterator prevItem = suballocItem;
3262  if(suballocItem != m_Suballocations.begin())
3263  {
3264  --prevItem;
3265  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
3266  {
3267  mergeWithPrev = true;
3268  }
3269  }
3270 
3271  if(mergeWithNext)
3272  {
3273  UnregisterFreeSuballocation(nextItem);
3274  MergeFreeWithNext(suballocItem);
3275  }
3276 
3277  if(mergeWithPrev)
3278  {
3279  UnregisterFreeSuballocation(prevItem);
3280  MergeFreeWithNext(prevItem);
3281  RegisterFreeSuballocation(prevItem);
3282  }
3283  else
3284  RegisterFreeSuballocation(suballocItem);
3285 }
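
// The merge logic above covers three neighborhoods of the freed item F
// (two adjacent free suballocations are never allowed to persist):
//
//     [used][F][FREE] -> F absorbs the next item            (mergeWithNext)
//     [FREE][F][used] -> the previous item absorbs F        (mergeWithPrev)
//     [FREE][F][FREE] -> F absorbs next, then previous absorbs F
//
// Only the final merged range is (re)registered in m_FreeSuballocationsBySize,
// which keeps that sorted vector consistent with the suballocation list.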
3286 
3287 void VmaBlock::Free(const VmaAllocation allocation)
3288 {
3289  const VkDeviceSize allocationOffset = allocation->GetOffset();
3290  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
3291  suballocItem != m_Suballocations.end();
3292  ++suballocItem)
3293  {
3294  VmaSuballocation& suballoc = *suballocItem;
3295  if(suballoc.offset == allocationOffset)
3296  {
3297  FreeSuballocation(suballocItem);
3298  VMA_HEAVY_ASSERT(Validate());
3299  return;
3300  }
3301  }
3302  VMA_ASSERT(0 && "Not found!");
3303 }
3304 
3305 #if VMA_STATS_STRING_ENABLED
3306 
3307 void VmaBlock::PrintDetailedMap(class VmaStringBuilder& sb) const
3308 {
3309  sb.Add("{\n\t\t\t\"Bytes\": ");
3310  sb.AddNumber(m_Size);
3311  sb.Add(",\n\t\t\t\"FreeBytes\": ");
3312  sb.AddNumber(m_SumFreeSize);
3313  sb.Add(",\n\t\t\t\"Suballocations\": ");
3314  sb.AddNumber(m_Suballocations.size());
3315  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
3316  sb.AddNumber(m_FreeCount);
3317  sb.Add(",\n\t\t\t\"SuballocationList\": [");
3318 
3319  size_t i = 0;
3320  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
3321  suballocItem != m_Suballocations.cend();
3322  ++suballocItem, ++i)
3323  {
3324  if(i > 0)
3325  {
3326  sb.Add(",\n\t\t\t\t{ \"Type\": ");
3327  }
3328  else
3329  {
3330  sb.Add("\n\t\t\t\t{ \"Type\": ");
3331  }
3332  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
3333  sb.Add(", \"Size\": ");
3334  sb.AddNumber(suballocItem->size);
3335  sb.Add(", \"Offset\": ");
3336  sb.AddNumber(suballocItem->offset);
3337  sb.Add(" }");
3338  }
3339 
3340  sb.Add("\n\t\t\t]\n\t\t}");
3341 }
3342 
3343 #endif // #if VMA_STATS_STRING_ENABLED
3344 
3345 void VmaBlock::MergeFreeWithNext(VmaSuballocationList::iterator item)
3346 {
3347  VMA_ASSERT(item != m_Suballocations.end());
3348  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3349 
3350  VmaSuballocationList::iterator nextItem = item;
3351  ++nextItem;
3352  VMA_ASSERT(nextItem != m_Suballocations.end());
3353  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
3354 
3355  item->size += nextItem->size;
3356  --m_FreeCount;
3357  m_Suballocations.erase(nextItem);
3358 }
3359 
3360 void VmaBlock::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
3361 {
3362  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3363  VMA_ASSERT(item->size > 0);
3364 
3365  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3366  {
3367  if(m_FreeSuballocationsBySize.empty())
3368  {
3369  m_FreeSuballocationsBySize.push_back(item);
3370  }
3371  else
3372  {
3373  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3374  m_FreeSuballocationsBySize.data(),
3375  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3376  item,
3377  VmaSuballocationItemSizeLess());
3378  size_t index = it - m_FreeSuballocationsBySize.data();
3379  VectorInsert(m_FreeSuballocationsBySize, index, item);
3380  }
3381  }
3382 }
3383 
3384 void VmaBlock::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
3385 {
3386  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
3387  VMA_ASSERT(item->size > 0);
3388 
3389  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
3390  {
3391  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
3392  m_FreeSuballocationsBySize.data(),
3393  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
3394  item,
3395  VmaSuballocationItemSizeLess());
3396  for(size_t index = it - m_FreeSuballocationsBySize.data();
3397  index < m_FreeSuballocationsBySize.size();
3398  ++index)
3399  {
3400  if(m_FreeSuballocationsBySize[index] == item)
3401  {
3402  VectorRemove(m_FreeSuballocationsBySize, index);
3403  return;
3404  }
3405  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
3406  }
3407  VMA_ASSERT(0 && "Not found.");
3408  }
3409 }
3410 
3411 static void InitStatInfo(VmaStatInfo& outInfo)
3412 {
3413  memset(&outInfo, 0, sizeof(outInfo));
3414  outInfo.SuballocationSizeMin = UINT64_MAX;
3415  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3416 }
3417 
3418 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaBlock& alloc)
3419 {
3420  outInfo.AllocationCount = 1;
3421 
3422  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
3423  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
3424  outInfo.UnusedRangeCount = alloc.m_FreeCount;
3425 
3426  outInfo.UnusedBytes = alloc.m_SumFreeSize;
3427  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
3428 
3429  outInfo.SuballocationSizeMin = UINT64_MAX;
3430  outInfo.SuballocationSizeMax = 0;
3431  outInfo.UnusedRangeSizeMin = UINT64_MAX;
3432  outInfo.UnusedRangeSizeMax = 0;
3433 
3434  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
3435  suballocItem != alloc.m_Suballocations.cend();
3436  ++suballocItem)
3437  {
3438  const VmaSuballocation& suballoc = *suballocItem;
3439  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
3440  {
3441  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
3442  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
3443  }
3444  else
3445  {
3446  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
3447  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
3448  }
3449  }
3450 }
3451 
3452 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
3453 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
3454 {
3455  inoutInfo.AllocationCount += srcInfo.AllocationCount;
3456  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
3457  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
3458  inoutInfo.UsedBytes += srcInfo.UsedBytes;
3459  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
3460  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
3461  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
3462  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
3463  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
3464 }
3465 
3466 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
3467 {
3468  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
3469  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
3470  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
3471  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
3472 }
3473 
3474 VmaBlockVector::VmaBlockVector(VmaAllocator hAllocator) :
3475  m_hAllocator(hAllocator),
3476  m_Blocks(VmaStlAllocator<VmaBlock*>(hAllocator->GetAllocationCallbacks()))
3477 {
3478 }
3479 
3480 VmaBlockVector::~VmaBlockVector()
3481 {
3482  for(size_t i = m_Blocks.size(); i--; )
3483  {
3484  m_Blocks[i]->Destroy(m_hAllocator);
3485  vma_delete(m_hAllocator, m_Blocks[i]);
3486  }
3487 }
3488 
3489 void VmaBlockVector::Remove(VmaBlock* pBlock)
3490 {
3491  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
3492  {
3493  if(m_Blocks[blockIndex] == pBlock)
3494  {
3495  VectorRemove(m_Blocks, blockIndex);
3496  return;
3497  }
3498  }
3499  VMA_ASSERT(0);
3500 }
3501 
3502 void VmaBlockVector::IncrementallySortBlocks()
3503 {
3504  // Bubble sort only until first swap.
3505  for(size_t i = 1; i < m_Blocks.size(); ++i)
3506  {
3507  if(m_Blocks[i - 1]->m_SumFreeSize > m_Blocks[i]->m_SumFreeSize)
3508  {
3509  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
3510  return;
3511  }
3512  }
3513 }
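
// A short example of why a single swap per call suffices here: each
// allocation or free changes m_SumFreeSize of one block at a time, so
// m_Blocks stays nearly sorted. E.g. for free sizes { 16, 64, 32, 128 } one
// call swaps 64 and 32, giving the fully sorted { 16, 32, 64, 128 }; the
// sorting cost is thus amortized across many allocations.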
3514 
3515 #if VMA_STATS_STRING_ENABLED
3516 
3517 void VmaBlockVector::PrintDetailedMap(class VmaStringBuilder& sb) const
3518 {
3519  for(size_t i = 0; i < m_Blocks.size(); ++i)
3520  {
3521  if(i > 0)
3522  {
3523  sb.Add(",\n\t\t");
3524  }
3525  else
3526  {
3527  sb.Add("\n\t\t");
3528  }
3529  m_Blocks[i]->PrintDetailedMap(sb);
3530  }
3531 }
3532 
3533 #endif // #if VMA_STATS_STRING_ENABLED
3534 
3535 void VmaBlockVector::UnmapPersistentlyMappedMemory()
3536 {
3537  for(size_t i = m_Blocks.size(); i--; )
3538  {
3539  VmaBlock* pBlock = m_Blocks[i];
3540  if(pBlock->m_pMappedData != VMA_NULL)
3541  {
3542  VMA_ASSERT(pBlock->m_PersistentMap);
3543  vkUnmapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory);
3544  pBlock->m_pMappedData = VMA_NULL;
3545  }
3546  }
3547 }
3548 
3549 VkResult VmaBlockVector::MapPersistentlyMappedMemory()
3550 {
3551  VkResult finalResult = VK_SUCCESS;
3552  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
3553  {
3554  VmaBlock* pBlock = m_Blocks[i];
3555  if(pBlock->m_PersistentMap)
3556  {
3557  VMA_ASSERT(pBlock->m_pMappedData == VMA_NULL);
3558  VkResult localResult = vkMapMemory(m_hAllocator->m_hDevice, pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &pBlock->m_pMappedData);
3559  if(localResult != VK_SUCCESS)
3560  {
3561  finalResult = localResult;
3562  }
3563  }
3564  }
3565  return finalResult;
3566 }
3567 
3568 void VmaBlockVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
3569 {
3570  for(uint32_t allocIndex = 0; allocIndex < m_Blocks.size(); ++allocIndex)
3571  {
3572  const VmaBlock* const pBlock = m_Blocks[allocIndex];
3573  VMA_ASSERT(pBlock);
3574  VMA_HEAVY_ASSERT(pBlock->Validate());
3575  VmaStatInfo allocationStatInfo;
3576  CalcAllocationStatInfo(allocationStatInfo, *pBlock);
3577  VmaAddStatInfo(pStats->total, allocationStatInfo);
3578  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
3579  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
3580  }
3581 }
3582 
3584 // VmaDefragmentator
3585 
3586 class VmaDefragmentator
3587 {
3588  VkDevice m_hDevice;
3589  const VkAllocationCallbacks* m_pAllocationCallbacks;
3590  VkDeviceSize m_BufferImageGranularity;
3591  uint32_t m_MemTypeIndex;
3592  VMA_BLOCK_VECTOR_TYPE m_BlockVectorType;
3593  VkDeviceSize m_BytesMoved;
3594  uint32_t m_AllocationsMoved;
3595 
3596  struct AllocationInfo
3597  {
3598  VmaAllocation m_hAllocation;
3599  VkBool32* m_pChanged;
3600 
3601  AllocationInfo() :
3602  m_hAllocation(VK_NULL_HANDLE),
3603  m_pChanged(VMA_NULL)
3604  {
3605  }
3606  };
3607 
3608  struct AllocationInfoSizeGreater
3609  {
3610  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3611  {
3612  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3613  }
3614  };
3615 
3616  // Used between AddAllocation and Defragment.
3617  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3618 
3619  struct BlockInfo
3620  {
3621  VmaBlock* m_pBlock;
3622  bool m_HasNonMovableAllocations;
3623  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3624 
3625  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3626  m_pBlock(VMA_NULL),
3627  m_HasNonMovableAllocations(true),
3628  m_Allocations(pAllocationCallbacks),
3629  m_pMappedDataForDefragmentation(VMA_NULL)
3630  {
3631  }
3632 
3633  void CalcHasNonMovableAllocations()
3634  {
3635  const size_t blockAllocCount =
3636  m_pBlock->m_Suballocations.size() - m_pBlock->m_FreeCount;
3637  const size_t defragmentAllocCount = m_Allocations.size();
3638  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3639  }
3640 
3641  void SortAllocationsBySizeDescending()
3642  {
3643  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3644  }
3645 
3646  VkResult EnsureMapping(VkDevice hDevice, void** ppMappedData)
3647  {
3648  // It has already been mapped for defragmentation.
3649  if(m_pMappedDataForDefragmentation)
3650  {
3651  *ppMappedData = m_pMappedDataForDefragmentation;
3652  return VK_SUCCESS;
3653  }
3654 
3655  // It is persistently mapped.
3656  if(m_pBlock->m_PersistentMap)
3657  {
3658  VMA_ASSERT(m_pBlock->m_pMappedData != VMA_NULL);
3659  *ppMappedData = m_pBlock->m_pMappedData;
3660  return VK_SUCCESS;
3661  }
3662 
3663  // Map on first usage.
3664  VkResult res = vkMapMemory(hDevice, m_pBlock->m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedDataForDefragmentation);
3665  *ppMappedData = m_pMappedDataForDefragmentation;
3666  return res;
3667  }
3668 
3669  void Unmap(VkDevice hDevice)
3670  {
3671  if(m_pMappedDataForDefragmentation != VMA_NULL)
3672  {
3673  vkUnmapMemory(hDevice, m_pBlock->m_hMemory);
3674  }
3675  }
3676 
3677  private:
3678  // Not null if mapped for defragmentation only, not persistently mapped.
3679  void* m_pMappedDataForDefragmentation;
3680  };
3681 
3682  struct BlockPointerLess
3683  {
3684  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaBlock* pRhsBlock) const
3685  {
3686  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3687  }
3688  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3689  {
3690  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3691  }
3692  };
3693 
3694  // 1. Blocks with some non-movable allocations go first.
3695  // 2. Blocks with smaller sumFreeSize go first.
3696  struct BlockInfoCompareMoveDestination
3697  {
3698  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3699  {
3700  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3701  {
3702  return true;
3703  }
3704  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3705  {
3706  return false;
3707  }
3708  if(pLhsBlockInfo->m_pBlock->m_SumFreeSize < pRhsBlockInfo->m_pBlock->m_SumFreeSize)
3709  {
3710  return true;
3711  }
3712  return false;
3713  }
3714  };
3715 
3716  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3717  BlockInfoVector m_Blocks;
3718 
3719  VkResult DefragmentRound(
3720  VkDeviceSize maxBytesToMove,
3721  uint32_t maxAllocationsToMove);
3722 
3723  static bool MoveMakesSense(
3724  size_t dstBlockIndex, VkDeviceSize dstOffset,
3725  size_t srcBlockIndex, VkDeviceSize srcOffset);
3726 
3727 public:
3728  VmaDefragmentator(
3729  VkDevice hDevice,
3730  const VkAllocationCallbacks* pAllocationCallbacks,
3731  VkDeviceSize bufferImageGranularity,
3732  uint32_t memTypeIndex,
3733  VMA_BLOCK_VECTOR_TYPE blockVectorType);
3734 
3735  ~VmaDefragmentator();
3736 
3737  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3738  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3739 
3740  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3741 
3742  VkResult Defragment(
3743  VmaBlockVector* pBlockVector,
3744  VkDeviceSize maxBytesToMove,
3745  uint32_t maxAllocationsToMove);
3746 };
3747 
3748 VmaDefragmentator::VmaDefragmentator(
3749  VkDevice hDevice,
3750  const VkAllocationCallbacks* pAllocationCallbacks,
3751  VkDeviceSize bufferImageGranularity,
3752  uint32_t memTypeIndex,
3753  VMA_BLOCK_VECTOR_TYPE blockVectorType) :
3754  m_hDevice(hDevice),
3755  m_pAllocationCallbacks(pAllocationCallbacks),
3756  m_BufferImageGranularity(bufferImageGranularity),
3757  m_MemTypeIndex(memTypeIndex),
3758  m_BlockVectorType(blockVectorType),
3759  m_BytesMoved(0),
3760  m_AllocationsMoved(0),
3761  m_Allocations(VmaStlAllocator<AllocationInfo>(pAllocationCallbacks)),
3762  m_Blocks(VmaStlAllocator<BlockInfo*>(pAllocationCallbacks))
3763 {
3764 }
3765 
3766 VmaDefragmentator::~VmaDefragmentator()
3767 {
3768  for(size_t i = m_Blocks.size(); i--; )
3769  {
3770  vma_delete(m_pAllocationCallbacks, m_Blocks[i]);
3771  }
3772 }
3773 
3774 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
3775 {
3776  AllocationInfo allocInfo;
3777  allocInfo.m_hAllocation = hAlloc;
3778  allocInfo.m_pChanged = pChanged;
3779  m_Allocations.push_back(allocInfo);
3780 }
3781 
3782 VkResult VmaDefragmentator::DefragmentRound(
3783  VkDeviceSize maxBytesToMove,
3784  uint32_t maxAllocationsToMove)
3785 {
3786  if(m_Blocks.empty())
3787  {
3788  return VK_SUCCESS;
3789  }
3790 
3791  size_t srcBlockIndex = m_Blocks.size() - 1;
3792  size_t srcAllocIndex = SIZE_MAX;
3793  for(;;)
3794  {
3795  // 1. Find next allocation to move.
3796  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
3797  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
3798  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
3799  {
3800  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
3801  {
3802  // Finished: no more allocations to process.
3803  if(srcBlockIndex == 0)
3804  {
3805  return VK_SUCCESS;
3806  }
3807  else
3808  {
3809  --srcBlockIndex;
3810  srcAllocIndex = SIZE_MAX;
3811  }
3812  }
3813  else
3814  {
3815  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
3816  }
3817  }
3818 
3819  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
3820  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
3821 
3822  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
3823  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
3824  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
3825  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
3826 
3827  // 2. Try to find new place for this allocation in preceding or current block.
3828  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
3829  {
3830  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
3831  VmaAllocationRequest dstAllocRequest;
3832  if(pDstBlockInfo->m_pBlock->CreateAllocationRequest(
3833  m_BufferImageGranularity,
3834  size,
3835  alignment,
3836  suballocType,
3837  &dstAllocRequest) &&
3838  MoveMakesSense(
3839  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
3840  {
3841  // Reached limit on number of allocations or bytes to move.
3842  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
3843  (m_BytesMoved + size > maxBytesToMove))
3844  {
3845  return VK_INCOMPLETE;
3846  }
3847 
3848  void* pDstMappedData = VMA_NULL;
3849  VkResult res = pDstBlockInfo->EnsureMapping(m_hDevice, &pDstMappedData);
3850  if(res != VK_SUCCESS)
3851  {
3852  return res;
3853  }
3854 
3855  void* pSrcMappedData = VMA_NULL;
3856  res = pSrcBlockInfo->EnsureMapping(m_hDevice, &pSrcMappedData);
3857  if(res != VK_SUCCESS)
3858  {
3859  return res;
3860  }
3861 
3862  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
3863  memcpy(
3864  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
3865  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
3866  size);
3867 
3868  pDstBlockInfo->m_pBlock->Alloc(dstAllocRequest, suballocType, size);
3869  pSrcBlockInfo->m_pBlock->Free(allocInfo.m_hAllocation);
3870 
3871  allocInfo.m_hAllocation->ChangeBlockAllocation(pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
3872 
3873  if(allocInfo.m_pChanged != VMA_NULL)
3874  {
3875  *allocInfo.m_pChanged = VK_TRUE;
3876  }
3877 
3878  ++m_AllocationsMoved;
3879  m_BytesMoved += size;
3880 
3881  VectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
3882 
3883  break;
3884  }
3885  }
3886 
3887  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
3888 
3889  if(srcAllocIndex > 0)
3890  {
3891  --srcAllocIndex;
3892  }
3893  else
3894  {
3895  if(srcBlockIndex > 0)
3896  {
3897  --srcBlockIndex;
3898  srcAllocIndex = SIZE_MAX;
3899  }
3900  else
3901  {
3902  return VK_SUCCESS;
3903  }
3904  }
3905  }
3906 }
3907 
3908 VkResult VmaDefragmentator::Defragment(
3909  VmaBlockVector* pBlockVector,
3910  VkDeviceSize maxBytesToMove,
3911  uint32_t maxAllocationsToMove)
3912 {
3913  if(m_Allocations.empty())
3914  {
3915  return VK_SUCCESS;
3916  }
3917 
3918  // Create block info for each block.
3919  const size_t blockCount = pBlockVector->m_Blocks.size();
3920  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3921  {
3922  BlockInfo* pBlockInfo = vma_new(m_pAllocationCallbacks, BlockInfo)(m_pAllocationCallbacks);
3923  pBlockInfo->m_pBlock = pBlockVector->m_Blocks[blockIndex];
3924  m_Blocks.push_back(pBlockInfo);
3925  }
3926 
3927  // Sort them by m_pBlock pointer value.
3928  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
3929 
3930  // Move allocation infos from m_Allocations to appropriate m_Blocks[i].m_Allocations.
3931  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
3932  {
3933  AllocationInfo& allocInfo = m_Allocations[allocIndex];
3934  VmaBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
3935  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
3936  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
3937  {
3938  (*it)->m_Allocations.push_back(allocInfo);
3939  }
3940  else
3941  {
3942  VMA_ASSERT(0);
3943  }
3944  }
3945  m_Allocations.clear();
3946 
3947  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3948  {
3949  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
3950  pBlockInfo->CalcHasNonMovableAllocations();
3951  pBlockInfo->SortAllocationsBySizeDescending();
3952  }
3953 
3954  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
3955  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
3956 
3957  // Execute defragmentation round (the main part).
3958  VkResult result = VK_SUCCESS;
3959  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
3960  {
3961  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
3962  }
3963 
3964  // Unmap blocks that were mapped for defragmentation.
3965  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
3966  {
3967  m_Blocks[blockIndex]->Unmap(m_hDevice);
3968  }
3969 
3970  return result;
3971 }
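
// A minimal sketch of how a caller is expected to drive this class per memory
// type (hypothetical code, mirroring what VmaAllocator_T::Defragment does):
//
//     VmaDefragmentator defragmentator(hDevice, pAllocationCallbacks,
//         bufferImageGranularity, memTypeIndex, blockVectorType);
//     for(size_t i = 0; i < allocationCount; ++i) // candidate allocations
//         defragmentator.AddAllocation(pAllocations[i], pAllocationsChanged + i);
//     VkResult res = defragmentator.Defragment(pBlockVector,
//         pDefragmentationInfo->maxBytesToMove,
//         pDefragmentationInfo->maxAllocationsToMove);
//     // GetBytesMoved() / GetAllocationsMoved() then feed pDefragmentationStats.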
3972 
3973 bool VmaDefragmentator::MoveMakesSense(
3974  size_t dstBlockIndex, VkDeviceSize dstOffset,
3975  size_t srcBlockIndex, VkDeviceSize srcOffset)
3976 {
3977  if(dstBlockIndex < srcBlockIndex)
3978  {
3979  return true;
3980  }
3981  if(dstBlockIndex > srcBlockIndex)
3982  {
3983  return false;
3984  }
3985  if(dstOffset < srcOffset)
3986  {
3987  return true;
3988  }
3989  return false;
3990 }
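
// In other words, a move is worthwhile only if it lowers the allocation's
// position in the lexicographic (blockIndex, offset) order. For example,
// moving from (block 2, offset 0) to (block 1, offset 4096) is accepted,
// while any move within the same block to an equal or higher offset is
// rejected, which guarantees defragmentation makes forward progress.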
3991 
3993 // VmaAllocator_T
3994 
3995 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
3996  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
3997  m_PhysicalDevice(pCreateInfo->physicalDevice),
3998  m_hDevice(pCreateInfo->device),
3999  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
4000  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
4001  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
4002  m_PreferredLargeHeapBlockSize(0),
4003  m_PreferredSmallHeapBlockSize(0),
4004  m_UnmapPersistentlyMappedMemoryCounter(0)
4005 {
4006  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
4007 
4008  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
4009  memset(&m_MemProps, 0, sizeof(m_MemProps));
4010  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
4011 
4012  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
4013  memset(&m_HasEmptyBlock, 0, sizeof(m_HasEmptyBlock));
4014  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
4015 
4016  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
4017  {
4018  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
4019  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
4020  }
4021 
4022  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
4023  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
4024  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
4025  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
4026 
4027  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
4028  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
4029 
4030  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4031  {
4032  for(size_t j = 0; j < VMA_BLOCK_VECTOR_TYPE_COUNT; ++j)
4033  {
4034  m_pBlockVectors[i][j] = vma_new(this, VmaBlockVector)(this);
4035  m_pOwnAllocations[i][j] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
4036  }
4037  }
4038 }
4039 
4040 VmaAllocator_T::~VmaAllocator_T()
4041 {
4042  for(size_t i = GetMemoryTypeCount(); i--; )
4043  {
4044  for(size_t j = VMA_BLOCK_VECTOR_TYPE_COUNT; j--; )
4045  {
4046  vma_delete(this, m_pOwnAllocations[i][j]);
4047  vma_delete(this, m_pBlockVectors[i][j]);
4048  }
4049  }
4050 }
4051 
4052 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
4053 {
4054  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
4055  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
4056  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
4057 }
4058 
4059 VkResult VmaAllocator_T::AllocateMemoryOfType(
4060  const VkMemoryRequirements& vkMemReq,
4061  const VmaMemoryRequirements& vmaMemReq,
4062  uint32_t memTypeIndex,
4063  VmaSuballocationType suballocType,
4064  VmaAllocation* pAllocation)
4065 {
4066  VMA_ASSERT(pAllocation != VMA_NULL);
4067  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
4068 
4069  const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
4070  // Heuristic: Allocate own memory if the requested size is greater than half of the preferred block size.
4071  const bool ownMemory =
4072  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 ||
4073  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
4074  ((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) == 0 &&
4075  vkMemReq.size > preferredBlockSize / 2);
4076 
4077  if(ownMemory)
4078  {
4079  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4080  {
4081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4082  }
4083  else
4084  {
4085  return AllocateOwnMemory(
4086  vkMemReq.size,
4087  suballocType,
4088  memTypeIndex,
4089  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4090  vmaMemReq.pUserData,
4091  pAllocation);
4092  }
4093  }
4094  else
4095  {
4096  uint32_t blockVectorType = VmaMemoryRequirementFlagsToBlockVectorType(vmaMemReq.flags);
4097 
4098  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4099  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4100  VMA_ASSERT(blockVector);
4101 
4102  // 1. Search existing blocks.
4103  // Forward order - prefer blocks with the smallest amount of free space.
4104  for(size_t allocIndex = 0; allocIndex < blockVector->m_Blocks.size(); ++allocIndex)
4105  {
4106  VmaBlock* const pBlock = blockVector->m_Blocks[allocIndex];
4107  VMA_ASSERT(pBlock);
4108  VmaAllocationRequest allocRequest = {};
4109  // Check if we can allocate from pBlock.
4110  if(pBlock->CreateAllocationRequest(
4111  GetBufferImageGranularity(),
4112  vkMemReq.size,
4113  vkMemReq.alignment,
4114  suballocType,
4115  &allocRequest))
4116  {
4117  // We no longer have an empty block.
4118  if(pBlock->IsEmpty())
4119  {
4120  m_HasEmptyBlock[memTypeIndex] = false;
4121  }
4122  // Allocate from this pBlock.
4123  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4124  *pAllocation = vma_new(this, VmaAllocation_T)();
4125  (*pAllocation)->InitBlockAllocation(
4126  pBlock,
4127  allocRequest.offset,
4128  vkMemReq.alignment,
4129  vkMemReq.size,
4130  suballocType,
4131  vmaMemReq.pUserData);
4132  VMA_HEAVY_ASSERT(pBlock->Validate());
4133  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
4134  return VK_SUCCESS;
4135  }
4136  }
4137 
4138  // 2. Create a new block.
4139  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4140  {
4141  VMA_DEBUG_LOG(" FAILED due to VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT");
4142  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4143  }
4144  else
4145  {
4146  // Start with full preferredBlockSize.
4147  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4148  allocInfo.memoryTypeIndex = memTypeIndex;
4149  allocInfo.allocationSize = preferredBlockSize;
4150  VkDeviceMemory mem = VK_NULL_HANDLE;
4151  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4152  if(res < 0)
4153  {
4154  // 3. Try half the size.
4155  allocInfo.allocationSize /= 2;
4156  if(allocInfo.allocationSize >= vkMemReq.size)
4157  {
4158  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4159  if(res < 0)
4160  {
4161  // 4. Try quarter the size.
4162  allocInfo.allocationSize /= 2;
4163  if(allocInfo.allocationSize >= vkMemReq.size)
4164  {
4165  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
4166  }
4167  }
4168  }
4169  }
4170  if(res < 0)
4171  {
4172  // 5. Try OwnAlloc.
4173  res = AllocateOwnMemory(
4174  vkMemReq.size,
4175  suballocType,
4176  memTypeIndex,
4177  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0, // map
4178  vmaMemReq.pUserData,
4179  pAllocation);
4180  if(res == VK_SUCCESS)
4181  {
4182  // Succeeded: AllocateOwnMemory function already filled pAllocation, nothing more to do here.
4183  VMA_DEBUG_LOG(" Allocated as OwnMemory");
4184  return VK_SUCCESS;
4185  }
4186  else
4187  {
4188  // Everything failed: Return error code.
4189  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4190  return res;
4191  }
4192  }
4193 
4194  // New VkDeviceMemory successfully created.
4195 
4196  // Map memory if needed.
4197  void* pMappedData = VMA_NULL;
4198  const bool persistentMap = (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0;
4199  if(persistentMap && m_UnmapPersistentlyMappedMemoryCounter == 0)
4200  {
4201  res = vkMapMemory(m_hDevice, mem, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4202  if(res < 0)
4203  {
4204  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4205  vkFreeMemory(m_hDevice, mem, GetAllocationCallbacks());
4206  return res;
4207  }
4208  }
4209 
4210  // Callback.
4211  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4212  {
4213  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, mem, allocInfo.allocationSize);
4214  }
4215 
4216  // Create a new VmaBlock for it.
4217  VmaBlock* const pBlock = vma_new(this, VmaBlock)(this);
4218  pBlock->Init(
4219  memTypeIndex,
4220  (VMA_BLOCK_VECTOR_TYPE)blockVectorType,
4221  mem,
4222  allocInfo.allocationSize,
4223  persistentMap,
4224  pMappedData);
4225 
4226  blockVector->m_Blocks.push_back(pBlock);
4227 
4228  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
4229  VmaAllocationRequest allocRequest = {};
4230  allocRequest.freeSuballocationItem = pBlock->m_Suballocations.begin();
4231  allocRequest.offset = 0;
4232  pBlock->Alloc(allocRequest, suballocType, vkMemReq.size);
4233  *pAllocation = vma_new(this, VmaAllocation_T)();
4234  (*pAllocation)->InitBlockAllocation(
4235  pBlock,
4236  allocRequest.offset,
4237  vkMemReq.alignment,
4238  vkMemReq.size,
4239  suballocType,
4240  vmaMemReq.pUserData);
4241  VMA_HEAVY_ASSERT(pBlock->Validate());
4242  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
4243  return VK_SUCCESS;
4244  }
4245  }
4246 }
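
// To summarize the strategy above: (1) try to suballocate from an existing
// block, in forward order so blocks with the least free space are preferred;
// (2) otherwise create a new block of preferredBlockSize; (3)-(4) if that
// fails, retry at 1/2 and then 1/4 of that size, as long as the request still
// fits; (5) finally fall back to a dedicated "own" VkDeviceMemory allocation,
// and only report failure if that also fails.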
4247 
4248 VkResult VmaAllocator_T::AllocateOwnMemory(
4249  VkDeviceSize size,
4250  VmaSuballocationType suballocType,
4251  uint32_t memTypeIndex,
4252  bool map,
4253  void* pUserData,
4254  VmaAllocation* pAllocation)
4255 {
4256  VMA_ASSERT(pAllocation);
4257 
4258  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
4259  allocInfo.memoryTypeIndex = memTypeIndex;
4260  allocInfo.allocationSize = size;
4261 
4262  // Allocate VkDeviceMemory.
4263  VkDeviceMemory hMemory = VK_NULL_HANDLE;
4264  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &hMemory);
4265  if(res < 0)
4266  {
4267  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
4268  return res;
4269  }
4270 
4271  void* pMappedData = nullptr;
4272  if(map)
4273  {
4274  if(m_UnmapPersistentlyMappedMemoryCounter == 0)
4275  {
4276  res = vkMapMemory(m_hDevice, hMemory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
4277  if(res < 0)
4278  {
4279  VMA_DEBUG_LOG(" vkMapMemory FAILED");
4280  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4281  return res;
4282  }
4283  }
4284  }
4285 
4286  // Callback.
4287  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
4288  {
4289  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, memTypeIndex, hMemory, size);
4290  }
4291 
4292  *pAllocation = vma_new(this, VmaAllocation_T)();
4293  (*pAllocation)->InitOwnAllocation(memTypeIndex, hMemory, suballocType, map, pMappedData, size, pUserData);
4294 
4295  // Register it in m_pOwnAllocations.
4296  {
4297  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4298  AllocationVectorType* pOwnAllocations = m_pOwnAllocations[memTypeIndex][map ? VMA_BLOCK_VECTOR_TYPE_MAPPED : VMA_BLOCK_VECTOR_TYPE_UNMAPPED];
4299  VMA_ASSERT(pOwnAllocations);
4300  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4301  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4302  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4303  pOwnAllocationsBeg,
4304  pOwnAllocationsEnd,
4305  *pAllocation,
4306  VmaPointerLess()) - pOwnAllocationsBeg;
4307  VectorInsert(*pOwnAllocations, indexToInsert, *pAllocation);
4308  }
4309 
4310  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
4311 
4312  return VK_SUCCESS;
4313 }
4314 
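// Note: the registration step above keeps each m_pOwnAllocations vector sorted
// by pointer value, so FreeOwnMemory can locate an entry by binary search.
// The same insert-sorted idiom, sketched with the standard library:
//
//   #include <algorithm>
//   #include <vector>
//
//   template<typename T>
//   void InsertSorted(std::vector<T>& v, const T& value)
//   {
//       // std::lower_bound returns the first element not less than value,
//       // exactly like VmaBinaryFindFirstNotLess above.
//       v.insert(std::lower_bound(v.begin(), v.end(), value), value);
//   }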
4315 VkResult VmaAllocator_T::AllocateMemory(
4316  const VkMemoryRequirements& vkMemReq,
4317  const VmaMemoryRequirements& vmaMemReq,
4318  VmaSuballocationType suballocType,
4319  VmaAllocation* pAllocation)
4320 {
4321  if((vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT) != 0 &&
4322  (vmaMemReq.flags & VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT) != 0)
4323  {
4324  VMA_ASSERT(0 && "Specifying VMA_MEMORY_REQUIREMENT_OWN_MEMORY_BIT together with VMA_MEMORY_REQUIREMENT_NEVER_ALLOCATE_BIT makes no sense.");
4325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4326  }
4327 
4328  // Bit mask of Vulkan memory types acceptable for this allocation.
4329  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
4330  uint32_t memTypeIndex = UINT32_MAX;
4331  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4332  if(res == VK_SUCCESS)
4333  {
4334  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4335  // Succeeded on first try.
4336  if(res == VK_SUCCESS)
4337  {
4338  return res;
4339  }
4340  // Allocation from this memory type failed. Try other compatible memory types.
4341  else
4342  {
4343  for(;;)
4344  {
4345  // Remove old memTypeIndex from list of possibilities.
4346  memoryTypeBits &= ~(1u << memTypeIndex);
4347  // Find alternative memTypeIndex.
4348  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
4349  if(res == VK_SUCCESS)
4350  {
4351  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pAllocation);
4352  // Allocation from this alternative memory type succeeded.
4353  if(res == VK_SUCCESS)
4354  {
4355  return res;
4356  }
4357  // else: Allocation from this memory type failed. Try next one - next loop iteration.
4358  }
4359  // No other matching memory type index could be found.
4360  else
4361  {
4362  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
4363  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4364  }
4365  }
4366  }
4367  }
4368  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
4369  else
4370  return res;
4371 }
4372 
4373 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
4374 {
4375  VMA_ASSERT(allocation);
4376 
4377  if(allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4378  {
4379  VmaBlock* pBlockToDelete = VMA_NULL;
4380 
4381  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4382  const VMA_BLOCK_VECTOR_TYPE blockVectorType = allocation->GetBlockVectorType();
4383  {
4384  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4385 
4386  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4387  VmaBlock* pBlock = allocation->GetBlock();
4388 
4389  pBlock->Free(allocation);
4390  VMA_HEAVY_ASSERT(pBlock->Validate());
4391 
4392  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
4393 
4394  // pBlock became empty after this deallocation.
4395  if(pBlock->IsEmpty())
4396  {
4397  // There is already another empty block. We don't want to have two, so delete this one.
4398  if(m_HasEmptyBlock[memTypeIndex])
4399  {
4400  pBlockToDelete = pBlock;
4401  pBlockVector->Remove(pBlock);
4402  }
4403  // This is now the first empty block.
4404  else
4405  {
4406  m_HasEmptyBlock[memTypeIndex] = true;
4407  }
4408  }
4409  // Re-sort blocks while the mutex is still held, as their order may have changed.
4410  pBlockVector->IncrementallySortBlocks();
4411  }
4412  // Destruction of the empty block is deferred until this point, outside of the
4413  // mutex lock, for performance reasons.
4414  if(pBlockToDelete != VMA_NULL)
4415  {
4416  VMA_DEBUG_LOG("  Deleted empty block");
4417  pBlockToDelete->Destroy(this);
4418  vma_delete(this, pBlockToDelete);
4419  }
4420 
4421  vma_delete(this, allocation);
4422  }
4423  else // VmaAllocation_T::ALLOCATION_TYPE_OWN
4424  {
4425  FreeOwnMemory(allocation);
4426  }
4427 }
4428 
4429 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
4430 {
4431  InitStatInfo(pStats->total);
4432  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
4433  InitStatInfo(pStats->memoryType[i]);
4434  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
4435  InitStatInfo(pStats->memoryHeap[i]);
4436 
4437  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4438  {
4439  VmaMutexLock allocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4440  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4441  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4442  {
4443  const VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4444  VMA_ASSERT(pBlockVector);
4445  pBlockVector->AddStats(pStats, memTypeIndex, heapIndex);
4446  }
4447  }
4448 
4449  VmaPostprocessCalcStatInfo(pStats->total);
4450  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
4451  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
4452  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
4453  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
4454 }
4455 
4456 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
4457 
4458 void VmaAllocator_T::UnmapPersistentlyMappedMemory()
4459 {
4460  if(m_UnmapPersistentlyMappedMemoryCounter++ == 0)
4461  {
4462  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4463  {
4464  for(size_t memTypeIndex = m_MemProps.memoryTypeCount; memTypeIndex--; )
4465  {
4466  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4467  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4468  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4469  {
4470  // Process OwnAllocations.
4471  {
4472  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4473  AllocationVectorType* pOwnAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4474  for(size_t ownAllocIndex = pOwnAllocationsVector->size(); ownAllocIndex--; )
4475  {
4476  VmaAllocation hAlloc = (*pOwnAllocationsVector)[ownAllocIndex];
4477  hAlloc->OwnAllocUnmapPersistentlyMappedMemory(m_hDevice);
4478  }
4479  }
4480 
4481  // Process normal Allocations.
4482  {
4483  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4484  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4485  pBlockVector->UnmapPersistentlyMappedMemory();
4486  }
4487  }
4488  }
4489  }
4490  }
4491 }
4492 
4493 VkResult VmaAllocator_T::MapPersistentlyMappedMemory()
4494 {
4495  VMA_ASSERT(m_UnmapPersistentlyMappedMemoryCounter > 0);
4496  if(--m_UnmapPersistentlyMappedMemoryCounter == 0)
4497  {
4498  VkResult finalResult = VK_SUCCESS;
4499  if(m_PhysicalDeviceProperties.vendorID == VMA_VENDOR_ID_AMD)
4500  {
4501  for(size_t memTypeIndex = 0; memTypeIndex < m_MemProps.memoryTypeCount; ++memTypeIndex)
4502  {
4503  const VkMemoryPropertyFlags memFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
4504  if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0 &&
4505  (memFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4506  {
4507  // Process OwnAllocations.
4508  {
4509  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4510  AllocationVectorType* pAllocationsVector = m_pOwnAllocations[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4511  for(size_t ownAllocIndex = 0, ownAllocCount = pAllocationsVector->size(); ownAllocIndex < ownAllocCount; ++ownAllocIndex)
4512  {
4513  VmaAllocation hAlloc = (*pAllocationsVector)[ownAllocIndex];
4514  hAlloc->OwnAllocMapPersistentlyMappedMemory(m_hDevice);
4515  }
4516  }
4517 
4518  // Process normal Allocations.
4519  {
4520  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4521  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][VMA_BLOCK_VECTOR_TYPE_MAPPED];
4522  VkResult localResult = pBlockVector->MapPersistentlyMappedMemory();
4523  if(localResult != VK_SUCCESS)
4524  {
4525  finalResult = localResult;
4526  }
4527  }
4528  }
4529  }
4530  }
4531  return finalResult;
4532  }
4533  else
4534  return VK_SUCCESS;
4535 }
4536 
4537 VkResult VmaAllocator_T::Defragment(
4538  VmaAllocation* pAllocations,
4539  size_t allocationCount,
4540  VkBool32* pAllocationsChanged,
4541  const VmaDefragmentationInfo* pDefragmentationInfo,
4542  VmaDefragmentationStats* pDefragmentationStats)
4543 {
4544  if(pAllocationsChanged != VMA_NULL)
4545  {
4546  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
4547  }
4548  if(pDefragmentationStats != VMA_NULL)
4549  {
4550  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
4551  }
4552 
4553  if(m_UnmapPersistentlyMappedMemoryCounter > 0)
4554  {
4555  VMA_DEBUG_LOG("ERROR: Cannot defragment when inside vmaUnmapPersistentlyMappedMemory.");
4556  return VK_ERROR_MEMORY_MAP_FAILED;
4557  }
4558 
4559  // Initialize defragmentators per memory type.
4560  const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
4561  VmaDefragmentator* pDefragmentators[VK_MAX_MEMORY_TYPES][VMA_BLOCK_VECTOR_TYPE_COUNT];
4562  memset(pDefragmentators, 0, sizeof(pDefragmentators));
4563  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4564  {
4565  // Only HOST_VISIBLE memory types can be defragmented.
4566  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4567  {
4568  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4569  {
4570  pDefragmentators[memTypeIndex][blockVectorType] = vma_new(this, VmaDefragmentator)(
4571  m_hDevice,
4572  GetAllocationCallbacks(),
4573  bufferImageGranularity,
4574  memTypeIndex,
4575  (VMA_BLOCK_VECTOR_TYPE)blockVectorType);
4576  }
4577  }
4578  }
4579 
4580  // Dispatch pAllocations among defragmentators.
4581  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
4582  {
4583  VmaAllocation hAlloc = pAllocations[allocIndex];
4584  VMA_ASSERT(hAlloc);
4585  if(hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
4586  {
4587  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
4588  // Only HOST_VISIBLE memory types can be defragmented.
4589  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4590  {
4591  const VMA_BLOCK_VECTOR_TYPE blockVectorType = hAlloc->GetBlockVectorType();
4592  VkBool32* pChanged = (pAllocationsChanged != VMA_NULL) ?
4593  &pAllocationsChanged[allocIndex] : VMA_NULL;
4594  pDefragmentators[memTypeIndex][blockVectorType]->AddAllocation(hAlloc, pChanged);
4595  }
4596  // else: skip this allocation, cannot move it.
4597  }
4598  // else ALLOCATION_TYPE_OWN: skip this allocation, nothing to defragment.
4599  }
4600 
4601  VkResult result = VK_SUCCESS;
4602 
4603  // Main processing.
4604  VkDeviceSize maxBytesToMove = UINT64_MAX; // VkDeviceSize is always 64-bit.
4605  uint32_t maxAllocationsToMove = UINT32_MAX;
4606  if(pDefragmentationInfo != VMA_NULL)
4607  {
4608  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
4609  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
4610  }
4611  for(uint32_t memTypeIndex = 0;
4612  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
4613  ++memTypeIndex)
4614  {
4615  // Only HOST_VISIBLE memory types can be defragmented.
4616  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4617  {
4618  VmaMutexLock lock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4619 
4620  for(uint32_t blockVectorType = 0;
4621  (blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT) && (result == VK_SUCCESS);
4622  ++blockVectorType)
4623  {
4624  VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex][blockVectorType];
4625 
4626  // Defragment.
4627  result = pDefragmentators[memTypeIndex][blockVectorType]->Defragment(pBlockVector, maxBytesToMove, maxAllocationsToMove);
4628 
4629  // Accumulate statistics.
4630  if(pDefragmentationStats != VMA_NULL)
4631  {
4632  const VkDeviceSize bytesMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetBytesMoved();
4633  const uint32_t allocationsMoved = pDefragmentators[memTypeIndex][blockVectorType]->GetAllocationsMoved();
4634  pDefragmentationStats->bytesMoved += bytesMoved;
4635  pDefragmentationStats->allocationsMoved += allocationsMoved;
4636  VMA_ASSERT(bytesMoved <= maxBytesToMove);
4637  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
4638  maxBytesToMove -= bytesMoved;
4639  maxAllocationsToMove -= allocationsMoved;
4640  }
4641 
4642  // Free empty blocks.
4643  for(size_t blockIndex = pBlockVector->m_Blocks.size(); blockIndex--; )
4644  {
4645  VmaBlock* pBlock = pBlockVector->m_Blocks[blockIndex];
4646  if(pBlock->IsEmpty())
4647  {
4648  if(pDefragmentationStats != VMA_NULL)
4649  {
4650  ++pDefragmentationStats->deviceMemoryBlocksFreed;
4651  pDefragmentationStats->bytesFreed += pBlock->m_Size;
4652  }
4653 
4654  VectorRemove(pBlockVector->m_Blocks, blockIndex);
4655  pBlock->Destroy(this);
4656  vma_delete(this, pBlock);
4657  }
4658  }
4659 
4660  // All block vector types processed: we can be sure that all empty blocks have been freed.
4661  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_COUNT - 1)
4662  {
4663  m_HasEmptyBlock[memTypeIndex] = false;
4664  }
4665  }
4666  }
4667  }
4668 
4669  // Destroy defragmentators.
4670  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
4671  {
4672  for(size_t blockVectorType = VMA_BLOCK_VECTOR_TYPE_COUNT; blockVectorType--; )
4673  {
4674  vma_delete(this, pDefragmentators[memTypeIndex][blockVectorType]);
4675  }
4676  }
4677 
4678  return result;
4679 }
4680 
4681 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
4682 {
4683  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
4684  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
4685  pAllocationInfo->offset = hAllocation->GetOffset();
4686  pAllocationInfo->size = hAllocation->GetSize();
4687  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
4688  pAllocationInfo->pUserData = hAllocation->GetUserData();
4689 }
4690 
4691 void VmaAllocator_T::FreeOwnMemory(VmaAllocation allocation)
4692 {
4693  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_OWN);
4694 
4695  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
4696  {
4697  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4698  AllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex][allocation->GetBlockVectorType()];
4699  VMA_ASSERT(pOwnAllocations);
4700  VmaAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
4701  VmaAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
4702  VmaAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
4703  pOwnAllocationsBeg,
4704  pOwnAllocationsEnd,
4705  allocation,
4706  VmaPointerLess());
4707  if(pOwnAllocationIt != pOwnAllocationsEnd)
4708  {
4709  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
4710  VectorRemove(*pOwnAllocations, ownAllocationIndex);
4711  }
4712  else
4713  {
4714  VMA_ASSERT(0);
4715  }
4716  }
4717 
4718  VkDeviceMemory hMemory = allocation->GetMemory();
4719 
4720  // Callback.
4721  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
4722  {
4723  (*m_DeviceMemoryCallbacks.pfnFree)(this, memTypeIndex, hMemory, allocation->GetSize());
4724  }
4725 
4726  if(allocation->GetMappedData() != VMA_NULL)
4727  {
4728  vkUnmapMemory(m_hDevice, hMemory);
4729  }
4730 
4731  vkFreeMemory(m_hDevice, hMemory, GetAllocationCallbacks());
4732 
4733  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
4734 
4735  vma_delete(this, allocation);
4736 }
4737 
4738 #if VMA_STATS_STRING_ENABLED
4739 
4740 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
4741 {
4742  bool ownAllocationsStarted = false;
4743  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4744  {
4745  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex], m_UseMutex);
4746  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4747  {
4748  AllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex][blockVectorType];
4749  VMA_ASSERT(pOwnAllocVector);
4750  if(pOwnAllocVector->empty() == false)
4751  {
4752  if(ownAllocationsStarted)
4753  {
4754  sb.Add(",\n\t\"Type ");
4755  }
4756  else
4757  {
4758  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
4759  ownAllocationsStarted = true;
4760  }
4761  sb.AddNumber(memTypeIndex);
4762  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4763  {
4764  sb.Add(" Mapped");
4765  }
4766  sb.Add("\": [");
4767 
4768  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
4769  {
4770  const VmaAllocation hAlloc = (*pOwnAllocVector)[i];
4771  if(i > 0)
4772  {
4773  sb.Add(",\n\t\t{ \"Size\": ");
4774  }
4775  else
4776  {
4777  sb.Add("\n\t\t{ \"Size\": ");
4778  }
4779  sb.AddNumber(hAlloc->GetSize());
4780  sb.Add(", \"Type\": ");
4781  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
4782  sb.Add(" }");
4783  }
4784 
4785  sb.Add("\n\t]");
4786  }
4787  }
4788  }
4789  if(ownAllocationsStarted)
4790  {
4791  sb.Add("\n}");
4792  }
4793 
4794  {
4795  bool allocationsStarted = false;
4796  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
4797  {
4798  VmaMutexLock globalAllocationsLock(m_BlocksMutex[memTypeIndex], m_UseMutex);
4799  for(uint32_t blockVectorType = 0; blockVectorType < VMA_BLOCK_VECTOR_TYPE_COUNT; ++blockVectorType)
4800  {
4801  if(m_pBlockVectors[memTypeIndex][blockVectorType]->IsEmpty() == false)
4802  {
4803  if(allocationsStarted)
4804  {
4805  sb.Add(",\n\t\"Type ");
4806  }
4807  else
4808  {
4809  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
4810  allocationsStarted = true;
4811  }
4812  sb.AddNumber(memTypeIndex);
4813  if(blockVectorType == VMA_BLOCK_VECTOR_TYPE_MAPPED)
4814  {
4815  sb.Add(" Mapped");
4816  }
4817  sb.Add("\": [");
4818 
4819  m_pBlockVectors[memTypeIndex][blockVectorType]->PrintDetailedMap(sb);
4820 
4821  sb.Add("\n\t]");
4822  }
4823  }
4824  }
4825  if(allocationsStarted)
4826  {
4827  sb.Add("\n}");
4828  }
4829  }
4830 }
4831 
4832 #endif // #if VMA_STATS_STRING_ENABLED
4833 
4834 static VkResult AllocateMemoryForImage(
4835  VmaAllocator allocator,
4836  VkImage image,
4837  const VmaMemoryRequirements* pMemoryRequirements,
4838  VmaSuballocationType suballocType,
4839  VmaAllocation* pAllocation)
4840 {
4841  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pMemoryRequirements && pAllocation);
4842 
4843  VkMemoryRequirements vkMemReq = {};
4844  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
4845 
4846  return allocator->AllocateMemory(
4847  vkMemReq,
4848  *pMemoryRequirements,
4849  suballocType,
4850  pAllocation);
4851 }
4852 
4854 // Public interface
4855 
4856 VkResult vmaCreateAllocator(
4857  const VmaAllocatorCreateInfo* pCreateInfo,
4858  VmaAllocator* pAllocator)
4859 {
4860  VMA_ASSERT(pCreateInfo && pAllocator);
4861  VMA_DEBUG_LOG("vmaCreateAllocator");
4862  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
4863  return VK_SUCCESS;
4864 }
4865 
4866 void vmaDestroyAllocator(
4867  VmaAllocator allocator)
4868 {
4869  if(allocator != VK_NULL_HANDLE)
4870  {
4871  VMA_DEBUG_LOG("vmaDestroyAllocator");
4872  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
4873  vma_delete(&allocationCallbacks, allocator);
4874  }
4875 }
4876 
4877 void vmaGetPhysicalDeviceProperties(
4878  VmaAllocator allocator,
4879  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
4880 {
4881  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
4882  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
4883 }
4884 
4885 void vmaGetMemoryProperties(
4886  VmaAllocator allocator,
4887  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
4888 {
4889  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
4890  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
4891 }
4892 
4893 void vmaGetMemoryTypeProperties(
4894  VmaAllocator allocator,
4895  uint32_t memoryTypeIndex,
4896  VkMemoryPropertyFlags* pFlags)
4897 {
4898  VMA_ASSERT(allocator && pFlags);
4899  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
4900  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
4901 }
4902 
4903 void vmaCalculateStats(
4904  VmaAllocator allocator,
4905  VmaStats* pStats)
4906 {
4907  VMA_ASSERT(allocator && pStats);
4908  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4909  allocator->CalculateStats(pStats);
4910 }
4911 
4912 #if VMA_STATS_STRING_ENABLED
4913 
4914 void vmaBuildStatsString(
4915  VmaAllocator allocator,
4916  char** ppStatsString,
4917  VkBool32 detailedMap)
4918 {
4919  VMA_ASSERT(allocator && ppStatsString);
4920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
4921 
4922  VmaStringBuilder sb(allocator);
4923  {
4924  VmaStats stats;
4925  allocator->CalculateStats(&stats);
4926 
4927  sb.Add("{\n\"Total\": ");
4928  VmaPrintStatInfo(sb, stats.total);
4929 
4930  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
4931  {
4932  sb.Add(",\n\"Heap ");
4933  sb.AddNumber(heapIndex);
4934  sb.Add("\": {\n\t\"Size\": ");
4935  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
4936  sb.Add(",\n\t\"Flags\": ");
4937  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
4938  {
4939  sb.AddString("DEVICE_LOCAL");
4940  }
4941  else
4942  {
4943  sb.AddString("");
4944  }
4945  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
4946  {
4947  sb.Add(",\n\t\"Stats\": ");
4948  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
4949  }
4950 
4951  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
4952  {
4953  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
4954  {
4955  sb.Add(",\n\t\"Type ");
4956  sb.AddNumber(typeIndex);
4957  sb.Add("\": {\n\t\t\"Flags\": \"");
4958  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
4959  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
4960  {
4961  sb.Add(" DEVICE_LOCAL");
4962  }
4963  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
4964  {
4965  sb.Add(" HOST_VISIBLE");
4966  }
4967  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
4968  {
4969  sb.Add(" HOST_COHERENT");
4970  }
4971  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
4972  {
4973  sb.Add(" HOST_CACHED");
4974  }
4975  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
4976  {
4977  sb.Add(" LAZILY_ALLOCATED");
4978  }
4979  sb.Add("\"");
4980  if(stats.memoryType[typeIndex].AllocationCount > 0)
4981  {
4982  sb.Add(",\n\t\t\"Stats\": ");
4983  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
4984  }
4985  sb.Add("\n\t}");
4986  }
4987  }
4988  sb.Add("\n}");
4989  }
4990  if(detailedMap == VK_TRUE)
4991  {
4992  allocator->PrintDetailedMap(sb);
4993  }
4994  sb.Add("\n}\n");
4995  }
4996 
4997  const size_t len = sb.GetLength();
4998  char* const pChars = vma_new_array(allocator, char, len + 1);
4999  if(len > 0)
5000  {
5001  memcpy(pChars, sb.GetData(), len);
5002  }
5003  pChars[len] = '\0';
5004  *ppStatsString = pChars;
5005 }
5006 
5007 void vmaFreeStatsString(
5008  VmaAllocator allocator,
5009  char* pStatsString)
5010 {
5011  if(pStatsString != VMA_NULL)
5012  {
5013  VMA_ASSERT(allocator);
5014  size_t len = strlen(pStatsString);
5015  vma_delete_array(allocator, pStatsString, len + 1);
5016  }
5017 }
5018 
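// A minimal usage sketch (assumes an initialized VmaAllocator `allocator`;
// printf requires <cstdio>):
//
//   char* statsJson = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsJson, VK_TRUE);
//   printf("%s\n", statsJson);
//   vmaFreeStatsString(allocator, statsJson);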
5019 #endif // #if VMA_STATS_STRING_ENABLED
5020 
5023 VkResult vmaFindMemoryTypeIndex(
5024  VmaAllocator allocator,
5025  uint32_t memoryTypeBits,
5026  const VmaMemoryRequirements* pMemoryRequirements,
5027  uint32_t* pMemoryTypeIndex)
5028 {
5029  VMA_ASSERT(allocator != VK_NULL_HANDLE);
5030  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
5031  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
5032 
5033  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
5034  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
5035  if(preferredFlags == 0)
5036  {
5037  preferredFlags = requiredFlags;
5038  }
5039  // preferredFlags, if not 0, must be a superset of requiredFlags.
5040  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
5041 
5042  // Convert usage to requiredFlags and preferredFlags.
5043  switch(pMemoryRequirements->usage)
5044  {
5045  case VMA_MEMORY_USAGE_UNKNOWN:
5046  break;
5047  case VMA_MEMORY_USAGE_GPU_ONLY:
5048  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5049  break;
5050  case VMA_MEMORY_USAGE_CPU_ONLY:
5051  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
5052  break;
5053  case VMA_MEMORY_USAGE_CPU_TO_GPU:
5054  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5055  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
5056  break;
5057  case VMA_MEMORY_USAGE_GPU_TO_CPU:
5058  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5059  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
5060  break;
5061  default:
5062  break;
5063  }
5064 
5065  if((pMemoryRequirements->flags & VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT) != 0)
5066  {
5067  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5068  }
5069 
5070  *pMemoryTypeIndex = UINT32_MAX;
5071  uint32_t minCost = UINT32_MAX;
5072  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
5073  memTypeIndex < allocator->GetMemoryTypeCount();
5074  ++memTypeIndex, memTypeBit <<= 1)
5075  {
5076  // This memory type is acceptable according to memoryTypeBits bitmask.
5077  if((memTypeBit & memoryTypeBits) != 0)
5078  {
5079  const VkMemoryPropertyFlags currFlags =
5080  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
5081  // This memory type contains requiredFlags.
5082  if((requiredFlags & ~currFlags) == 0)
5083  {
5084  // Calculate cost as number of bits from preferredFlags not present in this memory type.
5085  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
5086  // Remember memory type with lowest cost.
5087  if(currCost < minCost)
5088  {
5089  *pMemoryTypeIndex = memTypeIndex;
5090  if(currCost == 0)
5091  {
5092  return VK_SUCCESS;
5093  }
5094  minCost = currCost;
5095  }
5096  }
5097  }
5098  }
5099  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
5100 }
5101 
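// A minimal usage sketch (assumes `allocator` and a VkMemoryRequirements
// `vkMemReq` queried with vkGetBufferMemoryRequirements):
//
//   VmaMemoryRequirements memReq = {};
//   memReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//   uint32_t memTypeIndex = UINT32_MAX;
//   if(vmaFindMemoryTypeIndex(allocator, vkMemReq.memoryTypeBits, &memReq, &memTypeIndex) == VK_SUCCESS)
//   {
//       // memTypeIndex is the lowest-cost type containing all required flags.
//   }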
5102 VkResult vmaAllocateMemory(
5103  VmaAllocator allocator,
5104  const VkMemoryRequirements* pVkMemoryRequirements,
5105  const VmaMemoryRequirements* pVmaMemoryRequirements,
5106  VmaAllocation* pAllocation,
5107  VmaAllocationInfo* pAllocationInfo)
5108 {
5109  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pAllocation);
5110 
5111  VMA_DEBUG_LOG("vmaAllocateMemory");
5112 
5113  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5114 
5115  VkResult result = allocator->AllocateMemory(
5116  *pVkMemoryRequirements,
5117  *pVmaMemoryRequirements,
5118  VMA_SUBALLOCATION_TYPE_UNKNOWN,
5119  pAllocation);
5120  if(pAllocationInfo && result == VK_SUCCESS)
5121  {
5122  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5123  }
5124  return result;
5125 }
5126 
5127 VkResult vmaAllocateMemoryForBuffer(
5128  VmaAllocator allocator,
5129  VkBuffer buffer,
5130  const VmaMemoryRequirements* pMemoryRequirements,
5131  VmaAllocation* pAllocation,
5132  VmaAllocationInfo* pAllocationInfo)
5133 {
5134  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5135 
5136  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
5137 
5138  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5139 
5140  VkMemoryRequirements vkMemReq = {};
5141  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
5142 
5143  VkResult result = allocator->AllocateMemory(
5144  vkMemReq,
5145  *pMemoryRequirements,
5146  VMA_SUBALLOCATION_TYPE_BUFFER,
5147  pAllocation);
5148  if(pAllocationInfo && result == VK_SUCCESS)
5149  {
5150  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5151  }
5152  return result;
5153 }
5154 
5155 VkResult vmaAllocateMemoryForImage(
5156  VmaAllocator allocator,
5157  VkImage image,
5158  const VmaMemoryRequirements* pMemoryRequirements,
5159  VmaAllocation* pAllocation,
5160  VmaAllocationInfo* pAllocationInfo)
5161 {
5162  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pAllocation);
5163 
5164  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
5165 
5166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5167 
5168  VkResult result = AllocateMemoryForImage(
5169  allocator,
5170  image,
5171  pMemoryRequirements,
5172  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
5173  pAllocation);
5174  if(pAllocationInfo && result == VK_SUCCESS)
5175  {
5176  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5177  }
5178  return result;
5179 }
5180 
5181 void vmaFreeMemory(
5182  VmaAllocator allocator,
5183  VmaAllocation allocation)
5184 {
5185  VMA_ASSERT(allocator && allocation);
5186 
5187  VMA_DEBUG_LOG("vmaFreeMemory");
5188 
5189  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5190 
5191  allocator->FreeMemory(allocation);
5192 }
5193 
5194 void vmaGetAllocationInfo(
5195  VmaAllocator allocator,
5196  VmaAllocation allocation,
5197  VmaAllocationInfo* pAllocationInfo)
5198 {
5199  VMA_ASSERT(allocator && allocation && pAllocationInfo);
5200 
5201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5202 
5203  allocator->GetAllocationInfo(allocation, pAllocationInfo);
5204 }
5205 
5206 void vmaSetAllocationUserData(
5207  VmaAllocator allocator,
5208  VmaAllocation allocation,
5209  void* pUserData)
5210 {
5211  VMA_ASSERT(allocator && allocation);
5212 
5213  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5214 
5215  allocation->SetUserData(pUserData);
5216 }
5217 
5218 VkResult vmaMapMemory(
5219  VmaAllocator allocator,
5220  VmaAllocation allocation,
5221  void** ppData)
5222 {
5223  VMA_ASSERT(allocator && allocation && ppData);
5224 
5225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5226 
5227  return vkMapMemory(allocator->m_hDevice, allocation->GetMemory(),
5228  allocation->GetOffset(), allocation->GetSize(), 0, ppData);
5229 }
5230 
5231 void vmaUnmapMemory(
5232  VmaAllocator allocator,
5233  VmaAllocation allocation)
5234 {
5235  VMA_ASSERT(allocator && allocation);
5236 
5237  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5238 
5239  vkUnmapMemory(allocator->m_hDevice, allocation->GetMemory());
5240 }
5241 
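// A minimal usage sketch for a host-visible allocation (`srcData` and
// `srcSize` are assumed application data; memcpy requires <cstring>):
//
//   void* pData = VMA_NULL;
//   if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
//   {
//       memcpy(pData, srcData, srcSize);
//       vmaUnmapMemory(allocator, allocation);
//   }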
5242 void vmaUnmapPersistentlyMappedMemory(VmaAllocator allocator)
5243 {
5244  VMA_ASSERT(allocator);
5245 
5246  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5247 
5248  allocator->UnmapPersistentlyMappedMemory();
5249 }
5250 
5251 VkResult vmaMapPersistentlyMappedMemory(VmaAllocator allocator)
5252 {
5253  VMA_ASSERT(allocator);
5254 
5255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5256 
5257  return allocator->MapPersistentlyMappedMemory();
5258 }
5259 
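// The two functions above are intended as a matched pair; the internal counter
// allows such pairs to nest. A sketch of the intended pattern:
//
//   vmaUnmapPersistentlyMappedMemory(allocator);
//   // ... period during which previously returned pMappedData pointers
//   //     must not be dereferenced ...
//   VkResult remapRes = vmaMapPersistentlyMappedMemory(allocator);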
5260 VkResult vmaDefragment(
5261  VmaAllocator allocator,
5262  VmaAllocation* pAllocations,
5263  size_t allocationCount,
5264  VkBool32* pAllocationsChanged,
5265  const VmaDefragmentationInfo *pDefragmentationInfo,
5266  VmaDefragmentationStats* pDefragmentationStats)
5267 {
5268  VMA_ASSERT(allocator && pAllocations);
5269 
5270  VMA_DEBUG_LOG("vmaDefragment");
5271 
5272  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5273 
5274  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
5275 }
5276 
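// A minimal usage sketch (assumes `allocs`, an application-maintained array of
// `allocCount` VmaAllocation handles not currently in use by the GPU):
//
//   VmaDefragmentationStats defragStats = {};
//   VkResult defragRes = vmaDefragment(
//       allocator, allocs, allocCount, VMA_NULL, VMA_NULL, &defragStats);
//   // Buffers/images bound to allocations that moved must afterwards be
//   // recreated and re-bound by the application.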
5277 VkResult vmaCreateBuffer(
5278  VmaAllocator allocator,
5279  const VkBufferCreateInfo* pCreateInfo,
5280  const VmaMemoryRequirements* pMemoryRequirements,
5281  VkBuffer* pBuffer,
5282  VmaAllocation* pAllocation,
5283  VmaAllocationInfo* pAllocationInfo)
5284 {
5285  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pBuffer && pAllocation);
5286 
5287  VMA_DEBUG_LOG("vmaCreateBuffer");
5288 
5289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5290 
5291  *pBuffer = VK_NULL_HANDLE;
5292  *pAllocation = VK_NULL_HANDLE;
5293 
5294  // 1. Create VkBuffer.
5295  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
5296  if(res >= 0)
5297  {
5298  // 2. vkGetBufferMemoryRequirements.
5299  VkMemoryRequirements vkMemReq = {};
5300  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
5301 
5302  // 3. Allocate memory using allocator.
5303  res = allocator->AllocateMemory(
5304  vkMemReq,
5305  *pMemoryRequirements,
5306  VMA_SUBALLOCATION_TYPE_BUFFER,
5307  pAllocation);
5308  if(res >= 0)
5309  {
5310  // 4. Bind buffer with memory.
5311  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5312  if(res >= 0)
5313  {
5314  // All steps succeeded.
5315  if(pAllocationInfo != VMA_NULL)
5316  {
5317  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5318  }
5319  return VK_SUCCESS;
5320  }
5321  allocator->FreeMemory(*pAllocation);
5322  *pAllocation = VK_NULL_HANDLE;
5323  return res;
5324  }
5325  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
5326  *pBuffer = VK_NULL_HANDLE;
5327  return res;
5328  }
5329  return res;
5330 }
5331 
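// A minimal usage sketch (size and usage flags are arbitrary example values):
//
//   VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//   bufInfo.size = 65536;
//   bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//   VmaMemoryRequirements memReq = {};
//   memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkBuffer buffer = VK_NULL_HANDLE;
//   VmaAllocation allocation = VK_NULL_HANDLE;
//   VkResult bufRes = vmaCreateBuffer(allocator, &bufInfo, &memReq, &buffer, &allocation, VMA_NULL);
//   // ... use buffer ...
//   vmaDestroyBuffer(allocator, buffer, allocation);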
5332 void vmaDestroyBuffer(
5333  VmaAllocator allocator,
5334  VkBuffer buffer,
5335  VmaAllocation allocation)
5336 {
5337  if(buffer != VK_NULL_HANDLE)
5338  {
5339  VMA_ASSERT(allocator);
5340 
5341  VMA_DEBUG_LOG("vmaDestroyBuffer");
5342 
5343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5344 
5345  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
5346 
5347  allocator->FreeMemory(allocation);
5348  }
5349 }
5350 
5351 VkResult vmaCreateImage(
5352  VmaAllocator allocator,
5353  const VkImageCreateInfo* pCreateInfo,
5354  const VmaMemoryRequirements* pMemoryRequirements,
5355  VkImage* pImage,
5356  VmaAllocation* pAllocation,
5357  VmaAllocationInfo* pAllocationInfo)
5358 {
5359  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements && pImage && pAllocation);
5360 
5361  VMA_DEBUG_LOG("vmaCreateImage");
5362 
5363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5364 
5365  // 1. Create VkImage.
5366  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
5367  if(res >= 0)
5368  {
5370  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
5371  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
5372  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
5373 
5374  // 2. Allocate memory using allocator.
5375  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, pAllocation);
5376  if(res >= 0)
5377  {
5378  // 3. Bind image with memory.
5379  res = vkBindImageMemory(allocator->m_hDevice, *pImage, (*pAllocation)->GetMemory(), (*pAllocation)->GetOffset());
5380  if(res >= 0)
5381  {
5382  // All steps succeeded.
5383  if(pAllocationInfo != VMA_NULL)
5384  {
5385  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
5386  }
5387  return VK_SUCCESS;
5388  }
5389  allocator->FreeMemory(*pAllocation);
5390  *pAllocation = VK_NULL_HANDLE;
5391  return res;
5392  }
5393  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
5394  *pImage = VK_NULL_HANDLE;
5395  return res;
5396  }
5397  return res;
5398 }
5399 
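// A minimal usage sketch, mirroring the vmaCreateBuffer example (extent,
// format and usage are arbitrary example values):
//
//   VkImageCreateInfo imgInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgInfo.extent = { 1024, 1024, 1 };
//   imgInfo.mipLevels = 1;
//   imgInfo.arrayLayers = 1;
//   imgInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imgInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//   imgInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//
//   VmaMemoryRequirements memReq = {};
//   memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage image = VK_NULL_HANDLE;
//   VmaAllocation allocation = VK_NULL_HANDLE;
//   VkResult imgRes = vmaCreateImage(allocator, &imgInfo, &memReq, &image, &allocation, VMA_NULL);
//   // ... use image ...
//   vmaDestroyImage(allocator, image, allocation);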
5400 void vmaDestroyImage(
5401  VmaAllocator allocator,
5402  VkImage image,
5403  VmaAllocation allocation)
5404 {
5405  if(image != VK_NULL_HANDLE)
5406  {
5407  VMA_ASSERT(allocator);
5408 
5409  VMA_DEBUG_LOG("vmaDestroyImage");
5410 
5411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
5412 
5413  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
5414 
5415  allocator->FreeMemory(allocation);
5416  }
5417 }
5418 
5419 #endif // #ifdef VMA_IMPLEMENTATION