Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
151 #include <vulkan/vulkan.h>
152 
154 
158 VK_DEFINE_HANDLE(VmaAllocator)
159 
160 typedef struct VmaAllocatorCreateInfo
162 {
164 
165  VkPhysicalDevice physicalDevice;
167 
168  VkDevice device;
170 
173 
176 
177  const VkAllocationCallbacks* pAllocationCallbacks;
179 
181 VkResult vmaCreateAllocator(
182  const VmaAllocatorCreateInfo* pCreateInfo,
183  VmaAllocator* pAllocator);
184 
187  VmaAllocator allocator);
188 
194  VmaAllocator allocator,
195  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
196 
202  VmaAllocator allocator,
203  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
204 
212  VmaAllocator allocator,
213  uint32_t memoryTypeIndex,
214  VkMemoryPropertyFlags* pFlags);
215 
216 typedef struct VmaStatInfo
217 {
218  uint32_t AllocationCount;
221  VkDeviceSize UsedBytes;
222  VkDeviceSize UnusedBytes;
223  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
224  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
225 } VmaStatInfo;
226 
228 struct VmaStats
229 {
230  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
231  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
233 };
234 
236 void vmaCalculateStats(
237  VmaAllocator allocator,
238  VmaStats* pStats);
239 
240 #define VMA_STATS_STRING_ENABLED 1
241 
242 #if VMA_STATS_STRING_ENABLED
243 
245 
248  VmaAllocator allocator,
249  char** ppStatsString,
250  VkBool32 detailedMap);
251 
252 void vmaFreeStatsString(
253  VmaAllocator allocator,
254  char* pStatsString);
255 
256 #endif // #if VMA_STATS_STRING_ENABLED
257 
260 
265 typedef enum VmaMemoryUsage
266 {
279 
280 typedef struct VmaMemoryRequirements
281 {
290  VkBool32 ownMemory;
299  VkMemoryPropertyFlags requiredFlags;
304  VkMemoryPropertyFlags preferredFlags;
311  VkBool32 neverAllocate;
313 
328 VkResult vmaFindMemoryTypeIndex(
329  VmaAllocator allocator,
330  uint32_t memoryTypeBits,
331  const VmaMemoryRequirements* pMemoryRequirements,
332  uint32_t* pMemoryTypeIndex);
333 
336 
353 VkResult vmaAllocateMemory(
354  VmaAllocator allocator,
355  const VkMemoryRequirements* pVkMemoryRequirements,
356  const VmaMemoryRequirements* pVmaMemoryRequirements,
357  VkMappedMemoryRange* pMemory,
358  uint32_t* pMemoryTypeIndex);
359 
368  VmaAllocator allocator,
369  VkBuffer buffer,
370  const VmaMemoryRequirements* pMemoryRequirements,
371  VkMappedMemoryRange* pMemory,
372  uint32_t* pMemoryTypeIndex);
373 
376  VmaAllocator allocator,
377  VkImage image,
378  const VmaMemoryRequirements* pMemoryRequirements,
379  VkMappedMemoryRange* pMemory,
380  uint32_t* pMemoryTypeIndex);
381 
383 void vmaFreeMemory(
384  VmaAllocator allocator,
385  const VkMappedMemoryRange* pMemory);
386 
392 VkResult vmaMapMemory(
393  VmaAllocator allocator,
394  const VkMappedMemoryRange* pMemory,
395  void** ppData);
396 
397 void vmaUnmapMemory(
398  VmaAllocator allocator,
399  const VkMappedMemoryRange* pMemory);
400 
403 
425 VkResult vmaCreateBuffer(
426  VmaAllocator allocator,
427  const VkBufferCreateInfo* pCreateInfo,
428  const VmaMemoryRequirements* pMemoryRequirements,
429  VkBuffer* pBuffer,
430  VkMappedMemoryRange* pMemory,
431  uint32_t* pMemoryTypeIndex);
432 
433 void vmaDestroyBuffer(
434  VmaAllocator allocator,
435  VkBuffer buffer);
436 
438 VkResult vmaCreateImage(
439  VmaAllocator allocator,
440  const VkImageCreateInfo* pCreateInfo,
441  const VmaMemoryRequirements* pMemoryRequirements,
442  VkImage* pImage,
443  VkMappedMemoryRange* pMemory,
444  uint32_t* pMemoryTypeIndex);
445 
446 void vmaDestroyImage(
447  VmaAllocator allocator,
448  VkImage image);
449 
452 #ifdef VMA_IMPLEMENTATION
453 
#include <cstdint>
#include <cstdio> // for snprintf
#include <cstdlib>
#include <cinttypes> // for PRIu32 / PRIu64
456 
457 /*******************************************************************************
458 CONFIGURATION SECTION
459 
460 Define some of these macros before each #include of this header or change them
461 here if you need other then default behavior depending on your environment.
462 */
463 
464 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
465 //#define VMA_USE_STL_CONTAINERS 1
466 
467 /* Set this macro to 1 to make the library including and using STL containers:
468 std::pair, std::vector, std::list, std::unordered_map.
469 
470 Set it to 0 or undefined to make the library using its own implementation of
471 the containers.
472 */
473 #if VMA_USE_STL_CONTAINERS
474  #define VMA_USE_STL_VECTOR 1
475  #define VMA_USE_STL_UNORDERED_MAP 1
476  #define VMA_USE_STL_LIST 1
477 #endif
478 
479 #if VMA_USE_STL_VECTOR
480  #include <vector>
481 #endif
482 
483 #if VMA_USE_STL_UNORDERED_MAP
484  #include <unordered_map>
485 #endif
486 
487 #if VMA_USE_STL_LIST
488  #include <list>
489 #endif
490 
491 /*
492 Following headers are used in this CONFIGURATION section only, so feel free to
493 remove them if not needed.
494 */
495 #include <cassert> // for assert
496 #include <algorithm> // for min, max
497 #include <mutex> // for std::mutex
498 
499 #if !defined(_WIN32)
500  #include <malloc.h> // for aligned_alloc()
501 #endif
502 
503 // Normal assert to check for programmer's errors, especially in Debug configuration.
504 #ifndef VMA_ASSERT
505  #ifdef _DEBUG
506  #define VMA_ASSERT(expr) assert(expr)
507  #else
508  #define VMA_ASSERT(expr)
509  #endif
510 #endif
511 
512 // Assert that will be called very often, like inside data structures e.g. operator[].
513 // Making it non-empty can make program slow.
514 #ifndef VMA_HEAVY_ASSERT
515  #ifdef _DEBUG
516  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
517  #else
518  #define VMA_HEAVY_ASSERT(expr)
519  #endif
520 #endif
521 
522 #ifndef VMA_NULL
523  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
524  #define VMA_NULL nullptr
525 #endif
526 
527 #ifndef VMA_ALIGN_OF
528  #define VMA_ALIGN_OF(type) (__alignof(type))
529 #endif
530 
531 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
532  #if defined(_WIN32)
533  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
534  #else
535  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
536  #endif
537 #endif
538 
539 #ifndef VMA_SYSTEM_FREE
540  #if defined(_WIN32)
541  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
542  #else
543  #define VMA_SYSTEM_FREE(ptr) free(ptr)
544  #endif
545 #endif
546 
547 #ifndef VMA_MIN
548  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
549 #endif
550 
551 #ifndef VMA_MAX
552  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
553 #endif
554 
555 #ifndef VMA_SWAP
556  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
557 #endif
558 
559 #ifndef VMA_DEBUG_LOG
560  #define VMA_DEBUG_LOG(format, ...)
561  /*
562  #define VMA_DEBUG_LOG(format, ...) do { \
563  printf(format, __VA_ARGS__); \
564  printf("\n"); \
565  } while(false)
566  */
567 #endif
568 
569 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
570 #if VMA_STATS_STRING_ENABLED
571  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
572  {
573  _ultoa_s(num, outStr, strLen, 10);
574  }
575  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
576  {
577  _ui64toa_s(num, outStr, strLen, 10);
578  }
579 #endif
580 
581 #ifndef VMA_MUTEX
 // Default mutex implementation used when the user doesn't override VMA_MUTEX.
 // Thin wrapper over std::mutex exposing the Lock/Unlock naming used by
 // VmaMutexLock below.
 class VmaMutex
 {
 public:
     VmaMutex() { }
     ~VmaMutex() { }
     void Lock() { m_Mutex.lock(); }
     void Unlock() { m_Mutex.unlock(); }
 private:
     std::mutex m_Mutex;
 };
592  #define VMA_MUTEX VmaMutex
593 #endif
594 
595 #ifndef VMA_BEST_FIT
596 
608  #define VMA_BEST_FIT (1)
609 #endif
610 
611 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
612 
616  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
617 #endif
618 
619 #ifndef VMA_DEBUG_ALIGNMENT
620 
624  #define VMA_DEBUG_ALIGNMENT (1)
625 #endif
626 
627 #ifndef VMA_DEBUG_MARGIN
628 
632  #define VMA_DEBUG_MARGIN (0)
633 #endif
634 
635 #ifndef VMA_DEBUG_GLOBAL_MUTEX
636 
640  #define VMA_DEBUG_GLOBAL_MUTEX (0)
641 #endif
642 
643 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
644 
648  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
649 #endif
650 
651 #ifndef VMA_SMALL_HEAP_MAX_SIZE
652  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
654 #endif
655 
656 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
657  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
659 #endif
660 
661 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
662  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
664 #endif
665 
666 /*******************************************************************************
667 END OF CONFIGURATION
668 */
669 
// All-null VkAllocationCallbacks structure, used as a neutral placeholder
// where a callbacks struct is required but the user supplied none.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
672 
// Returns number of bits set to 1 in (v). For example: CountBitsSet(0x0B) == 3.
static inline uint32_t CountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
683 
// Aligns given value up to nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Intended for unsigned integer types such as uint32_t, uint64_t.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Remainder form: add exactly what is missing to reach the next multiple,
    // or nothing when val is already aligned.
    const T rem = val % align;
    return (rem != (T)0) ? (val + (align - rem)) : val;
}
691 
// Division with mathematical rounding to nearest number.
// Intended for unsigned integer types; y must be nonzero.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    // Adding half of the divisor before truncating division rounds the
    // quotient to the nearest integer (ties round up).
    const T half = y / (T)2;
    return (x + half) / y;
}
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // Address of the last byte occupied by resource A.
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    // NOTE(review): the mask trick below assumes pageSize is a power of two
    // (bufferImageGranularity is per the Vulkan spec) - confirm at call sites.
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    // Same page index => the two resources share a page of size pageSize.
    return resourceAEndPage == resourceBStartPage;
}
718 
// Kind of content stored in a suballocation. Used to decide whether two
// neighboring suballocations must be separated per bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict below).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused range.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
729 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so only the upper triangle of the type matrix is handled.
    if(suballocType1 > suballocType2)
        VMA_SWAP(suballocType1, suballocType2);

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // A free range conflicts with nothing.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: may be optimal, so conflicts with linear-side types too.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal never conflicts (suballocType2 >= suballocType1 here).
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
768 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// Note: not copyable in practice (copying would double-unlock); holds the
// mutex by reference, so the mutex must outlive the lock object.
struct VmaMutexLock
{
public:
    VmaMutexLock(VMA_MUTEX& mutex) : m_Mutex(mutex) { mutex.Lock(); }
    ~VmaMutexLock() { m_Mutex.Unlock(); }

private:
    VMA_MUTEX& m_Mutex;
};
779 
780 #if VMA_DEBUG_GLOBAL_MUTEX
781  static VMA_MUTEX gDebugGlobalMutex;
782  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
783 #else
784  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
785 #endif
786 
787 // Minimum size of a free suballocation to register it in the free suballocation collection.
788 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
789 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp) - i.e. std::lower_bound semantics.

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t remaining = (size_t)(end - beg);
    IterT it = beg;
    while(remaining > 0)
    {
        const size_t step = remaining / 2;
        IterT probe = it + step;
        if(cmp(*probe, key))
        {
            // Probe is less than key: answer lies strictly after it.
            it = probe + 1;
            remaining -= step + 1;
        }
        else
        {
            // Probe is not less: answer is probe or earlier.
            remaining = step;
        }
    }
    return it;
}
813 
815 // Memory allocation
816 
// Allocates (size) bytes aligned to (alignment) CPU memory, preferring the
// user-supplied VkAllocationCallbacks and falling back to the system
// aligned allocator. Must be paired with VmaFree.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}
833 
// Frees CPU memory obtained from VmaMalloc. The branch taken must mirror the
// allocation path: custom pfnFree when callbacks were used, system free otherwise.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
846 
// Allocates uninitialized, properly aligned storage for a single T.
// Does NOT run T's constructor - see the vma_new macro for that.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
852 
// Allocates uninitialized, properly aligned storage for (count) objects of T.
// Does NOT run constructors - see the vma_new_array macro for that.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
858 
859 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
860 
861 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
862 
863 template<typename T>
864 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
865 {
866  ptr->~T();
867  VmaFree(pAllocationCallbacks, ptr);
868 }
869 
// Destroys (count) objects of type T and frees the array memory.
// Null ptr is a no-op.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy in reverse order, mirroring built-in C++ array destruction.
        for(size_t i = count; i--; )
            ptr[i].~T();
        VmaFree(pAllocationCallbacks, ptr);
    }
}
880 
// STL-compatible allocator that routes all allocations through
// VkAllocationCallbacks (or the system aligned allocator when they are null).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor: containers share the same callbacks across element types.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Non-assignable: m_pCallbacks is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
908 
909 #if VMA_USE_STL_VECTOR
910 
911 #define VmaVector std::vector
912 
// Inserts (item) at (index). std::vector flavor, compiled when VMA_USE_STL_CONTAINERS/VMA_USE_STL_VECTOR is on.
template<typename T, typename allocatorT>
static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
918 
// Removes element at (index). std::vector flavor, compiled when VMA_USE_STL_CONTAINERS/VMA_USE_STL_VECTOR is on.
template<typename T, typename allocatorT>
static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
924 
925 #else // #if VMA_USE_STL_VECTOR
926 
927 /* Class with interface compatible with subset of std::vector.
928 T must be POD because constructors and destructors are not called and memcpy is
929 used for these objects. */
930 template<typename T, typename AllocatorT>
931 class VmaVector
932 {
933 public:
934  VmaVector(const AllocatorT& allocator) :
935  m_Allocator(allocator),
936  m_pArray(VMA_NULL),
937  m_Count(0),
938  m_Capacity(0)
939  {
940  }
941 
942  VmaVector(AllocatorT& allocator) :
943  m_Allocator(allocator),
944  m_pArray(VMA_NULL),
945  m_Count(0),
946  m_Capacity(0)
947  {
948  }
949 
950  VmaVector(size_t count, AllocatorT& allocator) :
951  m_Allocator(allocator),
952  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator->m_pCallbacks, count) : VMA_NULL),
953  m_Count(count),
954  m_Capacity(count)
955  {
956  }
957 
958  VmaVector(const VmaVector<T, AllocatorT>& src) :
959  m_Allocator(src.m_Allocator),
960  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src->m_pCallbacks, src.m_Count) : VMA_NULL),
961  m_Count(src.m_Count),
962  m_Capacity(src.m_Count)
963  {
964  if(m_Count != 0)
965  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
966  }
967 
968  ~VmaVector()
969  {
970  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
971  }
972 
973  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
974  {
975  if(&rhs != this)
976  {
977  Resize(rhs.m_Count);
978  if(m_Count != 0)
979  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
980  }
981  return *this;
982  }
983 
984  bool empty() const { return m_Count == 0; }
985  size_t size() const { return m_Count; }
986  T* data() { return m_pArray; }
987  const T* data() const { return m_pArray; }
988 
989  T& operator[](size_t index)
990  {
991  VMA_HEAVY_ASSERT(index < m_Count);
992  return m_pArray[index];
993  }
994  const T& operator[](size_t index) const
995  {
996  VMA_HEAVY_ASSERT(index < m_Count);
997  return m_pArray[index];
998  }
999 
1000  T& front()
1001  {
1002  VMA_HEAVY_ASSERT(m_Count > 0);
1003  return m_pArray[0];
1004  }
1005  const T& front() const
1006  {
1007  VMA_HEAVY_ASSERT(m_Count > 0);
1008  return m_pArray[0];
1009  }
1010  T& back()
1011  {
1012  VMA_HEAVY_ASSERT(m_Count > 0);
1013  return m_pArray[m_Count - 1];
1014  }
1015  const T& back() const
1016  {
1017  VMA_HEAVY_ASSERT(m_Count > 0);
1018  return m_pArray[m_Count - 1];
1019  }
1020 
1021  void reserve(size_t newCapacity, bool freeMemory = false)
1022  {
1023  newCapacity = VMA_MAX(newCapacity, m_Count);
1024 
1025  if((newCapacity < m_Capacity) && !freeMemory)
1026  newCapacity = m_Capacity;
1027 
1028  if(newCapacity != m_Capacity)
1029  {
1030  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
1031  if(m_Count != 0)
1032  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1033  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1034  m_Capacity = newCapacity;
1035  m_pArray = newArray;
1036  }
1037  }
1038 
1039  void resize(size_t newCount, bool freeMemory = false)
1040  {
1041  size_t newCapacity = m_Capacity;
1042  if(newCount > m_Capacity)
1043  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1044  else if(freeMemory)
1045  newCapacity = newCount;
1046 
1047  if(newCapacity != m_Capacity)
1048  {
1049  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1050  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1051  if(elementsToCopy != 0)
1052  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1053  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1054  m_Capacity = newCapacity;
1055  m_pArray = newArray;
1056  }
1057 
1058  m_Count = newCount;
1059  }
1060 
1061  void clear(bool freeMemory = false)
1062  {
1063  resize(0, freeMemory);
1064  }
1065 
1066  void insert(size_t index, const T& src)
1067  {
1068  VMA_HEAVY_ASSERT(index <= m_Count);
1069  const size_t oldCount = size();
1070  resize(oldCount + 1);
1071  if(index < oldCount)
1072  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1073  m_pArray[index] = src;
1074  }
1075 
1076  void remove(size_t index)
1077  {
1078  VMA_HEAVY_ASSERT(index < m_Count);
1079  const size_t oldCount = size();
1080  if(index < oldCount - 1)
1081  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1082  resize(oldCount - 1);
1083  }
1084 
1085  void push_back(const T& src)
1086  {
1087  const size_t newIndex = size();
1088  resize(newIndex + 1);
1089  m_pArray[newIndex] = src;
1090  }
1091 
1092  void pop_back()
1093  {
1094  VMA_HEAVY_ASSERT(m_Count > 0);
1095  resize(size() - 1);
1096  }
1097 
1098  void push_front(const T& src)
1099  {
1100  insert(0, src);
1101  }
1102 
1103  void pop_front()
1104  {
1105  VMA_HEAVY_ASSERT(m_Count > 0);
1106  remove(0);
1107  }
1108 
1109  typedef T* iterator;
1110 
1111  iterator begin() { return m_pArray; }
1112  iterator end() { return m_pArray + m_Count; }
1113 
1114 private:
1115  AllocatorT m_Allocator;
1116  T* m_pArray;
1117  size_t m_Count;
1118  size_t m_Capacity;
1119 };
1120 
// Inserts (item) at (index). VmaVector flavor, used when STL containers are disabled.
template<typename T, typename allocatorT>
static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
1126 
// Removes element at (index). VmaVector flavor, used when STL containers are disabled.
template<typename T, typename allocatorT>
static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
1132 
1133 #endif // #if VMA_USE_STL_VECTOR
1134 
1136 // class VmaPoolAllocator
1137 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.

Free slots inside each block form a singly-linked list threaded through the
Item union (NextFreeIndex), so a free slot costs no extra memory.
*/
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks without destructing outstanding T objects.
    void Clear();
    // Returns uninitialized storage for one T (caller constructs in place).
    T* Alloc();
    // Returns a slot previously obtained from Alloc back to its block's free list.
    void Free(T* ptr);

private:
    // A slot is either live (Value) or on the free list (NextFreeIndex).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // Fixed-size array of Items plus the head of its free-slot list
    // (UINT32_MAX means the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
1172 
// Constructs an empty pool; blocks of (itemsPerBlock) slots are created lazily.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
1181 
// Releases all block memory; any still-live T objects are NOT destructed.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
1187 
// Frees every block's item array and drops the block list.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
1195 
// Returns a pointer to uninitialized storage for one T, reusing a free slot
// from an existing block when possible, otherwise creating a new block.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    // Iterate backwards: the newest blocks are most likely to have free slots.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
1217 
// Returns (ptr), previously obtained from Alloc, to its block's free list.
// Does NOT destruct the T object - the caller is responsible for that.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union via memcpy avoids strict-aliasing violations.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
1241 
1242 template<typename T>
1243 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
1244 {
1245  ItemBlock newBlock = {
1246  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
1247 
1248  m_ItemBlocks.push_back(newBlock);
1249 
1250  // Setup singly-linked list of all free items in this block.
1251  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
1252  newBlock.pItems[i].NextFreeIndex = i + 1;
1253  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
1254  return m_ItemBlocks.back();
1255 }
1256 
1258 // class VmaRawList, VmaList
1259 
1260 #if VMA_USE_STL_LIST
1261 
1262 #define VmaList std::list
1263 
1264 #else // #if VMA_USE_STL_LIST
1265 
// Node of the doubly linked VmaRawList; allocated from a VmaPoolAllocator.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
1273 
// Doubly linked list. Nodes are pool-allocated; exposes raw ItemType nodes
// rather than iterators ("raw" list).
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert overloads without a value leave Value uninitialized
    // for the caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
1321 
// Constructs an empty list with its own node pool.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
1331 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor releases all node memory at once.
}
1338 
// Returns every node to the pool and resets the list to the empty state.
template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        // Walk backwards from the tail, freeing each node.
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}
1356 
// Appends a new node with uninitialized Value at the back and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        // First node becomes both front and back.
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link behind the current back node.
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1378 
// Prepends a new node with uninitialized Value at the front and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        // First node becomes both front and back.
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link in front of the current front node.
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
1400 
// Appends a copy of (value) at the back and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
1408 
// Prepends a copy of (value) at the front and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
1416 
1417 template<typename T>
1418 void VmaRawList<T>::PopBack()
1419 {
1420  VMA_HEAVY_ASSERT(m_Count > 0);
1421  ItemType* const pBackItem = m_pBack;
1422  ItemType* const pPrevItem = pBackItem->pPrev;
1423  if(pPrevItem != VMA_NULL)
1424  pPrevItem->pNext = VMA_NULL;
1425  m_pBack = pPrevItem;
1426  m_ItemAllocator.Free(pBackItem);
1427  --m_Count;
1428 }
1429 
1430 template<typename T>
1431 void VmaRawList<T>::PopFront()
1432 {
1433  VMA_HEAVY_ASSERT(m_Count > 0);
1434  ItemType* const pFrontItem = m_pFront;
1435  ItemType* const pNextItem = pFrontItem->pNext;
1436  if(pNextItem != VMA_NULL)
1437  pNextItem->pPrev = VMA_NULL;
1438  m_pFront = pNextItem;
1439  m_ItemAllocator.Free(pFrontItem);
1440  --m_Count;
1441 }
1442 
1443 template<typename T>
1444 void VmaRawList<T>::Remove(ItemType* pItem)
1445 {
1446  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
1447  VMA_HEAVY_ASSERT(m_Count > 0);
1448 
1449  if(pItem->pPrev != VMA_NULL)
1450  pItem->pPrev->pNext = pItem->pNext;
1451  else
1452  {
1453  VMA_HEAVY_ASSERT(m_pFront == pItem);
1454  m_pFront = pItem->pNext;
1455  }
1456 
1457  if(pItem->pNext != VMA_NULL)
1458  pItem->pNext->pPrev = pItem->pPrev;
1459  else
1460  {
1461  VMA_HEAVY_ASSERT(m_pBack == pItem);
1462  m_pBack = pItem->pPrev;
1463  }
1464 
1465  m_ItemAllocator.Free(pItem);
1466  --m_Count;
1467 }
1468 
1469 template<typename T>
1470 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1471 {
1472  if(pItem != VMA_NULL)
1473  {
1474  ItemType* const prevItem = pItem->pPrev;
1475  ItemType* const newItem = m_ItemAllocator.Alloc();
1476  newItem->pPrev = prevItem;
1477  newItem->pNext = pItem;
1478  pItem->pPrev = newItem;
1479  if(prevItem != VMA_NULL)
1480  prevItem->pNext = newItem;
1481  else
1482  {
1483  VMA_HEAVY_ASSERT(m_pFront = pItem);
1484  m_pFront = newItem;
1485  }
1486  ++m_Count;
1487  return newItem;
1488  }
1489  else
1490  return PushBack();
1491 }
1492 
1493 template<typename T>
1494 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1495 {
1496  if(pItem != VMA_NULL)
1497  {
1498  ItemType* const nextItem = pItem->pNext;
1499  ItemType* const newItem = m_ItemAllocator.Alloc();
1500  newItem->pNext = nextItem;
1501  newItem->pPrev = pItem;
1502  pItem->pNext = newItem;
1503  if(nextItem != VMA_NULL)
1504  nextItem->pPrev = newItem;
1505  else
1506  {
1507  VMA_HEAVY_ASSERT(m_pBack = pItem);
1508  m_pBack = newItem;
1509  }
1510  ++m_Count;
1511  return newItem;
1512  }
1513  else
1514  return PushFront();
1515 }
1516 
1517 template<typename T>
1518 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
1519 {
1520  ItemType* const newItem = InsertBefore(pItem);
1521  newItem->Value = value;
1522  return newItem;
1523 }
1524 
1525 template<typename T>
1526 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
1527 {
1528  ItemType* const newItem = InsertAfter(pItem);
1529  newItem->Value = value;
1530  return newItem;
1531 }
1532 
1533 template<typename T, typename AllocatorT>
1534 class VmaList
1535 {
1536 public:
1537  class iterator
1538  {
1539  public:
1540  iterator() :
1541  m_pList(VMA_NULL),
1542  m_pItem(VMA_NULL)
1543  {
1544  }
1545 
1546  T& operator*() const
1547  {
1548  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1549  return m_pItem->Value;
1550  }
1551  T* operator->() const
1552  {
1553  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1554  return &m_pItem->Value;
1555  }
1556 
1557  iterator& operator++()
1558  {
1559  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1560  m_pItem = m_pItem->pNext;
1561  return *this;
1562  }
1563  iterator& operator--()
1564  {
1565  if(m_pItem != VMA_NULL)
1566  m_pItem = m_pItem->pPrev;
1567  else
1568  {
1569  VMA_HEAVY_ASSERT(!m_pList.IsEmpty());
1570  m_pItem = m_pList->Back();
1571  }
1572  return *this;
1573  }
1574 
1575  iterator operator++(int)
1576  {
1577  iterator result = *this;
1578  ++*this;
1579  return result;
1580  }
1581  iterator operator--(int)
1582  {
1583  iterator result = *this;
1584  --*this;
1585  return result;
1586  }
1587 
1588  bool operator==(const iterator& rhs) const
1589  {
1590  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1591  return m_pItem == rhs.m_pItem;
1592  }
1593  bool operator!=(const iterator& rhs) const
1594  {
1595  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1596  return m_pItem != rhs.m_pItem;
1597  }
1598 
1599  private:
1600  VmaRawList<T>* m_pList;
1601  VmaListItem<T>* m_pItem;
1602 
1603  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1604  m_pList(pList),
1605  m_pItem(pItem)
1606  {
1607  }
1608 
1609  friend class VmaList<T, AllocatorT>;
1610  friend class VmaList<T, AllocatorT>:: const_iterator;
1611  };
1612 
1613  class const_iterator
1614  {
1615  public:
1616  const_iterator() :
1617  m_pList(VMA_NULL),
1618  m_pItem(VMA_NULL)
1619  {
1620  }
1621 
1622  const_iterator(const iterator& src) :
1623  m_pList(src.m_pList),
1624  m_pItem(src.m_pItem)
1625  {
1626  }
1627 
1628  const T& operator*() const
1629  {
1630  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1631  return m_pItem->Value;
1632  }
1633  const T* operator->() const
1634  {
1635  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1636  return &m_pItem->Value;
1637  }
1638 
1639  const_iterator& operator++()
1640  {
1641  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1642  m_pItem = m_pItem->pNext;
1643  return *this;
1644  }
1645  const_iterator& operator--()
1646  {
1647  if(m_pItem != VMA_NULL)
1648  m_pItem = m_pItem->pPrev;
1649  else
1650  {
1651  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1652  m_pItem = m_pList->Back();
1653  }
1654  return *this;
1655  }
1656 
1657  const_iterator operator++(int)
1658  {
1659  const_iterator result = *this;
1660  ++*this;
1661  return result;
1662  }
1663  const_iterator operator--(int)
1664  {
1665  const_iterator result = *this;
1666  --*this;
1667  return result;
1668  }
1669 
1670  bool operator==(const const_iterator& rhs) const
1671  {
1672  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1673  return m_pItem == rhs.m_pItem;
1674  }
1675  bool operator!=(const const_iterator& rhs) const
1676  {
1677  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1678  return m_pItem != rhs.m_pItem;
1679  }
1680 
1681  private:
1682  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
1683  m_pList(pList),
1684  m_pItem(pItem)
1685  {
1686  }
1687 
1688  const VmaRawList<T>* m_pList;
1689  const VmaListItem<T>* m_pItem;
1690 
1691  friend class VmaList<T, AllocatorT>;
1692  };
1693 
1694  VmaList(AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1695  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1696 
1697  bool empty() const { return m_RawList.IsEmpty(); }
1698  size_t size() const { return m_RawList.GetCount(); }
1699 
1700  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
1701  iterator end() { return iterator(&m_RawList, VMA_NULL); }
1702 
1703  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
1704  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
1705 
1706  void clear() { m_RawList.Clear(); }
1707  void push_back(const T& value) { m_RawList.PushBack(value); }
1708  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
1709  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
1710 
1711 private:
1712  VmaRawList<T> m_RawList;
1713 };
1714 
1715 #endif // #if VMA_USE_STL_LIST
1716 
1718 // class VmaMap
1719 
1720 #if VMA_USE_STL_UNORDERED_MAP
1721 
1722 #define VmaPair std::pair
1723 
1724 #define VMA_MAP_TYPE(KeyT, ValueT) \
1725  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
1726 
1727 #else // #if VMA_USE_STL_UNORDERED_MAP
1728 
// Minimal stand-in for std::pair, used as the element type of VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    // Value-initializes both members.
    VmaPair() : first(), second() { }
    // Copies the given key/value into the pair.
    VmaPair(const T1& srcFirst, const T2& srcSecond) : first(srcFirst), second(srcSecond) { }
};
1738 
1739 /* Class compatible with subset of interface of std::unordered_map.
1740 KeyT, ValueT must be POD because they will be stored in VmaVector.
1741 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying contiguous vector,
    // so any insert/erase invalidates them.
    typedef PairType* iterator;

    VmaMap(VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair, keeping the vector sorted by key (binary search + shift).
    void insert(const PairType& pair);
    // Returns iterator to an element with the given key, or end() if absent.
    iterator find(const KeyT& key);
    // Removes the element pointed to by it.
    void erase(iterator it);

private:
    // Storage: vector of pairs kept sorted ascending by .first.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
1762 
1763 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
1764 
// Comparator ordering VmaPair by its first member; the second overload allows
// heterogeneous comparison against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
1777 
1778 template<typename KeyT, typename ValueT>
1779 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
1780 {
1781  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
1782  m_Vector.data(),
1783  m_Vector.data() + m_Vector.size(),
1784  pair,
1785  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
1786  VectorInsert(m_Vector, indexToInsert, pair);
1787 }
1788 
1789 template<typename KeyT, typename ValueT>
1790 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
1791 {
1792  PairType* it = VmaBinaryFindFirstNotLess(
1793  m_Vector.data(),
1794  m_Vector.data() + m_Vector.size(),
1795  key,
1796  VmaPairFirstLess<KeyT, ValueT>());
1797  if((it != m_Vector.end()) && (it->first == key))
1798  return it;
1799  else
1800  return m_Vector.end();
1801 }
1802 
1803 template<typename KeyT, typename ValueT>
1804 void VmaMap<KeyT, ValueT>::erase(iterator it)
1805 {
1806  VectorRemove(m_Vector, it - m_Vector.begin());
1807 }
1808 
1809 #endif // #if VMA_USE_STL_UNORDERED_MAP
1810 
1811 /*
1812 Represents a region of VmaAllocation that is either assigned and returned as
1813 allocated memory block or free.
1814 */
struct VmaSuballocation
{
    // Byte offset of this region from the start of the VkDeviceMemory block.
    VkDeviceSize offset;
    // Size of the region in bytes.
    VkDeviceSize size;
    // VMA_SUBALLOCATION_TYPE_FREE marks an unused region; other values
    // record what kind of resource occupies it (buffer/image).
    VmaSuballocationType type;
};
1821 
1822 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
1823 
1824 // Parameters of an allocation.
struct VmaAllocationRequest
{
    // Free suballocation the new allocation will be carved out of.
    VmaSuballocationList::iterator freeSuballocationItem;
    // Final, alignment-adjusted offset of the new allocation within the block.
    VkDeviceSize offset;
};
1830 
1831 /* Single block of memory - VkDeviceMemory with all the data about its regions
1832 assigned or free. */
class VmaAllocation
{
public:
    // Underlying device memory handle; VK_NULL_HANDLE between Destroy() and Init().
    VkDeviceMemory m_hMemory;
    // Total size of the block in bytes.
    VkDeviceSize m_Size;
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations (quick capacity check).
    VkDeviceSize m_SumFreeSize;
    // All regions of the block, free and used, ordered by offset with no gaps.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    VmaAllocation(VmaAllocator hAllocator);

    // Destroy() must have been called first (memory handle returned to Vulkan).
    ~VmaAllocation()
    {
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(VkDeviceMemory newMemory, VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Tries to find a place for suballocation with given parameters inside this allocation.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator freeSuballocItem,
        VkDeviceSize* pOffset) const;

    // Returns true if this allocation is empty - contains only single free suballocation.
    bool IsEmpty() const;

    // Makes actual allocation based on request. Request must already be checked
    // and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize);

    // Frees suballocation assigned to given memory region.
    void Free(const VkMappedMemoryRange* pMemory);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free. Merges it with adjacent free
    // suballocations if applicable.
    void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
1910 
1911 // Allocation for an object that has its own private VkDeviceMemory.
struct VmaOwnAllocation
{
    // Dedicated VkDeviceMemory owned exclusively by one resource.
    VkDeviceMemory m_hMemory;
    // Size of the dedicated allocation in bytes.
    VkDeviceSize m_Size;
    // Kind of resource the memory was allocated for (buffer/image/unknown).
    VmaSuballocationType m_Type;
};
1918 
// Comparator ordering VmaOwnAllocation entries by their VkDeviceMemory handle
// value; the second overload allows heterogeneous binary search by bare handle.
struct VmaOwnAllocationMemoryHandleLess
{
    bool operator()(const VmaOwnAllocation& lhs, const VmaOwnAllocation& rhs) const
    {
        return lhs.m_hMemory < rhs.m_hMemory;
    }
    bool operator()(const VmaOwnAllocation& lhs, VkDeviceMemory rhsMem) const
    {
        return lhs.m_hMemory < rhsMem;
    }
};
1930 
1931 /* Sequence of VmaAllocation. Represents memory blocks allocated for a specific
1932 Vulkan memory type. */
struct VmaAllocationVector
{
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaAllocation*, VmaStlAllocator<VmaAllocation*> > m_Allocations;

    VmaAllocationVector(VmaAllocator hAllocator);
    ~VmaAllocationVector();

    bool IsEmpty() const { return m_Allocations.empty(); }

    // Tries to free memory from any if its Allocations.
    // Returns index of Allocation that the memory was freed from, or -1 if not found.
    // NOTE(review): return type is size_t, so "-1" presumably means (size_t)-1 - confirm at call sites.
    size_t Free(const VkMappedMemoryRange* pMemory);

    // Performs single step in sorting m_Allocations. They may not be fully sorted
    // after this call.
    void IncrementallySortAllocations();

    // Adds statistics of this AllocationVector to pStats.
    void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb) const;
#endif

private:
    // Owning allocator; source of allocation callbacks and device handle.
    VmaAllocator m_hAllocator;
};
1961 
1962 // Main allocator object.
struct VmaAllocator_T
{
    VkDevice m_hDevice;
    // True if the user supplied pAllocationCallbacks at creation time; when
    // false, GetAllocationCallbacks() returns null so Vulkan uses its defaults.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VkDeviceSize m_PreferredLargeHeapBlockSize;
    VkDeviceSize m_PreferredSmallHeapBlockSize;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // One block vector per Vulkan memory type, indexed by memory type index.
    VmaAllocationVector* m_pAllocations[VK_MAX_MEMORY_TYPES];
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyAllocation[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_AllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaOwnAllocation, VmaStlAllocator<VmaOwnAllocation> > OwnAllocationVectorType;
    OwnAllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];

    // Sorted by first (VkBuffer handle value).
    VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange) m_BufferToMemoryMap;
    VMA_MUTEX m_BufferToMemoryMapMutex;
    // Sorted by first (VkImage handle value).
    VMA_MAP_TYPE(VkImage, VkMappedMemoryRange) m_ImageToMemoryMap;
    VMA_MUTEX m_ImageToMemoryMapMutex;

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns the user-provided callbacks, or null for Vulkan's default allocator.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }

    VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;

    // Effective granularity: the device limit, possibly enlarged for debugging
    // via VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory,
        uint32_t* pMemoryTypeIndex);

    // Main deallocation function.
    void FreeMemory(const VkMappedMemoryRange* pMemory);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    VkPhysicalDevice m_PhysicalDevice;

    VkResult AllocateMemoryOfType(
        const VkMemoryRequirements& vkMemReq,
        const VmaMemoryRequirements& vmaMemReq,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VkMappedMemoryRange* pMemory);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateOwnMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        VkMappedMemoryRange* pMemory);

    // Tries to free pMemory as Own Memory. Returns true if found and freed.
    bool FreeOwnMemory(const VkMappedMemoryRange* pMemory);
};
2050 
2052 // Memory allocation #2 after VmaAllocator_T definition
2053 
// Convenience overload: allocates via the allocator's VkAllocationCallbacks.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
2058 
// Convenience overload: frees via the allocator's VkAllocationCallbacks.
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
2063 
// Allocates raw storage for one T with correct alignment.
// NOTE: memory is NOT constructed - caller must placement-new if T is non-POD.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
2069 
// Allocates raw storage for `count` elements of T with correct alignment.
// NOTE(review): sizeof(T) * count is not checked for overflow - callers are
// presumably trusted internal code with bounded counts; confirm.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
2075 
2076 template<typename T>
2077 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2078 {
2079  if(ptr != VMA_NULL)
2080  {
2081  ptr->~T();
2082  VmaFree(hAllocator, ptr);
2083  }
2084 }
2085 
2086 template<typename T>
2087 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2088 {
2089  if(ptr != VMA_NULL)
2090  {
2091  for(size_t i = count; i--; )
2092  ptr[i].~T();
2093  VmaFree(hAllocator, ptr);
2094  }
2095 }
2096 
2098 // VmaStringBuilder
2099 
2100 #if VMA_STATS_STRING_ENABLED
2101 
// Append-only string builder used to produce the JSON stats dump.
// Backed by a VmaVector<char>; the buffer is NOT NUL-terminated -
// use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddBool(bool b) { Add(b ? "true" : "false"); }
    void AddNull() { Add("null"); }
    // Appends pStr as a double-quoted, escaped JSON string.
    void AddString(const char* pStr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
2121 
2122 void VmaStringBuilder::Add(const char* pStr)
2123 {
2124  const size_t strLen = strlen(pStr);
2125  if(strLen > 0)
2126  {
2127  const size_t oldCount = m_Data.size();
2128  m_Data.resize(oldCount + strLen);
2129  memcpy(m_Data.data() + oldCount, pStr, strLen);
2130  }
2131 }
2132 
void VmaStringBuilder::AddNumber(uint32_t num)
{
    // 10 decimal digits max for uint32_t + terminating NUL = 11.
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
2139 
void VmaStringBuilder::AddNumber(uint64_t num)
{
    // 20 decimal digits max for uint64_t + terminating NUL = 21.
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
2146 
2147 void VmaStringBuilder::AddString(const char* pStr)
2148 {
2149  Add('"');
2150  const size_t strLen = strlen(pStr);
2151  for(size_t i = 0; i < strLen; ++i)
2152  {
2153  char ch = pStr[i];
2154  if(ch == '\'')
2155  Add("\\\\");
2156  else if(ch == '"')
2157  Add("\\\"");
2158  else if(ch >= 32)
2159  Add(ch);
2160  else switch(ch)
2161  {
2162  case '\n':
2163  Add("\\n");
2164  break;
2165  case '\r':
2166  Add("\\r");
2167  break;
2168  case '\t':
2169  Add("\\t");
2170  break;
2171  default:
2172  VMA_ASSERT(0 && "Character not currently supported.");
2173  break;
2174  }
2175  }
2176  Add('"');
2177 }
2178 
2180 
2181 // Correspond to values of enum VmaSuballocationType.
// Index i holds the printable name of VmaSuballocationType value i;
// keep this array in sync with the enum's order.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
2190 
// Serializes one VmaStatInfo as a JSON object into sb.
// The emitted keys must stay stable - external tools parse this output.
static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
{
    sb.Add("{ \"Allocations\": ");
    sb.AddNumber(stat.AllocationCount);
    sb.Add(", \"Suballocations\": ");
    sb.AddNumber(stat.SuballocationCount);
    sb.Add(", \"UnusedRanges\": ");
    sb.AddNumber(stat.UnusedRangeCount);
    sb.Add(", \"UsedBytes\": ");
    sb.AddNumber(stat.UsedBytes);
    sb.Add(", \"UnusedBytes\": ");
    sb.AddNumber(stat.UnusedBytes);
    sb.Add(", \"SuballocationSize\": { \"Min\": ");
    sb.AddNumber(stat.SuballocationSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.SuballocationSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.SuballocationSizeMax);
    sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
    sb.AddNumber(stat.UnusedRangeSizeMin);
    sb.Add(", \"Avg\": ");
    sb.AddNumber(stat.UnusedRangeSizeAvg);
    sb.Add(", \"Max\": ");
    sb.AddNumber(stat.UnusedRangeSizeMax);
    sb.Add(" } }");
}
2217 
2218 #endif // #if VMA_STATS_STRING_ENABLED
2219 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to; the second overload enables heterogeneous
// binary search against a bare size (used by m_FreeSuballocationsBySize).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
2235 
// Constructs an empty, uninitialized block; Init() must be called before use.
// Both containers allocate through the owning allocator's callbacks.
VmaAllocation::VmaAllocation(VmaAllocator hAllocator) :
    m_hMemory(VK_NULL_HANDLE),
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
2245 
// Takes ownership of newMemory and resets bookkeeping so the whole block is
// one single free suballocation.
void VmaAllocation::Init(VkDeviceMemory newMemory, VkDeviceSize newSize)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_hMemory = newMemory;
    m_Size = newSize;
    m_FreeCount = 1;
    m_SumFreeSize = newSize;

    m_Suballocations.clear();
    m_FreeSuballocationsBySize.clear();

    // The entire block starts as a single free region at offset 0.
    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = newSize;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

    m_Suballocations.push_back(suballoc);
    // Register an iterator to that (last and only) element in the by-size index.
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
2268 
// Returns the device memory to Vulkan and marks the block destroyed
// (the destructor asserts m_hMemory is null).
void VmaAllocation::Destroy(VmaAllocator allocator)
{
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
    m_hMemory = VK_NULL_HANDLE;
}
2275 
// Consistency check over all internal data structures; returns false on the
// first violated invariant. Intended for heavy-assert/debug builds.
bool VmaAllocation::Validate() const
{
    if((m_hMemory == VK_NULL_HANDLE) ||
        (m_Size == 0) ||
        m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        // (Suballocations must be contiguous, with no gaps or overlaps.)
        if(subAlloc.offset != calculatedOffset)
            return false;

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
            return false;
        prevFree = currFree;

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only free regions above the threshold belong in the by-size index.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                ++freeSuballocationsToRegister;
        }

        calculatedOffset += subAlloc.size;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
        return false;

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
            return false;
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
            return false;

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    return
        (calculatedOffset == m_Size) &&
        (calculatedSumFreeSize == m_SumFreeSize) &&
        (calculatedFreeCount == m_FreeCount);
}
2350 
2351 /*
2352 How many suitable free suballocations to analyze before choosing best one.
2353 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
2354  be chosen.
2355 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
2356  suballocations will be analyzed and best one will be chosen.
2357 - Any other value is also acceptable.
2358 */
2359 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
2360 
// Searches this block for a free region that can hold an allocation of
// allocSize/allocAlignment of the given type, honoring bufferImageGranularity.
// On success fills pAllocationRequest (chosen free item + final offset) and
// returns true; returns false if no suitable region exists.
bool VmaAllocation::CreateAllocationRequest(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this allocation to fulfill the request: Early return.
    if(m_SumFreeSize < allocSize)
        return false;

    // Old brute-force algorithm, linearly searching suballocations.
    // (Kept for reference; superseded by the binary search over
    // m_FreeSuballocationsBySize below.)
    /*
    uint32_t suitableSuballocationsFound = 0;
    for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
        suballocItem != VMA_NULL &&
            suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
        suballocItem = suballocItem->Next)
    {
        if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            VkDeviceSize offset = 0, cost = 0;
            if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
            {
                ++suitableSuballocationsFound;
                if(cost < costLimit)
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    pAllocationRequest->cost = cost;
                    if(cost == 0)
                        return true;
                    costLimit = cost;
                    betterSuballocationFound = true;
                }
            }
        }
    }
    */

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            // Candidates are then tried smallest-first (best fit).
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                // A size match may still fail alignment/granularity checks.
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations (worst fit).
            for(size_t index = freeSuballocCount; index--; )
            {
                VkDeviceSize offset = 0;
                const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
                if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
                {
                    pAllocationRequest->freeSuballocationItem = suballocItem;
                    pAllocationRequest->offset = offset;
                    return true;
                }
            }
        }
    }

    return false;
}
2450 
// Tests whether an allocation of allocSize bytes with allocAlignment and the
// given type fits inside the free suballocation *freeSuballocItem, honoring
// VMA_DEBUG_MARGIN and Vulkan bufferImageGranularity.
// On success returns true and stores the final, aligned offset in *pOffset.
// On failure returns false; *pOffset may have been partially written and must
// be ignored by the caller.
bool VmaAllocation::CheckAllocation(
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator freeSuballocItem,
    VkDeviceSize* pOffset) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    const VmaSuballocation& suballoc = *freeSuballocItem;
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

    // Size of this suballocation is too small for this request: Early return.
    if(suballoc.size < allocSize)
        return false;

    // Start from offset equal to beginning of this suballocation.
    *pOffset = suballoc.offset;

    // Apply VMA_DEBUG_MARGIN at the beginning (skipped for the very first
    // suballocation, which starts at the beginning of the whole block).
    if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
        *pOffset += VMA_DEBUG_MARGIN;

    // Apply alignment: the stricter of the caller's requirement and VMA_DEBUG_ALIGNMENT.
    const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
    *pOffset = VmaAlignUp(*pOffset, alignment);

    // Check previous suballocations for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if(bufferImageGranularity > 1)
    {
        bool bufferImageGranularityConflict = false;
        VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
        // Walk backward only while predecessors share the same granularity page
        // as the candidate offset.
        while(prevSuballocItem != m_Suballocations.cbegin())
        {
            --prevSuballocItem;
            const VmaSuballocation& prevSuballoc = *prevSuballocItem;
            if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        // Resolve the conflict by pushing the offset to the next granularity page.
        if(bufferImageGranularityConflict)
            *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    }

    // Calculate padding at the beginning based on current offset.
    const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

    // Calculate required margin at the end if this is not last suballocation.
    VmaSuballocationList::const_iterator next = freeSuballocItem;
    ++next;
    const VkDeviceSize requiredEndMargin =
        (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

    // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        return false;

    // Check next suballocations for BufferImageGranularity conflicts.
    // If conflict exists, allocation cannot be made here.
    if(bufferImageGranularity > 1)
    {
        VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
        ++nextSuballocItem;
        while(nextSuballocItem != m_Suballocations.cend())
        {
            const VmaSuballocation& nextSuballoc = *nextSuballocItem;
            if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    return false;
            }
            else
                // Already on next page.
                break;
            ++nextSuballocItem;
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
2545 
2546 bool VmaAllocation::IsEmpty() const
2547 {
2548  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
2549 }
2550 
// Converts the free suballocation selected in `request` into a used
// suballocation of `allocSize` bytes of the given type. Space left unused
// before (alignment/margin padding) or after the new allocation is re-inserted
// as new free suballocations; m_FreeCount and m_SumFreeSize are updated.
void VmaAllocation::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize)
{
    VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.freeSuballocationItem;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before suballoc.size is overwritten below,
    // because unregistering looks the item up by its current size.
    UnregisterFreeSuballocation(request.freeSuballocationItem);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.freeSuballocationItem;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free range is gone, each padding range adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
        ++m_FreeCount;
    if(paddingEnd > 0)
        ++m_FreeCount;
    m_SumFreeSize -= allocSize;
}
2608 
// Marks the given suballocation as free and coalesces it with adjacent free
// neighbors (next first, then previous), keeping m_FreeCount, m_SumFreeSize and
// m_FreeSuballocationsBySize consistent.
void VmaAllocation::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
        mergeWithNext = true;

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
            mergeWithPrev = true;
    }

    // Next neighbor must be unregistered before MergeFreeWithNext erases it.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem; re-register it because its size changed.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
    }
    else
        // No merge with previous: register the freed item itself.
        RegisterFreeSuballocation(suballocItem);
}
2651 
2652 void VmaAllocation::Free(const VkMappedMemoryRange* pMemory)
2653 {
2654  // If suballocation to free has offset smaller than half of allocation size, search forward.
2655  // Otherwise search backward.
2656  const bool forwardDirection = pMemory->offset < (m_Size / 2);
2657  if(forwardDirection)
2658  {
2659  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2660  suballocItem != m_Suballocations.end();
2661  ++suballocItem)
2662  {
2663  VmaSuballocation& suballoc = *suballocItem;
2664  if(suballoc.offset == pMemory->offset)
2665  {
2666  FreeSuballocation(suballocItem);
2667  VMA_HEAVY_ASSERT(Validate());
2668  return;
2669  }
2670  }
2671  VMA_ASSERT(0 && "Not found!");
2672  }
2673  else
2674  {
2675  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2676  suballocItem != m_Suballocations.end();
2677  ++suballocItem)
2678  {
2679  VmaSuballocation& suballoc = *suballocItem;
2680  if(suballoc.offset == pMemory->offset)
2681  {
2682  FreeSuballocation(suballocItem);
2683  VMA_HEAVY_ASSERT(Validate());
2684  return;
2685  }
2686  }
2687  VMA_ASSERT(0 && "Not found!");
2688  }
2689 }
2690 
2691 #if VMA_STATS_STRING_ENABLED
2692 
2693 void VmaAllocation::PrintDetailedMap(class VmaStringBuilder& sb) const
2694 {
2695  sb.Add("{\n\t\t\t\"Bytes\": ");
2696  sb.AddNumber(m_Size);
2697  sb.Add(",\n\t\t\t\"FreeBytes\": ");
2698  sb.AddNumber(m_SumFreeSize);
2699  sb.Add(",\n\t\t\t\"Suballocations\": ");
2700  sb.AddNumber(m_Suballocations.size());
2701  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
2702  sb.AddNumber(m_FreeCount);
2703  sb.Add(",\n\t\t\t\"SuballocationList\": [");
2704 
2705  size_t i = 0;
2706  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2707  suballocItem != m_Suballocations.cend();
2708  ++suballocItem, ++i)
2709  {
2710  if(i > 0)
2711  sb.Add(",\n\t\t\t\t{ \"Type\": ");
2712  else
2713  sb.Add("\n\t\t\t\t{ \"Type\": ");
2714  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
2715  sb.Add(", \"Size\": ");
2716  sb.AddNumber(suballocItem->size);
2717  sb.Add(", \"Offset\": ");
2718  sb.AddNumber(suballocItem->offset);
2719  sb.Add(" }");
2720  }
2721 
2722  sb.Add("\n\t\t\t]\n\t\t}");
2723 }
2724 
2725 #endif // #if VMA_STATS_STRING_ENABLED
2726 
2727 void VmaAllocation::MergeFreeWithNext(VmaSuballocationList::iterator item)
2728 {
2729  VMA_ASSERT(item != m_Suballocations.end());
2730  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2731 
2732  VmaSuballocationList::iterator nextItem = item;
2733  ++nextItem;
2734  VMA_ASSERT(nextItem != m_Suballocations.end());
2735  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
2736 
2737  item->size += nextItem->size;
2738  --m_FreeCount;
2739  m_Suballocations.erase(nextItem);
2740 }
2741 
2742 void VmaAllocation::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
2743 {
2744  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2745  VMA_ASSERT(item->size > 0);
2746 
2747  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2748  {
2749  if(m_FreeSuballocationsBySize.empty())
2750  m_FreeSuballocationsBySize.push_back(item);
2751  else
2752  {
2753  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2754  m_FreeSuballocationsBySize.data(),
2755  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2756  item,
2757  VmaSuballocationItemSizeLess());
2758  size_t index = it - m_FreeSuballocationsBySize.data();
2759  VectorInsert(m_FreeSuballocationsBySize, index, item);
2760  }
2761  }
2762 }
2763 
// Removes item from m_FreeSuballocationsBySize. Because the vector is sorted
// only by size, several entries can share the same size; binary search finds
// the first entry of that size, then a linear scan over the equal-size run
// locates the exact iterator. Items below the registration threshold were
// never stored and are ignored.
void VmaAllocation::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the run of equal-size entries means the item is missing.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }
}
2790 
// Resets outInfo for accumulation: counters and sums at zero, minimums
// saturated to UINT64_MAX so the first real sample always replaces them.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.SuballocationSizeMin = UINT64_MAX;
    outInfo.UnusedRangeSizeMin = UINT64_MAX;
}
2797 
2798 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaAllocation& alloc)
2799 {
2800  outInfo.AllocationCount = 1;
2801 
2802  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
2803  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
2804  outInfo.UnusedRangeCount = alloc.m_FreeCount;
2805 
2806  outInfo.UnusedBytes = alloc.m_SumFreeSize;
2807  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
2808 
2809  outInfo.SuballocationSizeMin = UINT64_MAX;
2810  outInfo.SuballocationSizeMax = 0;
2811  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2812  outInfo.UnusedRangeSizeMax = 0;
2813 
2814  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
2815  suballocItem != alloc.m_Suballocations.cend();
2816  ++suballocItem)
2817  {
2818  const VmaSuballocation& suballoc = *suballocItem;
2819  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
2820  {
2821  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
2822  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
2823  }
2824  else
2825  {
2826  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
2827  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
2828  }
2829  }
2830 }
2831 
2832 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
2833 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
2834 {
2835  inoutInfo.AllocationCount += srcInfo.AllocationCount;
2836  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
2837  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
2838  inoutInfo.UsedBytes += srcInfo.UsedBytes;
2839  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
2840  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
2841  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
2842  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
2843  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
2844 }
2845 
2846 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
2847 {
2848  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
2849  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
2850  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
2851  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
2852 }
2853 
// Constructs an empty vector of memory blocks for the given allocator,
// routing the internal vector's storage through the allocator's
// VkAllocationCallbacks.
VmaAllocationVector::VmaAllocationVector(VmaAllocator hAllocator) :
    m_hAllocator(hAllocator),
    m_Allocations(VmaStlAllocator<VmaAllocation*>(hAllocator->GetAllocationCallbacks()))
{
}
2859 
2860 VmaAllocationVector::~VmaAllocationVector()
2861 {
2862  for(size_t i = m_Allocations.size(); i--; )
2863  {
2864  m_Allocations[i]->Destroy(m_hAllocator);
2865  vma_delete(m_hAllocator, m_Allocations[i]);
2866  }
2867 }
2868 
2869 size_t VmaAllocationVector::Free(const VkMappedMemoryRange* pMemory)
2870 {
2871  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2872  {
2873  VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2874  VMA_ASSERT(pAlloc);
2875  if(pAlloc->m_hMemory == pMemory->memory)
2876  {
2877  pAlloc->Free(pMemory);
2878  VMA_HEAVY_ASSERT(pAlloc->Validate());
2879  return allocIndex;
2880  }
2881  }
2882 
2883  return (size_t)-1;
2884 }
2885 
2886 void VmaAllocationVector::IncrementallySortAllocations()
2887 {
2888  // Bubble sort only until first swap.
2889  for(size_t i = 1; i < m_Allocations.size(); ++i)
2890  {
2891  if(m_Allocations[i - 1]->m_SumFreeSize > m_Allocations[i]->m_SumFreeSize)
2892  {
2893  VMA_SWAP(m_Allocations[i - 1], m_Allocations[i]);
2894  return;
2895  }
2896  }
2897 }
2898 
2899 #if VMA_STATS_STRING_ENABLED
2900 
2901 void VmaAllocationVector::PrintDetailedMap(class VmaStringBuilder& sb) const
2902 {
2903  for(size_t i = 0; i < m_Allocations.size(); ++i)
2904  {
2905  if(i > 0)
2906  sb.Add(",\n\t\t");
2907  else
2908  sb.Add("\n\t\t");
2909  m_Allocations[i]->PrintDetailedMap(sb);
2910  }
2911 }
2912 
2913 #endif // #if VMA_STATS_STRING_ENABLED
2914 
2915 void VmaAllocationVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
2916 {
2917  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2918  {
2919  const VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2920  VMA_ASSERT(pAlloc);
2921  VMA_HEAVY_ASSERT(pAlloc->Validate());
2922  VmaStatInfo allocationStatInfo;
2923  CalcAllocationStatInfo(allocationStatInfo, *pAlloc);
2924  VmaAddStatInfo(pStats->total, allocationStatInfo);
2925  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
2926  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
2927  }
2928 }
2929 
2931 // VmaAllocator_T
2932 
// Initializes the allocator: caches physical-device and memory properties,
// resolves preferred block sizes (falling back to library defaults when the
// user passed 0), and creates one block vector plus one own-allocation vector
// per Vulkan memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PreferredSmallHeapBlockSize(0),
    m_BufferToMemoryMap(VmaStlAllocator< VmaPair<VkBuffer, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks)),
    m_ImageToMemoryMap(VmaStlAllocator< VmaPair<VkImage, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    // Zero all aggregate members before they are filled below.
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pAllocations, 0, sizeof(m_pAllocations));
    memset(&m_HasEmptyAllocation, 0, sizeof(m_HasEmptyAllocation));
    memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));

    // 0 from the user means "use the library default".
    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
        pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);

    vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);

    // One per memory type; GetMemoryTypeCount() is valid only after the
    // vkGetPhysicalDeviceMemoryProperties call above.
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    {
        m_pAllocations[i] = vma_new(this, VmaAllocationVector)(this);
        m_pOwnAllocations[i] = vma_new(this, OwnAllocationVectorType)(VmaStlAllocator<VmaOwnAllocation>(GetAllocationCallbacks()));
    }
}
2967 
// Destroys the allocator: destroys any images/buffers still registered in the
// internal maps, frees all own (dedicated) VkDeviceMemory blocks, and finally
// deletes the per-memory-type vectors in reverse order.
VmaAllocator_T::~VmaAllocator_T()
{
    for(VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = m_ImageToMemoryMap.begin();
        it != m_ImageToMemoryMap.end();
        ++it)
    {
        vkDestroyImage(m_hDevice, it->first, GetAllocationCallbacks());
    }

    for(VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = m_BufferToMemoryMap.begin();
        it != m_BufferToMemoryMap.end();
        ++it)
    {
        vkDestroyBuffer(m_hDevice, it->first, GetAllocationCallbacks());
    }

    // Free dedicated allocations directly; they are not owned by any
    // VmaAllocationVector.
    for(uint32_t typeIndex = 0; typeIndex < GetMemoryTypeCount(); ++typeIndex)
    {
        OwnAllocationVectorType* pOwnAllocations = m_pOwnAllocations[typeIndex];
        VMA_ASSERT(pOwnAllocations);
        for(size_t allocationIndex = 0; allocationIndex < pOwnAllocations->size(); ++allocationIndex)
        {
            const VmaOwnAllocation& ownAlloc = (*pOwnAllocations)[allocationIndex];
            vkFreeMemory(m_hDevice, ownAlloc.m_hMemory, GetAllocationCallbacks());
        }
    }

    // Delete the vectors themselves, in reverse order of creation.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pAllocations[i]);
        vma_delete(this, m_pOwnAllocations[i]);
    }
}
3001 
3002 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
3003 {
3004  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
3005  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
3006  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
3007 }
3008 
// Allocates vkMemReq.size bytes from the given Vulkan memory type and fills
// *pMemory with the resulting memory handle, offset and size.
// Strategy, in order:
//   1. Suballocate from an existing block of this memory type.
//   2. Allocate a new block of preferredBlockSize.
//   3./4. If that fails, retry with half, then a quarter of that size.
//   5. As a last resort, make a dedicated ("own") allocation of exactly
//      vkMemReq.size.
// Requests with vmaMemReq.ownMemory set, or larger than half the preferred
// block size, go straight to a dedicated allocation.
VkResult VmaAllocator_T::AllocateMemoryOfType(
    const VkMemoryRequirements& vkMemReq,
    const VmaMemoryRequirements& vmaMemReq,
    uint32_t memTypeIndex,
    VmaSuballocationType suballocType,
    VkMappedMemoryRange* pMemory)
{
    VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);

    pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    pMemory->pNext = VMA_NULL;
    pMemory->size = vkMemReq.size;

    const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
    // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
    const bool ownMemory =
        vmaMemReq.ownMemory ||
        VMA_DEBUG_ALWAYS_OWN_MEMORY ||
        ((vmaMemReq.neverAllocate == false) && (vkMemReq.size > preferredBlockSize / 2));

    if(ownMemory)
    {
        // ownMemory and neverAllocate are contradictory.
        if(vmaMemReq.neverAllocate)
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        else
            return AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
    }
    else
    {
        // All access to the block vector of this memory type is serialized here.
        VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
        VmaAllocationVector* const allocationVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(allocationVector);

        // 1. Search existing allocations.
        // Forward order - prefer blocks with smallest amount of free space.
        for(size_t allocIndex = 0; allocIndex < allocationVector->m_Allocations.size(); ++allocIndex )
        {
            VmaAllocation* const pAlloc = allocationVector->m_Allocations[allocIndex];
            VMA_ASSERT(pAlloc);
            VmaAllocationRequest allocRequest = {};
            // Check if can allocate from pAlloc.
            if(pAlloc->CreateAllocationRequest(
                GetBufferImageGranularity(),
                vkMemReq.size,
                vkMemReq.alignment,
                suballocType,
                &allocRequest))
            {
                // We no longer have an empty Allocation.
                if(pAlloc->IsEmpty())
                    m_HasEmptyAllocation[memTypeIndex] = false;
                // Allocate from this pAlloc.
                pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
                // Return VkDeviceMemory and offset (size already filled above).
                pMemory->memory = pAlloc->m_hMemory;
                pMemory->offset = allocRequest.offset;
                VMA_HEAVY_ASSERT(pAlloc->Validate());
                VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
                return VK_SUCCESS;
            }
        }

        // 2. Create new Allocation.
        if(vmaMemReq.neverAllocate)
        {
            VMA_DEBUG_LOG(" FAILED due to VmaMemoryRequirements::neverAllocate");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        else
        {
            // Start with full preferredBlockSize.
            VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
            allocInfo.memoryTypeIndex = memTypeIndex;
            allocInfo.allocationSize = preferredBlockSize;
            VkDeviceMemory mem = VK_NULL_HANDLE;
            VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
            if(res < 0)
            {
                // 3. Try half the size.
                allocInfo.allocationSize /= 2;
                if(allocInfo.allocationSize >= vkMemReq.size)
                {
                    res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
                    if(res < 0)
                    {
                        // 4. Try quarter the size.
                        allocInfo.allocationSize /= 2;
                        if(allocInfo.allocationSize >= vkMemReq.size)
                        {
                            res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
                        }
                    }
                }
            }
            if(res < 0)
            {
                // 5. Try OwnAlloc.
                res = AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
                if(res == VK_SUCCESS)
                {
                    // Succeeded: AllocateOwnMemory function already filled pMemory, nothing more to do here.
                    VMA_DEBUG_LOG(" Allocated as OwnMemory");
                    return VK_SUCCESS;
                }
                else
                {
                    // Everything failed: Return error code.
                    VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
                    return res;
                }
            }

            // New VkDeviceMemory successfully created. Create new Allocation for it.
            VmaAllocation* const pAlloc = vma_new(this, VmaAllocation)(this);
            pAlloc->Init(mem, allocInfo.allocationSize);

            allocationVector->m_Allocations.push_back(pAlloc);

            // Allocate from pAlloc. Because it is empty, allocRequest can be trivially filled.
            VmaAllocationRequest allocRequest = {};
            allocRequest.freeSuballocationItem = pAlloc->m_Suballocations.begin();
            allocRequest.offset = 0;
            pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
            pMemory->memory = mem;
            pMemory->offset = allocRequest.offset;
            VMA_HEAVY_ASSERT(pAlloc->Validate());
            VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
            return VK_SUCCESS;
        }
    }
}
3140 
// Makes a dedicated ("own") VkDeviceMemory allocation of exactly `size` bytes
// for the given memory type, registers it (kept sorted by memory handle) in
// m_pOwnAllocations[memTypeIndex], and fills *pMemory with the whole range
// (offset 0). Returns the vkAllocateMemory error code on failure.
VkResult VmaAllocator_T::AllocateOwnMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    VkMappedMemoryRange* pMemory)
{
    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Allocate VkDeviceMemory.
    VmaOwnAllocation ownAlloc = {};
    ownAlloc.m_Size = size;
    ownAlloc.m_Type = suballocType;
    VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &ownAlloc.m_hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Register it in m_pOwnAllocations, keeping the vector sorted by
    // VkDeviceMemory handle so FreeOwnMemory can binary-search it.
    VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
    OwnAllocationVectorType* ownAllocations = m_pOwnAllocations[memTypeIndex];
    VMA_ASSERT(ownAllocations);
    VmaOwnAllocation* const pOwnAllocationsBeg = ownAllocations->data();
    VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + ownAllocations->size();
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        pOwnAllocationsBeg,
        pOwnAllocationsEnd,
        ownAlloc,
        VmaOwnAllocationMemoryHandleLess()) - pOwnAllocationsBeg;
    VectorInsert(*ownAllocations, indexToInsert, ownAlloc);

    // Return parameters of the allocation: the dedicated block is used whole.
    pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    pMemory->pNext = VMA_NULL;
    pMemory->memory = ownAlloc.m_hMemory;
    pMemory->offset = 0;
    pMemory->size = size;

    VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
3186 
// Top-level allocation entry point: finds a memory type satisfying both
// Vulkan's vkMemReq.memoryTypeBits and the user's vmaMemReq, then tries
// AllocateMemoryOfType. If that type fails, it is removed from the bit mask
// and the next compatible type is tried, until none remain. On success the
// chosen type index is optionally returned through pMemoryTypeIndex.
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    const VmaMemoryRequirements& vmaMemReq,
    VmaSuballocationType suballocType,
    VkMappedMemoryRange* pMemory,
    uint32_t* pMemoryTypeIndex)
{
    if(vmaMemReq.ownMemory && vmaMemReq.neverAllocate)
    {
        VMA_ASSERT(0 && "Specifying VmaMemoryRequirements::ownMemory && VmaMemoryRequirements::neverAllocate makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    // Bit mask of memory Vulkan types acceptable for this allocation.
    uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
    if(res == VK_SUCCESS)
    {
        res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
        // Succeeded on first try.
        if(res == VK_SUCCESS)
        {
            if(pMemoryTypeIndex != VMA_NULL)
                *pMemoryTypeIndex = memTypeIndex;
            return res;
        }
        // Allocation from this memory type failed. Try other compatible memory types.
        else
        {
            // The loop terminates: each iteration clears one bit from
            // memoryTypeBits, so vmaFindMemoryTypeIndex eventually fails.
            for(;;)
            {
                // Remove old memTypeIndex from list of possibilities.
                memoryTypeBits &= ~(1u << memTypeIndex);
                // Find alternative memTypeIndex.
                res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
                if(res == VK_SUCCESS)
                {
                    res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
                    // Allocation from this alternative memory type succeeded.
                    if(res == VK_SUCCESS)
                    {
                        if(pMemoryTypeIndex != VMA_NULL)
                            *pMemoryTypeIndex = memTypeIndex;
                        return res;
                    }
                    // else: Allocation from this memory type failed. Try next one - next loop iteration.
                }
                // No other matching memory type index could be found.
                else
                    // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }
    // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    else
        return res;
}
3246 
// Frees a range previously returned by AllocateMemory. Searches the block
// vectors of every memory type first; if the range is not found there, falls
// back to FreeOwnMemory for dedicated allocations. At most one completely
// empty block is kept per memory type; a second empty block is destroyed
// (outside the mutex, to shorten the critical section).
void VmaAllocator_T::FreeMemory(const VkMappedMemoryRange* pMemory)
{
    uint32_t memTypeIndex = 0;
    bool found = false;
    VmaAllocation* allocationToDelete = VMA_NULL;
    // Check all memory types because we don't know which one does pMemory come from.
    for(; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
        VmaAllocationVector* const pAllocationVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(pAllocationVector);
        // Try to free pMemory from pAllocationVector.
        const size_t allocIndex = pAllocationVector->Free(pMemory);
        if(allocIndex != (size_t)-1)
        {
            VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
            found = true;
            VmaAllocation* const pAlloc = pAllocationVector->m_Allocations[allocIndex];
            VMA_ASSERT(pAlloc);
            // pAlloc became empty after this deallocation.
            if(pAlloc->IsEmpty())
            {
                // Already has empty Allocation. We don't want to have two, so delete this one.
                if(m_HasEmptyAllocation[memTypeIndex])
                {
                    allocationToDelete = pAlloc;
                    VectorRemove(pAllocationVector->m_Allocations, allocIndex);
                    break;
                }
                // We now have first empty Allocation.
                else
                    m_HasEmptyAllocation[memTypeIndex] = true;
            }
            // Must be called after allocIndex is used, because later it may become invalid!
            pAllocationVector->IncrementallySortAllocations();
            break;
        }
    }
    if(found)
    {
        // Destruction of a free Allocation. Deferred until this point, outside of mutex
        // lock, for performance reason.
        if(allocationToDelete != VMA_NULL)
        {
            VMA_DEBUG_LOG(" Deleted empty allocation");
            allocationToDelete->Destroy(this);
            vma_delete(this, allocationToDelete);
        }
        return;
    }

    // pMemory not found in allocations. Try free it as Own Memory.
    if(FreeOwnMemory(pMemory))
        return;

    // pMemory not found as Own Memory either.
    VMA_ASSERT(0 && "Not found. Trying to free memory not allocated using this allocator (or some other bug).");
}
3305 
// Computes memory usage statistics into *pStats: initializes all entries,
// accumulates every block vector's stats into the total and into the matching
// per-memory-type and per-heap entries, then derives the average sizes.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock allocationsLock(m_AllocationsMutex[memTypeIndex]);
        const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
        const VmaAllocationVector* const allocVector = m_pAllocations[memTypeIndex];
        VMA_ASSERT(allocVector);
        allocVector->AddStats(pStats, memTypeIndex, heapIndex);
    }

    // Turn accumulated sums into averages.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
3329 
3330 bool VmaAllocator_T::FreeOwnMemory(const VkMappedMemoryRange* pMemory)
3331 {
3332  VkDeviceMemory vkMemory = VK_NULL_HANDLE;
3333 
3334  // Check all memory types because we don't know which one does pMemory come from.
3335  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3336  {
3337  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3338  OwnAllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex];
3339  VMA_ASSERT(pOwnAllocations);
3340  VmaOwnAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
3341  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
3342  VmaOwnAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
3343  pOwnAllocationsBeg,
3344  pOwnAllocationsEnd,
3345  pMemory->memory,
3346  VmaOwnAllocationMemoryHandleLess());
3347  if((pOwnAllocationIt != pOwnAllocationsEnd) &&
3348  (pOwnAllocationIt->m_hMemory == pMemory->memory))
3349  {
3350  VMA_ASSERT(pMemory->size == pOwnAllocationIt->m_Size && pMemory->offset == 0);
3351  vkMemory = pOwnAllocationIt->m_hMemory;
3352  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
3353  VectorRemove(*pOwnAllocations, ownAllocationIndex);
3354  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
3355  break;
3356  }
3357  }
3358 
3359  // Found. Free VkDeviceMemory deferred until this point, outside of mutex lock,
3360  // for performance reason.
3361  if(vkMemory != VK_NULL_HANDLE)
3362  {
3363  vkFreeMemory(m_hDevice, vkMemory, GetAllocationCallbacks());
3364  return true;
3365  }
3366  else
3367  return false;
3368 }
3369 
3370 #if VMA_STATS_STRING_ENABLED
3371 
3372 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
3373 {
3374  bool ownAllocationsStarted = false;
3375  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3376  {
3377  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex]);
3378  OwnAllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex];
3379  VMA_ASSERT(pOwnAllocVector);
3380  if(pOwnAllocVector->empty() == false)
3381  {
3382  if(ownAllocationsStarted)
3383  sb.Add(",\n\t\"Type ");
3384  else
3385  {
3386  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
3387  ownAllocationsStarted = true;
3388  }
3389  sb.AddNumber(memTypeIndex);
3390  sb.Add("\": [");
3391 
3392  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
3393  {
3394  const VmaOwnAllocation& ownAlloc = (*pOwnAllocVector)[i];
3395  if(i > 0)
3396  sb.Add(",\n\t\t{ \"Size\": ");
3397  else
3398  sb.Add("\n\t\t{ \"Size\": ");
3399  sb.AddNumber(ownAlloc.m_Size);
3400  sb.Add(", \"Type\": ");
3401  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[ownAlloc.m_Type]);
3402  sb.Add(" }");
3403  }
3404 
3405  sb.Add("\n\t]");
3406  }
3407  }
3408  if(ownAllocationsStarted)
3409  sb.Add("\n}");
3410 
3411  {
3412  bool allocationsStarted = false;
3413  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3414  {
3415  VmaMutexLock globalAllocationsLock(m_AllocationsMutex[memTypeIndex]);
3416  if(m_pAllocations[memTypeIndex]->IsEmpty() == false)
3417  {
3418  if(allocationsStarted)
3419  sb.Add(",\n\t\"Type ");
3420  else
3421  {
3422  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
3423  allocationsStarted = true;
3424  }
3425  sb.AddNumber(memTypeIndex);
3426  sb.Add("\": [");
3427 
3428  m_pAllocations[memTypeIndex]->PrintDetailedMap(sb);
3429 
3430  sb.Add("\n\t]");
3431  }
3432  }
3433  if(allocationsStarted)
3434  sb.Add("\n}");
3435  }
3436 }
3437 
3438 #endif // #if VMA_STATS_STRING_ENABLED
3439 
3440 static VkResult AllocateMemoryForImage(
3441  VmaAllocator allocator,
3442  VkImage image,
3443  const VmaMemoryRequirements* pMemoryRequirements,
3444  VmaSuballocationType suballocType,
3445  VkMappedMemoryRange* pMemory,
3446  uint32_t* pMemoryTypeIndex)
3447 {
3448  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3449 
3450  VkMemoryRequirements vkMemReq = {};
3451  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
3452 
3453  return allocator->AllocateMemory(
3454  vkMemReq,
3455  *pMemoryRequirements,
3456  suballocType,
3457  pMemory,
3458  pMemoryTypeIndex);
3459 }
3460 
3462 // Public interface
3463 
3464 VkResult vmaCreateAllocator(
3465  const VmaAllocatorCreateInfo* pCreateInfo,
3466  VmaAllocator* pAllocator)
3467 {
3468  VMA_ASSERT(pCreateInfo && pAllocator);
3469  VMA_DEBUG_LOG("vmaCreateAllocator");
3470  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
3471  return VK_SUCCESS;
3472 }
3473 
3474 void vmaDestroyAllocator(
3475  VmaAllocator allocator)
3476 {
3477  if(allocator != VK_NULL_HANDLE)
3478  {
3479  VMA_DEBUG_LOG("vmaDestroyAllocator");
3480  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
3481  vma_delete(&allocationCallbacks, allocator);
3482  }
3483 }
3484 
3486  VmaAllocator allocator,
3487  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
3488 {
3489  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
3490  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
3491 }
3492 
3494  VmaAllocator allocator,
3495  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
3496 {
3497  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
3498  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
3499 }
3500 
3502  VmaAllocator allocator,
3503  uint32_t memoryTypeIndex,
3504  VkMemoryPropertyFlags* pFlags)
3505 {
3506  VMA_ASSERT(allocator && pFlags);
3507  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
3508  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
3509 }
3510 
3511 void vmaCalculateStats(
3512  VmaAllocator allocator,
3513  VmaStats* pStats)
3514 {
3515  VMA_ASSERT(allocator && pStats);
3516  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3517  allocator->CalculateStats(pStats);
3518 }
3519 
3520 #if VMA_STATS_STRING_ENABLED
3521 
3522 void vmaBuildStatsString(
3523  VmaAllocator allocator,
3524  char** ppStatsString,
3525  VkBool32 detailedMap)
3526 {
3527  VMA_ASSERT(allocator && ppStatsString);
3528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3529 
3530  VmaStringBuilder sb(allocator);
3531  {
3532  VmaStats stats;
3533  allocator->CalculateStats(&stats);
3534 
3535  sb.Add("{\n\"Total\": ");
3536  VmaPrintStatInfo(sb, stats.total);
3537 
3538  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
3539  {
3540  sb.Add(",\n\"Heap ");
3541  sb.AddNumber(heapIndex);
3542  sb.Add("\": {\n\t\"Size\": ");
3543  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
3544  sb.Add(",\n\t\"Flags\": ");
3545  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
3546  sb.AddString("DEVICE_LOCAL");
3547  else
3548  sb.AddString("");
3549  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
3550  {
3551  sb.Add(",\n\t\"Stats:\": ");
3552  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
3553  }
3554 
3555  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
3556  {
3557  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
3558  {
3559  sb.Add(",\n\t\"Type ");
3560  sb.AddNumber(typeIndex);
3561  sb.Add("\": {\n\t\t\"Flags\": \"");
3562  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
3563  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
3564  sb.Add(" DEVICE_LOCAL");
3565  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
3566  sb.Add(" HOST_VISIBLE");
3567  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
3568  sb.Add(" HOST_COHERENT");
3569  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
3570  sb.Add(" HOST_CACHED");
3571  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
3572  sb.Add(" LAZILY_ALLOCATED");
3573  sb.Add("\"");
3574  if(stats.memoryType[typeIndex].AllocationCount > 0)
3575  {
3576  sb.Add(",\n\t\t\"Stats\": ");
3577  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
3578  }
3579  sb.Add("\n\t}");
3580  }
3581  }
3582  sb.Add("\n}");
3583  }
3584  if(detailedMap == VK_TRUE)
3585  allocator->PrintDetailedMap(sb);
3586  sb.Add("\n}\n");
3587  }
3588 
3589  const size_t len = sb.GetLength();
3590  char* const pChars = vma_new_array(allocator, char, len + 1);
3591  if(len > 0)
3592  memcpy(pChars, sb.GetData(), len);
3593  pChars[len] = '\0';
3594  *ppStatsString = pChars;
3595 }
3596 
3597 void vmaFreeStatsString(
3598  VmaAllocator allocator,
3599  char* pStatsString)
3600 {
3601  if(pStatsString != VMA_NULL)
3602  {
3603  VMA_ASSERT(allocator);
3604  size_t len = strlen(pStatsString);
3605  vma_delete_array(allocator, pStatsString, len + 1);
3606  }
3607 }
3608 
3609 #endif // #if VMA_STATS_STRING_ENABLED
3610 
3613 VkResult vmaFindMemoryTypeIndex(
3614  VmaAllocator allocator,
3615  uint32_t memoryTypeBits,
3616  const VmaMemoryRequirements* pMemoryRequirements,
3617  uint32_t* pMemoryTypeIndex)
3618 {
3619  VMA_ASSERT(allocator != VK_NULL_HANDLE);
3620  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
3621  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
3622 
3623  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
3624  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
3625  if(preferredFlags == 0)
3626  preferredFlags = requiredFlags;
3627  // preferredFlags, if not 0, must be subset of requiredFlags.
3628  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
3629 
3630  // Convert usage to requiredFlags and preferredFlags.
3631  switch(pMemoryRequirements->usage)
3632  {
3634  break;
3636  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3637  break;
3639  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3640  break;
3642  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3643  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3644  break;
3646  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3647  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3648  break;
3649  default:
3650  break;
3651  }
3652 
3653  *pMemoryTypeIndex = UINT32_MAX;
3654  uint32_t minCost = UINT32_MAX;
3655  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
3656  memTypeIndex < allocator->GetMemoryTypeCount();
3657  ++memTypeIndex, memTypeBit <<= 1)
3658  {
3659  // This memory type is acceptable according to memoryTypeBits bitmask.
3660  if((memTypeBit & memoryTypeBits) != 0)
3661  {
3662  const VkMemoryPropertyFlags currFlags =
3663  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
3664  // This memory type contains requiredFlags.
3665  if((requiredFlags & ~currFlags) == 0)
3666  {
3667  // Calculate cost as number of bits from preferredFlags not present in this memory type.
3668  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
3669  // Remember memory type with lowest cost.
3670  if(currCost < minCost)
3671  {
3672  *pMemoryTypeIndex = memTypeIndex;
3673  if(currCost == 0)
3674  return VK_SUCCESS;
3675  minCost = currCost;
3676  }
3677  }
3678  }
3679  }
3680  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
3681 }
3682 
3683 VkResult vmaAllocateMemory(
3684  VmaAllocator allocator,
3685  const VkMemoryRequirements* pVkMemoryRequirements,
3686  const VmaMemoryRequirements* pVmaMemoryRequirements,
3687  VkMappedMemoryRange* pMemory,
3688  uint32_t* pMemoryTypeIndex)
3689 {
3690  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pMemory);
3691 
3692  VMA_DEBUG_LOG("vmaAllocateMemory");
3693 
3694  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3695 
3696  return allocator->AllocateMemory(
3697  *pVkMemoryRequirements,
3698  *pVmaMemoryRequirements,
3699  VMA_SUBALLOCATION_TYPE_UNKNOWN,
3700  pMemory,
3701  pMemoryTypeIndex);
3702 }
3703 
3705  VmaAllocator allocator,
3706  VkBuffer buffer,
3707  const VmaMemoryRequirements* pMemoryRequirements,
3708  VkMappedMemoryRange* pMemory,
3709  uint32_t* pMemoryTypeIndex)
3710 {
3711  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3712 
3713  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
3714 
3715  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3716 
3717  VkMemoryRequirements vkMemReq = {};
3718  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
3719 
3720  return allocator->AllocateMemory(
3721  vkMemReq,
3722  *pMemoryRequirements,
3723  VMA_SUBALLOCATION_TYPE_BUFFER,
3724  pMemory,
3725  pMemoryTypeIndex);
3726 }
3727 
3728 VkResult vmaAllocateMemoryForImage(
3729  VmaAllocator allocator,
3730  VkImage image,
3731  const VmaMemoryRequirements* pMemoryRequirements,
3732  VkMappedMemoryRange* pMemory,
3733  uint32_t* pMemoryTypeIndex)
3734 {
3735  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements);
3736 
3737  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
3738 
3739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3740 
3741  return AllocateMemoryForImage(
3742  allocator,
3743  image,
3744  pMemoryRequirements,
3745  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
3746  pMemory,
3747  pMemoryTypeIndex);
3748 }
3749 
3750 void vmaFreeMemory(
3751  VmaAllocator allocator,
3752  const VkMappedMemoryRange* pMemory)
3753 {
3754  VMA_ASSERT(allocator && pMemory);
3755 
3756  VMA_DEBUG_LOG("vmaFreeMemory");
3757 
3758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3759 
3760  allocator->FreeMemory(pMemory);
3761 }
3762 
3763 VkResult vmaMapMemory(
3764  VmaAllocator allocator,
3765  const VkMappedMemoryRange* pMemory,
3766  void** ppData)
3767 {
3768  VMA_ASSERT(allocator && pMemory && ppData);
3769 
3770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3771 
3772  return vkMapMemory(allocator->m_hDevice, pMemory->memory,
3773  pMemory->offset, pMemory->size, 0, ppData);
3774 }
3775 
3776 void vmaUnmapMemory(
3777  VmaAllocator allocator,
3778  const VkMappedMemoryRange* pMemory)
3779 {
3780  VMA_ASSERT(allocator && pMemory);
3781 
3782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3783 
3784  vkUnmapMemory(allocator->m_hDevice, pMemory->memory);
3785 }
3786 
// Creates a buffer, allocates memory for it, and binds them together.
// On any intermediate failure, earlier steps are rolled back before returning.
// pMemory and pMemoryTypeIndex are optional outputs.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pCreateInfo,
    const VmaMemoryRequirements* pMemoryRequirements,
    VkBuffer* pBuffer,
    VkMappedMemoryRange* pMemory,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // 1. Create VkBuffer.
    VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
    if(res >= 0)
    {
        VkMappedMemoryRange mem = {};

        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            *pMemoryRequirements,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            &mem,
            pMemoryTypeIndex);
        if(res >= 0)
        {
            // pMemory is optional; copy out the range if requested.
            if(pMemory != VMA_NULL)
            {
                *pMemory = mem;
            }
            // 4. Bind buffer with memory. (Step number fixed; was "3." twice.)
            res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, mem.memory, mem.offset);
            if(res >= 0)
            {
                // All steps succeeded. Remember the binding so vmaDestroyBuffer()
                // can free the memory later.
                VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
                allocator->m_BufferToMemoryMap.insert(VmaPair<VkBuffer, VkMappedMemoryRange>(*pBuffer, mem));
                return VK_SUCCESS;
            }
            // Bind failed: roll back the memory allocation.
            allocator->FreeMemory(&mem);
            return res;
        }
        // Allocation failed: roll back the buffer creation.
        vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        return res;
    }
    return res;
}
3841 
3842 void vmaDestroyBuffer(
3843  VmaAllocator allocator,
3844  VkBuffer buffer)
3845 {
3846  if(buffer != VK_NULL_HANDLE)
3847  {
3848  VMA_ASSERT(allocator);
3849 
3850  VMA_DEBUG_LOG("vmaDestroyBuffer");
3851 
3852  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3853 
3854  VkMappedMemoryRange mem = {};
3855  {
3856  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3857  VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = allocator->m_BufferToMemoryMap.find(buffer);
3858  if(it == allocator->m_BufferToMemoryMap.end())
3859  {
3860  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3861  return;
3862  }
3863  mem = it->second;
3864  allocator->m_BufferToMemoryMap.erase(it);
3865  }
3866 
3867  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
3868 
3869  allocator->FreeMemory(&mem);
3870  }
3871 }
3872 
// Creates an image, allocates memory for it, and binds them together.
// On any intermediate failure, earlier steps are rolled back before returning.
// pMemory and pMemoryTypeIndex are optional outputs.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pCreateInfo,
    const VmaMemoryRequirements* pMemoryRequirements,
    VkImage* pImage,
    VkMappedMemoryRange* pMemory,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // 1. Create VkImage.
    VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
    if(res >= 0)
    {
        VkMappedMemoryRange mem = {};
        // Tiling decides the suballocation category (optimal vs. linear images
        // are tracked separately).
        VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, &mem, pMemoryTypeIndex);
        if(res >= 0)
        {
            // pMemory is optional; copy out the range if requested.
            if(pMemory != VMA_NULL)
                *pMemory = mem;
            // 3. Bind image with memory.
            res = vkBindImageMemory(allocator->m_hDevice, *pImage, mem.memory, mem.offset);
            if(res >= 0)
            {
                // All steps succeeded. Remember the binding so vmaDestroyImage()
                // can free the memory later.
                VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
                allocator->m_ImageToMemoryMap.insert(VmaPair<VkImage, VkMappedMemoryRange>(*pImage, mem));
                return VK_SUCCESS;
            }
            // Bind failed: roll back the memory allocation.
            allocator->FreeMemory(&mem);
            return res;
        }
        // Allocation failed: roll back the image creation.
        vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        return res;
    }
    return res;
}
3919 
3920 void vmaDestroyImage(
3921  VmaAllocator allocator,
3922  VkImage image)
3923 {
3924  if(image != VK_NULL_HANDLE)
3925  {
3926  VMA_ASSERT(allocator);
3927 
3928  VMA_DEBUG_LOG("vmaDestroyImage");
3929 
3930  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3931 
3932  VkMappedMemoryRange mem = {};
3933  {
3934  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3935  VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = allocator->m_ImageToMemoryMap.find(image);
3936  if(it == allocator->m_ImageToMemoryMap.end())
3937  {
3938  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3939  return;
3940  }
3941  mem = it->second;
3942  allocator->m_ImageToMemoryMap.erase(it);
3943  }
3944 
3945  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
3946 
3947  allocator->FreeMemory(&mem);
3948  }
3949 }
3950 
3951 #endif // #ifdef VMA_IMPLEMENTATION
3952 
3953 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
struct VmaMemoryRequirements VmaMemoryRequirements
void vmaUnmapMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory)
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:165
VkResult vmaMapMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory, void **ppData)
Memory will be used for writing on device and readback on host.
Definition: vk_mem_alloc.h:276
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:295
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaMemoryRequirements *pMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
Function similar to vmaAllocateMemoryForBuffer().
const VkAllocationCallbacks * pAllocationCallbacks
Custom allocation callbacks.
Definition: vk_mem_alloc.h:177
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:161
VkDeviceSize preferredSmallHeapBlockSize
Size of a single memory block to allocate for resources from a small heap <= 512 MB.
Definition: vk_mem_alloc.h:174
VmaStatInfo total
Definition: vk_mem_alloc.h:232
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:168
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaMemoryRequirements *pVmaMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
General purpose memory allocation.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer)
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pCreateInfo, const VmaMemoryRequirements *pMemoryRequirements, VkImage *pImage, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
Function similar to vmaCreateBuffer().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:228
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:304
VmaMemoryUsage
Definition: vk_mem_alloc.h:265
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pCreateInfo, const VmaMemoryRequirements *pMemoryRequirements, VkBuffer *pBuffer, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
Definition: vk_mem_alloc.h:216
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:299
Definition: vk_mem_alloc.h:280
VkBool32 neverAllocate
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:311
VkDeviceSize UnusedRangeSizeMax
Definition: vk_mem_alloc.h:224
VkDeviceSize SuballocationSizeMax
Definition: vk_mem_alloc.h:223
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
VkBool32 ownMemory
Set to true if this allocation should have its own memory block.
Definition: vk_mem_alloc.h:290
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:230
void vmaDestroyImage(VmaAllocator allocator, VkImage image)
uint32_t AllocationCount
Definition: vk_mem_alloc.h:218
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkDeviceSize UsedBytes
Definition: vk_mem_alloc.h:221
VkDeviceSize preferredLargeHeapBlockSize
Size of a single memory block to allocate for resources.
Definition: vk_mem_alloc.h:171
uint32_t UnusedRangeCount
Definition: vk_mem_alloc.h:220
Memory will be mapped on host. Could be used for transfer to device.
Definition: vk_mem_alloc.h:272
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
uint32_t SuballocationCount
Definition: vk_mem_alloc.h:219
VkDeviceSize UnusedRangeSizeAvg
Definition: vk_mem_alloc.h:224
VkDeviceSize SuballocationSizeMin
Definition: vk_mem_alloc.h:223
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaMemoryRequirements *pMemoryRequirements, VkMappedMemoryRange *pMemory, uint32_t *pMemoryTypeIndex)
VkDeviceSize SuballocationSizeAvg
Definition: vk_mem_alloc.h:223
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
No intended memory usage specified.
Definition: vk_mem_alloc.h:268
Definition: vk_mem_alloc.h:277
Memory will be used for frequent (dynamic) updates from host and reads on device. ...
Definition: vk_mem_alloc.h:274
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
Memory will be used on device only, no need to be mapped on host.
Definition: vk_mem_alloc.h:270
struct VmaStatInfo VmaStatInfo
VkDeviceSize UnusedBytes
Definition: vk_mem_alloc.h:222
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:231
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaMemoryRequirements *pMemoryRequirements, uint32_t *pMemoryTypeIndex)
void vmaFreeMemory(VmaAllocator allocator, const VkMappedMemoryRange *pMemory)
Frees memory previously allocated using vmaAllocateMemoryForBuffer() or vmaAllocateMemoryForImage().
VkDeviceSize UnusedRangeSizeMin
Definition: vk_mem_alloc.h:224