Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
151 #include <vulkan/vulkan.h>
152 
154 
158 VK_DEFINE_HANDLE(VmaAllocator)
159 
160 typedef struct VmaAllocatorCreateInfo
162 {
164 
165  VkPhysicalDevice physicalDevice;
167 
168  VkDevice device;
170 
172  VkDeviceSize preferredLargeHeapBlockSize;
173 
175  VkDeviceSize preferredSmallHeapBlockSize;
176 
177  const VkAllocationCallbacks* pAllocationCallbacks;
178 } VmaAllocatorCreateInfo;
179 
181 VkResult vmaCreateAllocator(
182  const VmaAllocatorCreateInfo* pCreateInfo,
183  VmaAllocator* pAllocator);
184 
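/*
Typical initialization, a minimal sketch assuming `physicalDevice` and
`device` are valid Vulkan handles already created by the application:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/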
186 void vmaDestroyAllocator(
187  VmaAllocator allocator);
188 
193 void vmaGetPhysicalDeviceProperties(
194  VmaAllocator allocator,
195  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
196 
201 void vmaGetMemoryProperties(
202  VmaAllocator allocator,
203  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
204 
211 void vmaGetMemoryTypeProperties(
212  VmaAllocator allocator,
213  uint32_t memoryTypeIndex,
214  VkMemoryPropertyFlags* pFlags);
215 
216 typedef struct VmaStatInfo
217 {
218  uint32_t AllocationCount;
219  uint32_t SuballocationCount;
220  uint32_t UnusedRangeCount;
221  VkDeviceSize UsedBytes;
222  VkDeviceSize UnusedBytes;
223  VkDeviceSize SuballocationSizeMin, SuballocationSizeAvg, SuballocationSizeMax;
224  VkDeviceSize UnusedRangeSizeMin, UnusedRangeSizeAvg, UnusedRangeSizeMax;
225 } VmaStatInfo;
226 
228 struct VmaStats
229 {
230  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
231  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
232  VmaStatInfo total;
233 };
234 
236 void vmaCalculateStats(
237  VmaAllocator allocator,
238  VmaStats* pStats);
239 
240 #ifndef VMA_STATS_STRING_ENABLED
241  #define VMA_STATS_STRING_ENABLED 1
242 #endif
243 
244 #if VMA_STATS_STRING_ENABLED
245 
247 
249 void vmaBuildStatsString(
250  VmaAllocator allocator,
251  char** ppStatsString,
252  VkBool32 detailedMap);
253 
254 void vmaFreeStatsString(
255  VmaAllocator allocator,
256  char* pStatsString);
257 
258 #endif // #if VMA_STATS_STRING_ENABLED
259 
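/*
A minimal sketch of dumping statistics as a string, assuming `allocator` is a
valid VmaAllocator and VMA_STATS_STRING_ENABLED is 1 (the default above):

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/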
262 
267 typedef enum VmaMemoryUsage
268 {
269  VMA_MEMORY_USAGE_UNKNOWN = 0,
271  VMA_MEMORY_USAGE_GPU_ONLY = 1,
273  VMA_MEMORY_USAGE_CPU_ONLY = 2,
275  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
277  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
279  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
280 } VmaMemoryUsage;
281 
282 typedef struct VmaMemoryRequirements
283 {
292  VkBool32 ownMemory;
296  VmaMemoryUsage usage;
301  VkMemoryPropertyFlags requiredFlags;
306  VkMemoryPropertyFlags preferredFlags;
313  VkBool32 neverAllocate;
314 } VmaMemoryRequirements;
315 
330 VkResult vmaFindMemoryTypeIndex(
331  VmaAllocator allocator,
332  uint32_t memoryTypeBits,
333  const VmaMemoryRequirements* pMemoryRequirements,
334  uint32_t* pMemoryTypeIndex);
335 
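/*
A sketch of choosing a memory type for a staging resource; `memoryTypeBits`
is assumed to come from vkGetBufferMemoryRequirements or
vkGetImageMemoryRequirements in the calling code:

    VmaMemoryRequirements memReq = {};
    memReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    memReq.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
        VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memoryTypeBits, &memReq, &memTypeIndex);
*/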
338 
355 VkResult vmaAllocateMemory(
356  VmaAllocator allocator,
357  const VkMemoryRequirements* pVkMemoryRequirements,
358  const VmaMemoryRequirements* pVmaMemoryRequirements,
359  VkMappedMemoryRange* pMemory,
360  uint32_t* pMemoryTypeIndex);
361 
369 VkResult vmaAllocateMemoryForBuffer(
370  VmaAllocator allocator,
371  VkBuffer buffer,
372  const VmaMemoryRequirements* pMemoryRequirements,
373  VkMappedMemoryRange* pMemory,
374  uint32_t* pMemoryTypeIndex);
375 
377 VkResult vmaAllocateMemoryForImage(
378  VmaAllocator allocator,
379  VkImage image,
380  const VmaMemoryRequirements* pMemoryRequirements,
381  VkMappedMemoryRange* pMemory,
382  uint32_t* pMemoryTypeIndex);
383 
385 void vmaFreeMemory(
386  VmaAllocator allocator,
387  const VkMappedMemoryRange* pMemory);
388 
394 VkResult vmaMapMemory(
395  VmaAllocator allocator,
396  const VkMappedMemoryRange* pMemory,
397  void** ppData);
398 
399 void vmaUnmapMemory(
400  VmaAllocator allocator,
401  const VkMappedMemoryRange* pMemory);
402 
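/*
Sketch of uploading data through a mapped pointer, assuming `memRange` was
filled by one of the allocation functions above and the chosen memory type is
host-visible:

    void* pData;
    VkResult res = vmaMapMemory(allocator, &memRange, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, &memRange);
    }
*/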
405 
427 VkResult vmaCreateBuffer(
428  VmaAllocator allocator,
429  const VkBufferCreateInfo* pCreateInfo,
430  const VmaMemoryRequirements* pMemoryRequirements,
431  VkBuffer* pBuffer,
432  VkMappedMemoryRange* pMemory,
433  uint32_t* pMemoryTypeIndex);
434 
435 void vmaDestroyBuffer(
436  VmaAllocator allocator,
437  VkBuffer buffer);
438 
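/*
A combined buffer creation sketch; `bufferSize` and `allocator` are assumed
to exist in the calling code:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = bufferSize;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaMemoryRequirements memReq = {};
    memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VkMappedMemoryRange memRange;
    uint32_t memTypeIndex;
    VkResult res = vmaCreateBuffer(
        allocator, &bufferInfo, &memReq, &buffer, &memRange, &memTypeIndex);
    // ...
    vmaDestroyBuffer(allocator, buffer);
*/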
440 VkResult vmaCreateImage(
441  VmaAllocator allocator,
442  const VkImageCreateInfo* pCreateInfo,
443  const VmaMemoryRequirements* pMemoryRequirements,
444  VkImage* pImage,
445  VkMappedMemoryRange* pMemory,
446  uint32_t* pMemoryTypeIndex);
447 
448 void vmaDestroyImage(
449  VmaAllocator allocator,
450  VkImage image);
451 
454 #ifdef VMA_IMPLEMENTATION
455 
456 #include <cstdint>
457 #include <cstdlib>
458 
459 /*******************************************************************************
460 CONFIGURATION SECTION
461 
462 Define some of these macros before each #include of this header or change them
463 here if you need behavior other than the default for your environment.
464 */
465 
466 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
467 //#define VMA_USE_STL_CONTAINERS 1
468 
469 /* Set this macro to 1 to make the library include and use STL containers:
470 std::pair, std::vector, std::list, std::unordered_map.
471 
472 Set it to 0 or leave it undefined to make the library use its own
473 implementations of these containers.
474 */
475 #if VMA_USE_STL_CONTAINERS
476  #define VMA_USE_STL_VECTOR 1
477  #define VMA_USE_STL_UNORDERED_MAP 1
478  #define VMA_USE_STL_LIST 1
479 #endif
480 
481 #if VMA_USE_STL_VECTOR
482  #include <vector>
483 #endif
484 
485 #if VMA_USE_STL_UNORDERED_MAP
486  #include <unordered_map>
487 #endif
488 
489 #if VMA_USE_STL_LIST
490  #include <list>
491 #endif
492 
493 /*
494 Following headers are used in this CONFIGURATION section only, so feel free to
495 remove them if not needed.
496 */
497 #include <cassert> // for assert
498 #include <algorithm> // for min, max
499 #include <mutex> // for std::mutex
500 
501 #if !defined(_WIN32)
502  #include <malloc.h> // for aligned_alloc()
503 #endif
504 
505 // Normal assert to check for programmer's errors, especially in Debug configuration.
506 #ifndef VMA_ASSERT
507  #ifdef _DEBUG
508  #define VMA_ASSERT(expr) assert(expr)
509  #else
510  #define VMA_ASSERT(expr)
511  #endif
512 #endif
513 
514 // Assert that will be called very often, like inside data structures e.g. operator[].
515 // Making it non-empty can make program slow.
516 #ifndef VMA_HEAVY_ASSERT
517  #ifdef _DEBUG
518  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
519  #else
520  #define VMA_HEAVY_ASSERT(expr)
521  #endif
522 #endif
523 
524 #ifndef VMA_NULL
525  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
526  #define VMA_NULL nullptr
527 #endif
528 
529 #ifndef VMA_ALIGN_OF
530  #define VMA_ALIGN_OF(type) (__alignof(type))
531 #endif
532 
533 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
534  #if defined(_WIN32)
535  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
536  #else
537  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
538  #endif
539 #endif
540 
541 #ifndef VMA_SYSTEM_FREE
542  #if defined(_WIN32)
543  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
544  #else
545  #define VMA_SYSTEM_FREE(ptr) free(ptr)
546  #endif
547 #endif
548 
549 #ifndef VMA_MIN
550  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
551 #endif
552 
553 #ifndef VMA_MAX
554  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
555 #endif
556 
557 #ifndef VMA_SWAP
558  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
559 #endif
560 
561 #ifndef VMA_DEBUG_LOG
562  #define VMA_DEBUG_LOG(format, ...)
563  /*
564  #define VMA_DEBUG_LOG(format, ...) do { \
565  printf(format, __VA_ARGS__); \
566  printf("\n"); \
567  } while(false)
568  */
569 #endif
570 
571 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
572 #if VMA_STATS_STRING_ENABLED
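 // Note: _ultoa_s and _ui64toa_s below are MSVC-specific CRT functions; on
 // other compilers these helpers would need a portable replacement such as
 // snprintf.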
573  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
574  {
575  _ultoa_s(num, outStr, strLen, 10);
576  }
577  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
578  {
579  _ui64toa_s(num, outStr, strLen, 10);
580  }
581 #endif
582 
583 #ifndef VMA_MUTEX
584  class VmaMutex
585  {
586  public:
587  VmaMutex() { }
588  ~VmaMutex() { }
589  void Lock() { m_Mutex.lock(); }
590  void Unlock() { m_Mutex.unlock(); }
591  private:
592  std::mutex m_Mutex;
593  };
594  #define VMA_MUTEX VmaMutex
595 #endif
596 
597 #ifndef VMA_BEST_FIT
598 
610  #define VMA_BEST_FIT (1)
611 #endif
612 
613 #ifndef VMA_DEBUG_ALWAYS_OWN_MEMORY
614 
618  #define VMA_DEBUG_ALWAYS_OWN_MEMORY (0)
619 #endif
620 
621 #ifndef VMA_DEBUG_ALIGNMENT
622 
626  #define VMA_DEBUG_ALIGNMENT (1)
627 #endif
628 
629 #ifndef VMA_DEBUG_MARGIN
630 
634  #define VMA_DEBUG_MARGIN (0)
635 #endif
636 
637 #ifndef VMA_DEBUG_GLOBAL_MUTEX
638 
642  #define VMA_DEBUG_GLOBAL_MUTEX (0)
643 #endif
644 
645 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
646 
650  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
651 #endif
652 
653 #ifndef VMA_SMALL_HEAP_MAX_SIZE
654  #define VMA_SMALL_HEAP_MAX_SIZE (512 * 1024 * 1024)
656 #endif
657 
658 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
659  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 * 1024 * 1024)
661 #endif
662 
663 #ifndef VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE
664  #define VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE (64 * 1024 * 1024)
666 #endif
667 
668 /*******************************************************************************
669 END OF CONFIGURATION
670 */
671 
672 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
673  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
674 
675 // Returns number of bits set to 1 in (v).
676 static inline uint32_t CountBitsSet(uint32_t v)
677 {
678  uint32_t c = v - ((v >> 1) & 0x55555555);
679  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
680  c = ((c >> 4) + c) & 0x0F0F0F0F;
681  c = ((c >> 8) + c) & 0x00FF00FF;
682  c = ((c >> 16) + c) & 0x0000FFFF;
683  return c;
684 }
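// Worked example: CountBitsSet(0x0000000B) == 3, since 0xB is binary 1011.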
685 
686 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
687 // Use types like uint32_t, uint64_t as T.
688 template <typename T>
689 static inline T VmaAlignUp(T val, T align)
690 {
691  return (val + align - 1) / align * align;
692 }
693 
694 // Division with mathematical rounding to nearest number.
695 template <typename T>
696 inline T VmaRoundDiv(T x, T y)
697 {
698  return (x + (y / (T)2)) / y;
699 }
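// Worked example: VmaRoundDiv(7, 2) == 4 and VmaRoundDiv(5, 2) == 3, because
// adding y/2 before dividing rounds to the nearest integer instead of
// truncating.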
700 /*
701 Returns true if two memory blocks occupy overlapping pages.
702 ResourceA must be at a lower memory offset than ResourceB.
703 
704 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
705 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
706 */
707 static inline bool VmaBlocksOnSamePage(
708  VkDeviceSize resourceAOffset,
709  VkDeviceSize resourceASize,
710  VkDeviceSize resourceBOffset,
711  VkDeviceSize pageSize)
712 {
713  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
714  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
715  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
716  VkDeviceSize resourceBStart = resourceBOffset;
717  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
718  return resourceAEndPage == resourceBStartPage;
719 }
720 
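// Worked example: with pageSize = 4096, a resource at offset 0 with size 100
// ends on page 0, while a resource starting at offset 4096 begins on page 1,
// so the function returns false; if the second resource instead started at
// offset 2048, both would share page 0 and it would return true.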
721 enum VmaSuballocationType
722 {
723  VMA_SUBALLOCATION_TYPE_FREE = 0,
724  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
725  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
726  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
727  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
728  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
729  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
730 };
731 
732 /*
733 Returns true if given suballocation types could conflict and must respect
734 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
735 or linear image and another one is optimal image. If type is unknown, behave
736 conservatively.
737 */
738 static inline bool VmaIsBufferImageGranularityConflict(
739  VmaSuballocationType suballocType1,
740  VmaSuballocationType suballocType2)
741 {
742  if(suballocType1 > suballocType2)
743  VMA_SWAP(suballocType1, suballocType2);
744 
745  switch(suballocType1)
746  {
747  case VMA_SUBALLOCATION_TYPE_FREE:
748  return false;
749  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
750  return true;
751  case VMA_SUBALLOCATION_TYPE_BUFFER:
752  return
753  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
754  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
755  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
756  return
757  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
758  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
759  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
760  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
761  return
762  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
763  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
764  return false;
765  default:
766  VMA_ASSERT(0);
767  return true;
768  }
769 }
770 
771 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
772 struct VmaMutexLock
773 {
774 public:
775  VmaMutexLock(VMA_MUTEX& mutex) : m_Mutex(mutex) { mutex.Lock(); }
776  ~VmaMutexLock() { m_Mutex.Unlock(); }
777 
778 private:
779  VMA_MUTEX& m_Mutex;
780 };
781 
782 #if VMA_DEBUG_GLOBAL_MUTEX
783  static VMA_MUTEX gDebugGlobalMutex;
784  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex);
785 #else
786  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
787 #endif
788 
789 // Minimum size of a free suballocation to register it in the free suballocation collection.
790 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
791 
792 /*
793 Performs binary search and returns iterator to first element that is greater or
794 equal to (key), according to comparison (cmp).
795 
796 Cmp should return true if the first argument is less than the second.
797 
798 Returned iterator points to the found element if it is present in the
799 collection, or to the place where a new element with value (key) should be inserted.
800 */
801 template <typename IterT, typename KeyT, typename CmpT>
802 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
803 {
804  size_t down = 0, up = (end - beg);
805  while(down < up)
806  {
807  const size_t mid = (down + up) / 2;
808  if(cmp(*(beg+mid), key))
809  down = mid + 1;
810  else
811  up = mid;
812  }
813  return beg + down;
814 }
815 
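// Usage sketch: over a sorted range {1, 3, 3, 7} with cmp being operator<,
// searching for key 3 returns an iterator to the first 3 (index 1), and
// searching for key 4 returns an iterator to 7 (index 3) - the position where
// 4 would be inserted to keep the range sorted, matching the contract of
// std::lower_bound.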
817 // Memory allocation
818 
819 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
820 {
821  if((pAllocationCallbacks != VMA_NULL) &&
822  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
823  {
824  return (*pAllocationCallbacks->pfnAllocation)(
825  pAllocationCallbacks->pUserData,
826  size,
827  alignment,
828  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
829  }
830  else
831  {
832  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
833  }
834 }
835 
836 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
837 {
838  if((pAllocationCallbacks != VMA_NULL) &&
839  (pAllocationCallbacks->pfnFree != VMA_NULL))
840  {
841  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
842  }
843  else
844  {
845  VMA_SYSTEM_FREE(ptr);
846  }
847 }
848 
849 template<typename T>
850 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
851 {
852  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
853 }
854 
855 template<typename T>
856 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
857 {
858  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
859 }
860 
861 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
862 
863 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
864 
865 template<typename T>
866 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
867 {
868  ptr->~T();
869  VmaFree(pAllocationCallbacks, ptr);
870 }
871 
872 template<typename T>
873 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
874 {
875  if(ptr != VMA_NULL)
876  {
877  for(size_t i = count; i--; )
878  ptr[i].~T();
879  VmaFree(pAllocationCallbacks, ptr);
880  }
881 }
882 
883 // STL-compatible allocator.
884 template<typename T>
885 class VmaStlAllocator
886 {
887 public:
888  const VkAllocationCallbacks* const m_pCallbacks;
889  typedef T value_type;
890 
891  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
892  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
893 
894  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
895  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
896 
897  template<typename U>
898  bool operator==(const VmaStlAllocator<U>& rhs) const
899  {
900  return m_pCallbacks == rhs.m_pCallbacks;
901  }
902  template<typename U>
903  bool operator!=(const VmaStlAllocator<U>& rhs) const
904  {
905  return m_pCallbacks != rhs.m_pCallbacks;
906  }
907 
908  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
909 };
910 
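// Usage sketch: the adapter above lets the internal containers route their
// allocations through VkAllocationCallbacks, e.g.:
//
//     VmaStlAllocator<int> alloc(pCallbacks); // pCallbacks may be VMA_NULL
//     VmaVector< int, VmaStlAllocator<int> > v(alloc);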
911 #if VMA_USE_STL_VECTOR
912 
913 #define VmaVector std::vector
914 
915 template<typename T, typename allocatorT>
916 static void VectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
917 {
918  vec.insert(vec.begin() + index, item);
919 }
920 
921 template<typename T, typename allocatorT>
922 static void VectorRemove(std::vector<T, allocatorT>& vec, size_t index)
923 {
924  vec.erase(vec.begin() + index);
925 }
926 
927 #else // #if VMA_USE_STL_VECTOR
928 
929 /* Class with interface compatible with subset of std::vector.
930 T must be POD because constructors and destructors are not called and memcpy is
931 used for these objects. */
932 template<typename T, typename AllocatorT>
933 class VmaVector
934 {
935 public:
936  VmaVector(const AllocatorT& allocator) :
937  m_Allocator(allocator),
938  m_pArray(VMA_NULL),
939  m_Count(0),
940  m_Capacity(0)
941  {
942  }
943 
944  VmaVector(AllocatorT& allocator) :
945  m_Allocator(allocator),
946  m_pArray(VMA_NULL),
947  m_Count(0),
948  m_Capacity(0)
949  {
950  }
951 
952  VmaVector(size_t count, AllocatorT& allocator) :
953  m_Allocator(allocator),
954  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
955  m_Count(count),
956  m_Capacity(count)
957  {
958  }
959 
960  VmaVector(const VmaVector<T, AllocatorT>& src) :
961  m_Allocator(src.m_Allocator),
962  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
963  m_Count(src.m_Count),
964  m_Capacity(src.m_Count)
965  {
966  if(m_Count != 0)
967  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
968  }
969 
970  ~VmaVector()
971  {
972  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
973  }
974 
975  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
976  {
977  if(&rhs != this)
978  {
979  Resize(rhs.m_Count);
980  if(m_Count != 0)
981  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
982  }
983  return *this;
984  }
985 
986  bool empty() const { return m_Count == 0; }
987  size_t size() const { return m_Count; }
988  T* data() { return m_pArray; }
989  const T* data() const { return m_pArray; }
990 
991  T& operator[](size_t index)
992  {
993  VMA_HEAVY_ASSERT(index < m_Count);
994  return m_pArray[index];
995  }
996  const T& operator[](size_t index) const
997  {
998  VMA_HEAVY_ASSERT(index < m_Count);
999  return m_pArray[index];
1000  }
1001 
1002  T& front()
1003  {
1004  VMA_HEAVY_ASSERT(m_Count > 0);
1005  return m_pArray[0];
1006  }
1007  const T& front() const
1008  {
1009  VMA_HEAVY_ASSERT(m_Count > 0);
1010  return m_pArray[0];
1011  }
1012  T& back()
1013  {
1014  VMA_HEAVY_ASSERT(m_Count > 0);
1015  return m_pArray[m_Count - 1];
1016  }
1017  const T& back() const
1018  {
1019  VMA_HEAVY_ASSERT(m_Count > 0);
1020  return m_pArray[m_Count - 1];
1021  }
1022 
1023  void reserve(size_t newCapacity, bool freeMemory = false)
1024  {
1025  newCapacity = VMA_MAX(newCapacity, m_Count);
1026 
1027  if((newCapacity < m_Capacity) && !freeMemory)
1028  newCapacity = m_Capacity;
1029 
1030  if(newCapacity != m_Capacity)
1031  {
1032  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1033  if(m_Count != 0)
1034  memcpy(newArray, m_pArray, m_Count * sizeof(T));
1035  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1036  m_Capacity = newCapacity;
1037  m_pArray = newArray;
1038  }
1039  }
1040 
1041  void resize(size_t newCount, bool freeMemory = false)
1042  {
1043  size_t newCapacity = m_Capacity;
1044  if(newCount > m_Capacity)
1045  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
1046  else if(freeMemory)
1047  newCapacity = newCount;
1048 
1049  if(newCapacity != m_Capacity)
1050  {
1051  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
1052  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
1053  if(elementsToCopy != 0)
1054  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
1055  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
1056  m_Capacity = newCapacity;
1057  m_pArray = newArray;
1058  }
1059 
1060  m_Count = newCount;
1061  }
1062 
1063  void clear(bool freeMemory = false)
1064  {
1065  resize(0, freeMemory);
1066  }
1067 
1068  void insert(size_t index, const T& src)
1069  {
1070  VMA_HEAVY_ASSERT(index <= m_Count);
1071  const size_t oldCount = size();
1072  resize(oldCount + 1);
1073  if(index < oldCount)
1074  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
1075  m_pArray[index] = src;
1076  }
1077 
1078  void remove(size_t index)
1079  {
1080  VMA_HEAVY_ASSERT(index < m_Count);
1081  const size_t oldCount = size();
1082  if(index < oldCount - 1)
1083  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
1084  resize(oldCount - 1);
1085  }
1086 
1087  void push_back(const T& src)
1088  {
1089  const size_t newIndex = size();
1090  resize(newIndex + 1);
1091  m_pArray[newIndex] = src;
1092  }
1093 
1094  void pop_back()
1095  {
1096  VMA_HEAVY_ASSERT(m_Count > 0);
1097  resize(size() - 1);
1098  }
1099 
1100  void push_front(const T& src)
1101  {
1102  insert(0, src);
1103  }
1104 
1105  void pop_front()
1106  {
1107  VMA_HEAVY_ASSERT(m_Count > 0);
1108  remove(0);
1109  }
1110 
1111  typedef T* iterator;
1112 
1113  iterator begin() { return m_pArray; }
1114  iterator end() { return m_pArray + m_Count; }
1115 
1116 private:
1117  AllocatorT m_Allocator;
1118  T* m_pArray;
1119  size_t m_Count;
1120  size_t m_Capacity;
1121 };
1122 
1123 template<typename T, typename allocatorT>
1124 static void VectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
1125 {
1126  vec.insert(index, item);
1127 }
1128 
1129 template<typename T, typename allocatorT>
1130 static void VectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
1131 {
1132  vec.remove(index);
1133 }
1134 
1135 #endif // #if VMA_USE_STL_VECTOR
1136 
1138 // class VmaPoolAllocator
1139 
1140 /*
1141 Allocator for objects of type T using a list of arrays (pools) to speed up
1142 allocation. Number of elements that can be allocated is not bounded because
1143 allocator can create multiple blocks.
1144 */
1145 template<typename T>
1146 class VmaPoolAllocator
1147 {
1148 public:
1149  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
1150  ~VmaPoolAllocator();
1151  void Clear();
1152  T* Alloc();
1153  void Free(T* ptr);
1154 
1155 private:
1156  union Item
1157  {
1158  uint32_t NextFreeIndex;
1159  T Value;
1160  };
1161 
1162  struct ItemBlock
1163  {
1164  Item* pItems;
1165  uint32_t FirstFreeIndex;
1166  };
1167 
1168  const VkAllocationCallbacks* m_pAllocationCallbacks;
1169  size_t m_ItemsPerBlock;
1170  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
1171 
1172  ItemBlock& CreateNewBlock();
1173 };
1174 
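// Usage sketch (assuming pCallbacks is a const VkAllocationCallbacks* that
// may be VMA_NULL to fall back to the default aligned malloc/free):
//
//     VmaPoolAllocator<MyItem> pool(pCallbacks, 128); // 128 items per block
//     MyItem* pItem = pool.Alloc(); // Note: the constructor is not called.
//     // ...
//     pool.Free(pItem);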
1175 template<typename T>
1176 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
1177  m_pAllocationCallbacks(pAllocationCallbacks),
1178  m_ItemsPerBlock(itemsPerBlock),
1179  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
1180 {
1181  VMA_ASSERT(itemsPerBlock > 0);
1182 }
1183 
1184 template<typename T>
1185 VmaPoolAllocator<T>::~VmaPoolAllocator()
1186 {
1187  Clear();
1188 }
1189 
1190 template<typename T>
1191 void VmaPoolAllocator<T>::Clear()
1192 {
1193  for(size_t i = m_ItemBlocks.size(); i--; )
1194  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
1195  m_ItemBlocks.clear();
1196 }
1197 
1198 template<typename T>
1199 T* VmaPoolAllocator<T>::Alloc()
1200 {
1201  for(size_t i = m_ItemBlocks.size(); i--; )
1202  {
1203  ItemBlock& block = m_ItemBlocks[i];
1204  // This block has some free items: Use first one.
1205  if(block.FirstFreeIndex != UINT32_MAX)
1206  {
1207  Item* const pItem = &block.pItems[block.FirstFreeIndex];
1208  block.FirstFreeIndex = pItem->NextFreeIndex;
1209  return &pItem->Value;
1210  }
1211  }
1212 
1213  // No block has free item: Create new one and use it.
1214  ItemBlock& newBlock = CreateNewBlock();
1215  Item* const pItem = &newBlock.pItems[0];
1216  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
1217  return &pItem->Value;
1218 }
1219 
1220 template<typename T>
1221 void VmaPoolAllocator<T>::Free(T* ptr)
1222 {
1223  // Search all memory blocks to find ptr.
1224  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
1225  {
1226  ItemBlock& block = m_ItemBlocks[i];
1227 
1228  // Casting to union.
1229  Item* pItemPtr;
1230  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
1231 
1232  // Check if pItemPtr is in address range of this block.
1233  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
1234  {
1235  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
1236  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
1237  block.FirstFreeIndex = index;
1238  return;
1239  }
1240  }
1241  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
1242 }
1243 
1244 template<typename T>
1245 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
1246 {
1247  ItemBlock newBlock = {
1248  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
1249 
1250  m_ItemBlocks.push_back(newBlock);
1251 
1252  // Setup singly-linked list of all free items in this block.
1253  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
1254  newBlock.pItems[i].NextFreeIndex = i + 1;
1255  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
1256  return m_ItemBlocks.back();
1257 }
1258 
1260 // class VmaRawList, VmaList
1261 
1262 #if VMA_USE_STL_LIST
1263 
1264 #define VmaList std::list
1265 
1266 #else // #if VMA_USE_STL_LIST
1267 
1268 template<typename T>
1269 struct VmaListItem
1270 {
1271  VmaListItem* pPrev;
1272  VmaListItem* pNext;
1273  T Value;
1274 };
1275 
1276 // Doubly linked list.
1277 template<typename T>
1278 class VmaRawList
1279 {
1280 public:
1281  typedef VmaListItem<T> ItemType;
1282 
1283  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
1284  ~VmaRawList();
1285  void Clear();
1286 
1287  size_t GetCount() const { return m_Count; }
1288  bool IsEmpty() const { return m_Count == 0; }
1289 
1290  ItemType* Front() { return m_pFront; }
1291  const ItemType* Front() const { return m_pFront; }
1292  ItemType* Back() { return m_pBack; }
1293  const ItemType* Back() const { return m_pBack; }
1294 
1295  ItemType* PushBack();
1296  ItemType* PushFront();
1297  ItemType* PushBack(const T& value);
1298  ItemType* PushFront(const T& value);
1299  void PopBack();
1300  void PopFront();
1301 
1302  // Item can be null - it means PushBack.
1303  ItemType* InsertBefore(ItemType* pItem);
1304  // Item can be null - it means PushFront.
1305  ItemType* InsertAfter(ItemType* pItem);
1306 
1307  ItemType* InsertBefore(ItemType* pItem, const T& value);
1308  ItemType* InsertAfter(ItemType* pItem, const T& value);
1309 
1310  void Remove(ItemType* pItem);
1311 
1312 private:
1313  const VkAllocationCallbacks* const m_pAllocationCallbacks;
1314  VmaPoolAllocator<ItemType> m_ItemAllocator;
1315  ItemType* m_pFront;
1316  ItemType* m_pBack;
1317  size_t m_Count;
1318 
1319  // Declared not defined, to block copy constructor and assignment operator.
1320  VmaRawList(const VmaRawList<T>& src);
1321  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
1322 };
1323 
1324 template<typename T>
1325 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
1326  m_pAllocationCallbacks(pAllocationCallbacks),
1327  m_ItemAllocator(pAllocationCallbacks, 128),
1328  m_pFront(VMA_NULL),
1329  m_pBack(VMA_NULL),
1330  m_Count(0)
1331 {
1332 }
1333 
1334 template<typename T>
1335 VmaRawList<T>::~VmaRawList()
1336 {
1337  // Intentionally not calling Clear, because that would spend unnecessary
1338  // computation returning all items to m_ItemAllocator as free.
1339 }
1340 
1341 template<typename T>
1342 void VmaRawList<T>::Clear()
1343 {
1344  if(IsEmpty() == false)
1345  {
1346  ItemType* pItem = m_pBack;
1347  while(pItem != VMA_NULL)
1348  {
1349  ItemType* const pPrevItem = pItem->pPrev;
1350  m_ItemAllocator.Free(pItem);
1351  pItem = pPrevItem;
1352  }
1353  m_pFront = VMA_NULL;
1354  m_pBack = VMA_NULL;
1355  m_Count = 0;
1356  }
1357 }
1358 
1359 template<typename T>
1360 VmaListItem<T>* VmaRawList<T>::PushBack()
1361 {
1362  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1363  pNewItem->pNext = VMA_NULL;
1364  if(IsEmpty())
1365  {
1366  pNewItem->pPrev = VMA_NULL;
1367  m_pFront = pNewItem;
1368  m_pBack = pNewItem;
1369  m_Count = 1;
1370  }
1371  else
1372  {
1373  pNewItem->pPrev = m_pBack;
1374  m_pBack->pNext = pNewItem;
1375  m_pBack = pNewItem;
1376  ++m_Count;
1377  }
1378  return pNewItem;
1379 }
1380 
1381 template<typename T>
1382 VmaListItem<T>* VmaRawList<T>::PushFront()
1383 {
1384  ItemType* const pNewItem = m_ItemAllocator.Alloc();
1385  pNewItem->pPrev = VMA_NULL;
1386  if(IsEmpty())
1387  {
1388  pNewItem->pNext = VMA_NULL;
1389  m_pFront = pNewItem;
1390  m_pBack = pNewItem;
1391  m_Count = 1;
1392  }
1393  else
1394  {
1395  pNewItem->pNext = m_pFront;
1396  m_pFront->pPrev = pNewItem;
1397  m_pFront = pNewItem;
1398  ++m_Count;
1399  }
1400  return pNewItem;
1401 }
1402 
1403 template<typename T>
1404 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
1405 {
1406  ItemType* const pNewItem = PushBack();
1407  pNewItem->Value = value;
1408  return pNewItem;
1409 }
1410 
1411 template<typename T>
1412 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
1413 {
1414  ItemType* const pNewItem = PushFront();
1415  pNewItem->Value = value;
1416  return pNewItem;
1417 }
1418 
1419 template<typename T>
1420 void VmaRawList<T>::PopBack()
1421 {
1422  VMA_HEAVY_ASSERT(m_Count > 0);
1423  ItemType* const pBackItem = m_pBack;
1424  ItemType* const pPrevItem = pBackItem->pPrev;
1425  if(pPrevItem != VMA_NULL)
1426  pPrevItem->pNext = VMA_NULL;
1427  m_pBack = pPrevItem;
1428  m_ItemAllocator.Free(pBackItem);
1429  --m_Count;
1430 }
1431 
1432 template<typename T>
1433 void VmaRawList<T>::PopFront()
1434 {
1435  VMA_HEAVY_ASSERT(m_Count > 0);
1436  ItemType* const pFrontItem = m_pFront;
1437  ItemType* const pNextItem = pFrontItem->pNext;
1438  if(pNextItem != VMA_NULL)
1439  pNextItem->pPrev = VMA_NULL;
1440  m_pFront = pNextItem;
1441  m_ItemAllocator.Free(pFrontItem);
1442  --m_Count;
1443 }
1444 
1445 template<typename T>
1446 void VmaRawList<T>::Remove(ItemType* pItem)
1447 {
1448  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
1449  VMA_HEAVY_ASSERT(m_Count > 0);
1450 
1451  if(pItem->pPrev != VMA_NULL)
1452  pItem->pPrev->pNext = pItem->pNext;
1453  else
1454  {
1455  VMA_HEAVY_ASSERT(m_pFront == pItem);
1456  m_pFront = pItem->pNext;
1457  }
1458 
1459  if(pItem->pNext != VMA_NULL)
1460  pItem->pNext->pPrev = pItem->pPrev;
1461  else
1462  {
1463  VMA_HEAVY_ASSERT(m_pBack == pItem);
1464  m_pBack = pItem->pPrev;
1465  }
1466 
1467  m_ItemAllocator.Free(pItem);
1468  --m_Count;
1469 }
1470 
1471 template<typename T>
1472 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
1473 {
1474  if(pItem != VMA_NULL)
1475  {
1476  ItemType* const prevItem = pItem->pPrev;
1477  ItemType* const newItem = m_ItemAllocator.Alloc();
1478  newItem->pPrev = prevItem;
1479  newItem->pNext = pItem;
1480  pItem->pPrev = newItem;
1481  if(prevItem != VMA_NULL)
1482  prevItem->pNext = newItem;
1483  else
1484  {
1485  VMA_HEAVY_ASSERT(m_pFront == pItem);
1486  m_pFront = newItem;
1487  }
1488  ++m_Count;
1489  return newItem;
1490  }
1491  else
1492  return PushBack();
1493 }
1494 
1495 template<typename T>
1496 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
1497 {
1498  if(pItem != VMA_NULL)
1499  {
1500  ItemType* const nextItem = pItem->pNext;
1501  ItemType* const newItem = m_ItemAllocator.Alloc();
1502  newItem->pNext = nextItem;
1503  newItem->pPrev = pItem;
1504  pItem->pNext = newItem;
1505  if(nextItem != VMA_NULL)
1506  nextItem->pPrev = newItem;
1507  else
1508  {
1509  VMA_HEAVY_ASSERT(m_pBack == pItem);
1510  m_pBack = newItem;
1511  }
1512  ++m_Count;
1513  return newItem;
1514  }
1515  else
1516  return PushFront();
1517 }
1518 
1519 template<typename T>
1520 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
1521 {
1522  ItemType* const newItem = InsertBefore(pItem);
1523  newItem->Value = value;
1524  return newItem;
1525 }
1526 
1527 template<typename T>
1528 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
1529 {
1530  ItemType* const newItem = InsertAfter(pItem);
1531  newItem->Value = value;
1532  return newItem;
1533 }
1534 
1535 template<typename T, typename AllocatorT>
1536 class VmaList
1537 {
1538 public:
1539  class iterator
1540  {
1541  public:
1542  iterator() :
1543  m_pList(VMA_NULL),
1544  m_pItem(VMA_NULL)
1545  {
1546  }
1547 
1548  T& operator*() const
1549  {
1550  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1551  return m_pItem->Value;
1552  }
1553  T* operator->() const
1554  {
1555  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1556  return &m_pItem->Value;
1557  }
1558 
1559  iterator& operator++()
1560  {
1561  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1562  m_pItem = m_pItem->pNext;
1563  return *this;
1564  }
1565  iterator& operator--()
1566  {
1567  if(m_pItem != VMA_NULL)
1568  m_pItem = m_pItem->pPrev;
1569  else
1570  {
1571  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1572  m_pItem = m_pList->Back();
1573  }
1574  return *this;
1575  }
1576 
1577  iterator operator++(int)
1578  {
1579  iterator result = *this;
1580  ++*this;
1581  return result;
1582  }
1583  iterator operator--(int)
1584  {
1585  iterator result = *this;
1586  --*this;
1587  return result;
1588  }
1589 
1590  bool operator==(const iterator& rhs) const
1591  {
1592  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1593  return m_pItem == rhs.m_pItem;
1594  }
1595  bool operator!=(const iterator& rhs) const
1596  {
1597  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1598  return m_pItem != rhs.m_pItem;
1599  }
1600 
1601  private:
1602  VmaRawList<T>* m_pList;
1603  VmaListItem<T>* m_pItem;
1604 
1605  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
1606  m_pList(pList),
1607  m_pItem(pItem)
1608  {
1609  }
1610 
1611  friend class VmaList<T, AllocatorT>;
1612  friend class VmaList<T, AllocatorT>::const_iterator;
1613  };
1614 
1615  class const_iterator
1616  {
1617  public:
1618  const_iterator() :
1619  m_pList(VMA_NULL),
1620  m_pItem(VMA_NULL)
1621  {
1622  }
1623 
1624  const_iterator(const iterator& src) :
1625  m_pList(src.m_pList),
1626  m_pItem(src.m_pItem)
1627  {
1628  }
1629 
1630  const T& operator*() const
1631  {
1632  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1633  return m_pItem->Value;
1634  }
1635  const T* operator->() const
1636  {
1637  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1638  return &m_pItem->Value;
1639  }
1640 
1641  const_iterator& operator++()
1642  {
1643  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
1644  m_pItem = m_pItem->pNext;
1645  return *this;
1646  }
1647  const_iterator& operator--()
1648  {
1649  if(m_pItem != VMA_NULL)
1650  m_pItem = m_pItem->pPrev;
1651  else
1652  {
1653  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
1654  m_pItem = m_pList->Back();
1655  }
1656  return *this;
1657  }
1658 
1659  const_iterator operator++(int)
1660  {
1661  const_iterator result = *this;
1662  ++*this;
1663  return result;
1664  }
1665  const_iterator operator--(int)
1666  {
1667  const_iterator result = *this;
1668  --*this;
1669  return result;
1670  }
1671 
1672  bool operator==(const const_iterator& rhs) const
1673  {
1674  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1675  return m_pItem == rhs.m_pItem;
1676  }
1677  bool operator!=(const const_iterator& rhs) const
1678  {
1679  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
1680  return m_pItem != rhs.m_pItem;
1681  }
1682 
1683  private:
1684  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
1685  m_pList(pList),
1686  m_pItem(pItem)
1687  {
1688  }
1689 
1690  const VmaRawList<T>* m_pList;
1691  const VmaListItem<T>* m_pItem;
1692 
1693  friend class VmaList<T, AllocatorT>;
1694  };
1695 
1696  VmaList(AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1697  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
1698 
1699  bool empty() const { return m_RawList.IsEmpty(); }
1700  size_t size() const { return m_RawList.GetCount(); }
1701 
1702  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
1703  iterator end() { return iterator(&m_RawList, VMA_NULL); }
1704 
1705  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
1706  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
1707 
1708  void clear() { m_RawList.Clear(); }
1709  void push_back(const T& value) { m_RawList.PushBack(value); }
1710  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
1711  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
1712 
1713 private:
1714  VmaRawList<T> m_RawList;
1715 };
1716 
1717 #endif // #if VMA_USE_STL_LIST
1718 
1720 // class VmaMap
1721 
1722 #if VMA_USE_STL_UNORDERED_MAP
1723 
1724 #define VmaPair std::pair
1725 
1726 #define VMA_MAP_TYPE(KeyT, ValueT) \
1727  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
1728 
1729 #else // #if VMA_USE_STL_UNORDERED_MAP
1730 
1731 template<typename T1, typename T2>
1732 struct VmaPair
1733 {
1734  T1 first;
1735  T2 second;
1736 
1737  VmaPair() : first(), second() { }
1738  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
1739 };
1740 
1741 /* Class compatible with subset of interface of std::unordered_map.
1742 KeyT, ValueT must be POD because they will be stored in VmaVector.
1743 */
1744 template<typename KeyT, typename ValueT>
1745 class VmaMap
1746 {
1747 public:
1748  typedef VmaPair<KeyT, ValueT> PairType;
1749  typedef PairType* iterator;
1750 
1751  VmaMap(VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
1752  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
1753 
1754  iterator begin() { return m_Vector.begin(); }
1755  iterator end() { return m_Vector.end(); }
1756 
1757  void insert(const PairType& pair);
1758  iterator find(const KeyT& key);
1759  void erase(iterator it);
1760 
1761 private:
1762  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
1763 };
1764 
1765 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
1766 
1767 template<typename FirstT, typename SecondT>
1768 struct VmaPairFirstLess
1769 {
1770  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
1771  {
1772  return lhs.first < rhs.first;
1773  }
1774  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
1775  {
1776  return lhs.first < rhsFirst;
1777  }
1778 };
1779 
1780 template<typename KeyT, typename ValueT>
1781 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
1782 {
1783  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
1784  m_Vector.data(),
1785  m_Vector.data() + m_Vector.size(),
1786  pair,
1787  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
1788  VectorInsert(m_Vector, indexToInsert, pair);
1789 }
1790 
1791 template<typename KeyT, typename ValueT>
1792 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
1793 {
1794  PairType* it = VmaBinaryFindFirstNotLess(
1795  m_Vector.data(),
1796  m_Vector.data() + m_Vector.size(),
1797  key,
1798  VmaPairFirstLess<KeyT, ValueT>());
1799  if((it != m_Vector.end()) && (it->first == key))
1800  return it;
1801  else
1802  return m_Vector.end();
1803 }
1804 
1805 template<typename KeyT, typename ValueT>
1806 void VmaMap<KeyT, ValueT>::erase(iterator it)
1807 {
1808  VectorRemove(m_Vector, it - m_Vector.begin());
1809 }
1810 
1811 #endif // #if VMA_USE_STL_UNORDERED_MAP
1812 
1813 /*
1814 Represents a region of a VmaAllocation that is either assigned (returned to
1815 the user as an allocated memory block) or free.
1816 */
1817 struct VmaSuballocation
1818 {
1819  VkDeviceSize offset;
1820  VkDeviceSize size;
1821  VmaSuballocationType type;
1822 };
1823 
1824 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
1825 
1826 // Parameters of an allocation.
1827 struct VmaAllocationRequest
1828 {
1829  VmaSuballocationList::iterator freeSuballocationItem;
1830  VkDeviceSize offset;
1831 };
1832 
1833 /* Single block of memory - VkDeviceMemory with all the data about its regions
1834 assigned or free. */
1835 class VmaAllocation
1836 {
1837 public:
1838  VkDeviceMemory m_hMemory;
1839  VkDeviceSize m_Size;
1840  uint32_t m_FreeCount;
1841  VkDeviceSize m_SumFreeSize;
1842  VmaSuballocationList m_Suballocations;
1843  // Suballocations that are free and have size greater than certain threshold.
1844  // Sorted by size, ascending.
1845  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
1846 
1847  VmaAllocation(VmaAllocator hAllocator);
1848 
1849  ~VmaAllocation()
1850  {
1851  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
1852  }
1853 
1854  // Always call after construction.
1855  void Init(VkDeviceMemory newMemory, VkDeviceSize newSize);
1856  // Always call before destruction.
1857  void Destroy(VmaAllocator allocator);
1858 
1859  // Validates all data structures inside this object. If not valid, returns false.
1860  bool Validate() const;
1861 
1862  // Tries to find a place for suballocation with given parameters inside this allocation.
1863  // If succeeded, fills pAllocationRequest and returns true.
1864  // If failed, returns false.
1865  bool CreateAllocationRequest(
1866  VkDeviceSize bufferImageGranularity,
1867  VkDeviceSize allocSize,
1868  VkDeviceSize allocAlignment,
1869  VmaSuballocationType allocType,
1870  VmaAllocationRequest* pAllocationRequest);
1871 
1872  // Checks if requested suballocation with given parameters can be placed in given freeSuballocItem.
1873  // If yes, fills pOffset and returns true. If no, returns false.
1874  bool CheckAllocation(
1875  VkDeviceSize bufferImageGranularity,
1876  VkDeviceSize allocSize,
1877  VkDeviceSize allocAlignment,
1878  VmaSuballocationType allocType,
1879  VmaSuballocationList::const_iterator freeSuballocItem,
1880  VkDeviceSize* pOffset) const;
1881 
1882  // Returns true if this allocation is empty - contains only a single free suballocation.
1883  bool IsEmpty() const;
1884 
1885  // Makes actual allocation based on request. Request must already be checked
1886  // and valid.
1887  void Alloc(
1888  const VmaAllocationRequest& request,
1889  VmaSuballocationType type,
1890  VkDeviceSize allocSize);
1891 
1892  // Frees suballocation assigned to given memory region.
1893  void Free(const VkMappedMemoryRange* pMemory);
1894 
1895 #if VMA_STATS_STRING_ENABLED
1896  void PrintDetailedMap(class VmaStringBuilder& sb) const;
1897 #endif
1898 
1899 private:
1900  // Given a free suballocation, merges it with the following one, which must also be free.
1901  void MergeFreeWithNext(VmaSuballocationList::iterator item);
1902  // Releases given suballocation, making it free. Merges it with adjacent free
1903  // suballocations if applicable.
1904  void FreeSuballocation(VmaSuballocationList::iterator suballocItem);
1905  // Given a free suballocation, inserts it into the sorted list
1906  // m_FreeSuballocationsBySize if it is large enough to qualify.
1907  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
1908  // Given a free suballocation, removes it from the sorted list
1909  // m_FreeSuballocationsBySize if it is registered there.
1910  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
1911 };
1912 
1913 // Allocation for an object that has its own private VkDeviceMemory.
1914 struct VmaOwnAllocation
1915 {
1916  VkDeviceMemory m_hMemory;
1917  VkDeviceSize m_Size;
1918  VmaSuballocationType m_Type;
1919 };
1920 
1921 struct VmaOwnAllocationMemoryHandleLess
1922 {
1923  bool operator()(const VmaOwnAllocation& lhs, const VmaOwnAllocation& rhs) const
1924  {
1925  return lhs.m_hMemory < rhs.m_hMemory;
1926  }
1927  bool operator()(const VmaOwnAllocation& lhs, VkDeviceMemory rhsMem) const
1928  {
1929  return lhs.m_hMemory < rhsMem;
1930  }
1931 };
1932 
1933 /* Sequence of VmaAllocation. Represents memory blocks allocated for a specific
1934 Vulkan memory type. */
1935 struct VmaAllocationVector
1936 {
1937  // Incrementally sorted by sumFreeSize, ascending.
1938  VmaVector< VmaAllocation*, VmaStlAllocator<VmaAllocation*> > m_Allocations;
1939 
1940  VmaAllocationVector(VmaAllocator hAllocator);
1941  ~VmaAllocationVector();
1942 
1943  bool IsEmpty() const { return m_Allocations.empty(); }
1944 
1945  // Tries to free memory from any of its Allocations.
1946  // Returns index of Allocation that the memory was freed from, or -1 if not found.
1947  size_t Free(const VkMappedMemoryRange* pMemory);
1948 
1949  // Performs single step in sorting m_Allocations. They may not be fully sorted
1950  // after this call.
1951  void IncrementallySortAllocations();
1952 
1953  // Adds statistics of this AllocationVector to pStats.
1954  void AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const;
1955 
1956 #if VMA_STATS_STRING_ENABLED
1957  void PrintDetailedMap(class VmaStringBuilder& sb) const;
1958 #endif
1959 
1960 private:
1961  VmaAllocator m_hAllocator;
1962 };
1963 
1964 // Main allocator object.
1965 struct VmaAllocator_T
1966 {
1967  VkDevice m_hDevice;
1968  bool m_AllocationCallbacksSpecified;
1969  VkAllocationCallbacks m_AllocationCallbacks;
1970  VkDeviceSize m_PreferredLargeHeapBlockSize;
1971  VkDeviceSize m_PreferredSmallHeapBlockSize;
1972 
1973  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
1974  VkPhysicalDeviceMemoryProperties m_MemProps;
1975 
1976  VmaAllocationVector* m_pAllocations[VK_MAX_MEMORY_TYPES];
1977  /* There can be at most one allocation that is completely empty - a
1978  hysteresis to avoid the pessimistic case of alternating creation and destruction
1979  of a VkDeviceMemory. */
1980  bool m_HasEmptyAllocation[VK_MAX_MEMORY_TYPES];
1981  VMA_MUTEX m_AllocationsMutex[VK_MAX_MEMORY_TYPES];
1982 
1983  // Each vector is sorted by memory (handle value).
1984  typedef VmaVector< VmaOwnAllocation, VmaStlAllocator<VmaOwnAllocation> > OwnAllocationVectorType;
1985  OwnAllocationVectorType* m_pOwnAllocations[VK_MAX_MEMORY_TYPES];
1986  VMA_MUTEX m_OwnAllocationsMutex[VK_MAX_MEMORY_TYPES];
1987 
1988  // Sorted by first (VkBuffer handle value).
1989  VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange) m_BufferToMemoryMap;
1990  VMA_MUTEX m_BufferToMemoryMapMutex;
1991  // Sorted by first (VkImage handle value).
1992  VMA_MAP_TYPE(VkImage, VkMappedMemoryRange) m_ImageToMemoryMap;
1993  VMA_MUTEX m_ImageToMemoryMapMutex;
1994 
1995  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
1996  ~VmaAllocator_T();
1997 
1998  const VkAllocationCallbacks* GetAllocationCallbacks() const
1999  {
2000  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
2001  }
2002 
2003  VkDeviceSize GetPreferredBlockSize(uint32_t memTypeIndex) const;
2004 
2005  VkDeviceSize GetBufferImageGranularity() const
2006  {
2007  return VMA_MAX(
2008  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
2009  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
2010  }
2011 
2012  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
2013  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
2014 
2015  // Main allocation function.
2016  VkResult AllocateMemory(
2017  const VkMemoryRequirements& vkMemReq,
2018  const VmaMemoryRequirements& vmaMemReq,
2019  VmaSuballocationType suballocType,
2020  VkMappedMemoryRange* pMemory,
2021  uint32_t* pMemoryTypeIndex);
2022 
2023  // Main deallocation function.
2024  void FreeMemory(const VkMappedMemoryRange* pMemory);
2025 
2026  void CalculateStats(VmaStats* pStats);
2027 
2028 #if VMA_STATS_STRING_ENABLED
2029  void PrintDetailedMap(class VmaStringBuilder& sb);
2030 #endif
2031 
2032 private:
2033  VkPhysicalDevice m_PhysicalDevice;
2034 
2035  VkResult AllocateMemoryOfType(
2036  const VkMemoryRequirements& vkMemReq,
2037  const VmaMemoryRequirements& vmaMemReq,
2038  uint32_t memTypeIndex,
2039  VmaSuballocationType suballocType,
2040  VkMappedMemoryRange* pMemory);
2041 
2042  // Allocates and registers new VkDeviceMemory specifically for single allocation.
2043  VkResult AllocateOwnMemory(
2044  VkDeviceSize size,
2045  VmaSuballocationType suballocType,
2046  uint32_t memTypeIndex,
2047  VkMappedMemoryRange* pMemory);
2048 
2049  // Tries to free pMemory as Own Memory. Returns true if found and freed.
2050  bool FreeOwnMemory(const VkMappedMemoryRange* pMemory);
2051 };
2052 
2054 // Memory allocation #2 after VmaAllocator_T definition
2055 
2056 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
2057 {
2058  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
2059 }
2060 
2061 static void VmaFree(VmaAllocator hAllocator, void* ptr)
2062 {
2063  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
2064 }
2065 
2066 template<typename T>
2067 static T* VmaAllocate(VmaAllocator hAllocator)
2068 {
2069  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
2070 }
2071 
2072 template<typename T>
2073 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
2074 {
2075  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
2076 }
2077 
2078 template<typename T>
2079 static void vma_delete(VmaAllocator hAllocator, T* ptr)
2080 {
2081  if(ptr != VMA_NULL)
2082  {
2083  ptr->~T();
2084  VmaFree(hAllocator, ptr);
2085  }
2086 }
2087 
2088 template<typename T>
2089 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
2090 {
2091  if(ptr != VMA_NULL)
2092  {
2093  for(size_t i = count; i--; )
2094  ptr[i].~T();
2095  VmaFree(hAllocator, ptr);
2096  }
2097 }
2098 
2100 // VmaStringBuilder
2101 
2102 #if VMA_STATS_STRING_ENABLED
2103 
2104 class VmaStringBuilder
2105 {
2106 public:
2107  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
2108  size_t GetLength() const { return m_Data.size(); }
2109  const char* GetData() const { return m_Data.data(); }
2110 
2111  void Add(char ch) { m_Data.push_back(ch); }
2112  void Add(const char* pStr);
2113  void AddNewLine() { Add('\n'); }
2114  void AddNumber(uint32_t num);
2115  void AddNumber(uint64_t num);
2116  void AddBool(bool b) { Add(b ? "true" : "false"); }
2117  void AddNull() { Add("null"); }
2118  void AddString(const char* pStr);
2119 
2120 private:
2121  VmaVector< char, VmaStlAllocator<char> > m_Data;
2122 };
2123 
2124 void VmaStringBuilder::Add(const char* pStr)
2125 {
2126  const size_t strLen = strlen(pStr);
2127  if(strLen > 0)
2128  {
2129  const size_t oldCount = m_Data.size();
2130  m_Data.resize(oldCount + strLen);
2131  memcpy(m_Data.data() + oldCount, pStr, strLen);
2132  }
2133 }
2134 
2135 void VmaStringBuilder::AddNumber(uint32_t num)
2136 {
2137  char buf[11];
2138  VmaUint32ToStr(buf, sizeof(buf), num);
2139  Add(buf);
2140 }
2141 
2142 void VmaStringBuilder::AddNumber(uint64_t num)
2143 {
2144  char buf[21];
2145  VmaUint64ToStr(buf, sizeof(buf), num);
2146  Add(buf);
2147 }
2148 
2149 void VmaStringBuilder::AddString(const char* pStr)
2150 {
2151  Add('"');
2152  const size_t strLen = strlen(pStr);
2153  for(size_t i = 0; i < strLen; ++i)
2154  {
2155  char ch = pStr[i];
2156  if(ch == '\\')
2157  Add("\\\\");
2158  else if(ch == '"')
2159  Add("\\\"");
2160  else if(ch >= 32)
2161  Add(ch);
2162  else switch(ch)
2163  {
2164  case '\n':
2165  Add("\\n");
2166  break;
2167  case '\r':
2168  Add("\\r");
2169  break;
2170  case '\t':
2171  Add("\\t");
2172  break;
2173  default:
2174  VMA_ASSERT(0 && "Character not currently supported.");
2175  break;
2176  }
2177  }
2178  Add('"');
2179 }
2180 
2182 
2183 // Correspond to values of enum VmaSuballocationType.
2184 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
2185  "FREE",
2186  "UNKNOWN",
2187  "BUFFER",
2188  "IMAGE_UNKNOWN",
2189  "IMAGE_LINEAR",
2190  "IMAGE_OPTIMAL",
2191 };
2192 
2193 static void VmaPrintStatInfo(VmaStringBuilder& sb, const VmaStatInfo& stat)
2194 {
2195  sb.Add("{ \"Allocations\": ");
2196  sb.AddNumber(stat.AllocationCount);
2197  sb.Add(", \"Suballocations\": ");
2198  sb.AddNumber(stat.SuballocationCount);
2199  sb.Add(", \"UnusedRanges\": ");
2200  sb.AddNumber(stat.UnusedRangeCount);
2201  sb.Add(", \"UsedBytes\": ");
2202  sb.AddNumber(stat.UsedBytes);
2203  sb.Add(", \"UnusedBytes\": ");
2204  sb.AddNumber(stat.UnusedBytes);
2205  sb.Add(", \"SuballocationSize\": { \"Min\": ");
2206  sb.AddNumber(stat.SuballocationSizeMin);
2207  sb.Add(", \"Avg\": ");
2208  sb.AddNumber(stat.SuballocationSizeAvg);
2209  sb.Add(", \"Max\": ");
2210  sb.AddNumber(stat.SuballocationSizeMax);
2211  sb.Add(" }, \"UnusedRangeSize\": { \"Min\": ");
2212  sb.AddNumber(stat.UnusedRangeSizeMin);
2213  sb.Add(", \"Avg\": ");
2214  sb.AddNumber(stat.UnusedRangeSizeAvg);
2215  sb.Add(", \"Max\": ");
2216  sb.AddNumber(stat.UnusedRangeSizeMax);
2217  sb.Add(" } }");
2218 }
2219 
2220 #endif // #if VMA_STATS_STRING_ENABLED
2221 
2222 struct VmaSuballocationItemSizeLess
2223 {
2224  bool operator()(
2225  const VmaSuballocationList::iterator lhs,
2226  const VmaSuballocationList::iterator rhs) const
2227  {
2228  return lhs->size < rhs->size;
2229  }
2230  bool operator()(
2231  const VmaSuballocationList::iterator lhs,
2232  VkDeviceSize rhsSize) const
2233  {
2234  return lhs->size < rhsSize;
2235  }
2236 };
2237 
2238 VmaAllocation::VmaAllocation(VmaAllocator hAllocator) :
2239  m_hMemory(VK_NULL_HANDLE),
2240  m_Size(0),
2241  m_FreeCount(0),
2242  m_SumFreeSize(0),
2243  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
2244  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
2245 {
2246 }
2247 
2248 void VmaAllocation::Init(VkDeviceMemory newMemory, VkDeviceSize newSize)
2249 {
2250  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
2251 
2252  m_hMemory = newMemory;
2253  m_Size = newSize;
2254  m_FreeCount = 1;
2255  m_SumFreeSize = newSize;
2256 
2257  m_Suballocations.clear();
2258  m_FreeSuballocationsBySize.clear();
2259 
2260  VmaSuballocation suballoc = {};
2261  suballoc.offset = 0;
2262  suballoc.size = newSize;
2263  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2264 
2265  m_Suballocations.push_back(suballoc);
2266  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2267  --suballocItem;
2268  m_FreeSuballocationsBySize.push_back(suballocItem);
2269 }
2270 
2271 void VmaAllocation::Destroy(VmaAllocator allocator)
2272 {
2273  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
2274  vkFreeMemory(allocator->m_hDevice, m_hMemory, allocator->GetAllocationCallbacks());
2275  m_hMemory = VK_NULL_HANDLE;
2276 }
2277 
2278 bool VmaAllocation::Validate() const
2279 {
2280  if((m_hMemory == VK_NULL_HANDLE) ||
2281  (m_Size == 0) ||
2282  m_Suballocations.empty())
2283  {
2284  return false;
2285  }
2286 
2287  // Expected offset of new suballocation as calculated from previous ones.
2288  VkDeviceSize calculatedOffset = 0;
2289  // Expected number of free suballocations as calculated from traversing their list.
2290  uint32_t calculatedFreeCount = 0;
2291  // Expected sum size of free suballocations as calculated from traversing their list.
2292  VkDeviceSize calculatedSumFreeSize = 0;
2293  // Expected number of free suballocations that should be registered in
2294  // m_FreeSuballocationsBySize calculated from traversing their list.
2295  size_t freeSuballocationsToRegister = 0;
2296  // True if previously visited suballocation was free.
2297  bool prevFree = false;
2298 
2299  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2300  suballocItem != m_Suballocations.cend();
2301  ++suballocItem)
2302  {
2303  const VmaSuballocation& subAlloc = *suballocItem;
2304 
2305  // Actual offset of this suballocation doesn't match expected one.
2306  if(subAlloc.offset != calculatedOffset)
2307  return false;
2308 
2309  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
2310  // Two adjacent free suballocations are invalid. They should be merged.
2311  if(prevFree && currFree)
2312  return false;
2313  prevFree = currFree;
2314 
2315  if(currFree)
2316  {
2317  calculatedSumFreeSize += subAlloc.size;
2318  ++calculatedFreeCount;
2319  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2320  ++freeSuballocationsToRegister;
2321  }
2322 
2323  calculatedOffset += subAlloc.size;
2324  }
2325 
2326  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
2327  // match expected one.
2328  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
2329  return false;
2330 
2331  VkDeviceSize lastSize = 0;
2332  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
2333  {
2334  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
2335 
2336  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
2337  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
2338  return false;
2339  // They must be sorted by size ascending.
2340  if(suballocItem->size < lastSize)
2341  return false;
2342 
2343  lastSize = suballocItem->size;
2344  }
2345 
2346  // Check if totals match calculated values.
2347  return
2348  (calculatedOffset == m_Size) &&
2349  (calculatedSumFreeSize == m_SumFreeSize) &&
2350  (calculatedFreeCount == m_FreeCount);
2351 }
2352 
2353 /*
2354 How many suitable free suballocations to analyze before choosing best one.
2355 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
2356  be chosen.
2357 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
2358  suballocations will be analyzed and the best one will be chosen.
2359 - Any other value is also acceptable.
2360 */
2361 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
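// Illustrative sketch (editorial, not part of the library): when free ranges
// are kept sorted by size, Best-Fit reduces to a lower-bound binary search,
// which is what CreateAllocationRequest below does via
// VmaBinaryFindFirstNotLess. A hypothetical standalone equivalent using the
// standard library:
/*
#include <algorithm>
#include <cstdint>
#include <vector>

// Returns the index of the smallest free range that can hold `size`,
// or SIZE_MAX if none fits.
size_t BestFitIndex(const std::vector<uint64_t>& freeSizesSorted, uint64_t size)
{
    std::vector<uint64_t>::const_iterator it = std::lower_bound(
        freeSizesSorted.begin(), freeSizesSorted.end(), size);
    return (it != freeSizesSorted.end()) ?
        (size_t)(it - freeSizesSorted.begin()) : SIZE_MAX;
}
*/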
2362 
2363 bool VmaAllocation::CreateAllocationRequest(
2364  VkDeviceSize bufferImageGranularity,
2365  VkDeviceSize allocSize,
2366  VkDeviceSize allocAlignment,
2367  VmaSuballocationType allocType,
2368  VmaAllocationRequest* pAllocationRequest)
2369 {
2370  VMA_ASSERT(allocSize > 0);
2371  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
2372  VMA_ASSERT(pAllocationRequest != VMA_NULL);
2373  VMA_HEAVY_ASSERT(Validate());
2374 
2375  // There is not enough total free space in this allocation to fulfill the request: Early return.
2376  if(m_SumFreeSize < allocSize)
2377  return false;
2378 
2379  // Old brute-force algorithm, linearly searching suballocations.
2380  /*
2381  uint32_t suitableSuballocationsFound = 0;
2382  for(VmaSuballocationList::iterator suballocItem = suballocations.Front();
2383  suballocItem != VMA_NULL &&
2384  suitableSuballocationsFound < MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK;
2385  suballocItem = suballocItem->Next)
2386  {
2387  if(suballocItem->Value.type == VMA_SUBALLOCATION_TYPE_FREE)
2388  {
2389  VkDeviceSize offset = 0, cost = 0;
2390  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset, &cost))
2391  {
2392  ++suitableSuballocationsFound;
2393  if(cost < costLimit)
2394  {
2395  pAllocationRequest->freeSuballocationItem = suballocItem;
2396  pAllocationRequest->offset = offset;
2397  pAllocationRequest->cost = cost;
2398  if(cost == 0)
2399  return true;
2400  costLimit = cost;
2401  betterSuballocationFound = true;
2402  }
2403  }
2404  }
2405  }
2406  */
2407 
2408  // New algorithm, efficiently searching m_FreeSuballocationsBySize.
2409  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
2410  if(freeSuballocCount > 0)
2411  {
2412  if(VMA_BEST_FIT)
2413  {
2414  // Find first free suballocation with size not less than allocSize.
2415  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2416  m_FreeSuballocationsBySize.data(),
2417  m_FreeSuballocationsBySize.data() + freeSuballocCount,
2418  allocSize,
2419  VmaSuballocationItemSizeLess());
2420  size_t index = it - m_FreeSuballocationsBySize.data();
2421  for(; index < freeSuballocCount; ++index)
2422  {
2423  VkDeviceSize offset = 0;
2424  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
2425  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
2426  {
2427  pAllocationRequest->freeSuballocationItem = suballocItem;
2428  pAllocationRequest->offset = offset;
2429  return true;
2430  }
2431  }
2432  }
2433  else
2434  {
2435  // Search starting from the biggest suballocations.
2436  for(size_t index = freeSuballocCount; index--; )
2437  {
2438  VkDeviceSize offset = 0;
2439  const VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[index];
2440  if(CheckAllocation(bufferImageGranularity, allocSize, allocAlignment, allocType, suballocItem, &offset))
2441  {
2442  pAllocationRequest->freeSuballocationItem = suballocItem;
2443  pAllocationRequest->offset = offset;
2444  return true;
2445  }
2446  }
2447  }
2448  }
2449 
2450  return false;
2451 }
2452 
2453 bool VmaAllocation::CheckAllocation(
2454  VkDeviceSize bufferImageGranularity,
2455  VkDeviceSize allocSize,
2456  VkDeviceSize allocAlignment,
2457  VmaSuballocationType allocType,
2458  VmaSuballocationList::const_iterator freeSuballocItem,
2459  VkDeviceSize* pOffset) const
2460 {
2461  VMA_ASSERT(allocSize > 0);
2462  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
2463  VMA_ASSERT(freeSuballocItem != m_Suballocations.cend());
2464  VMA_ASSERT(pOffset != VMA_NULL);
2465 
2466  const VmaSuballocation& suballoc = *freeSuballocItem;
2467  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
2468 
2469  // Size of this suballocation is too small for this request: Early return.
2470  if(suballoc.size < allocSize)
2471  return false;
2472 
2473  // Start from offset equal to beginning of this suballocation.
2474  *pOffset = suballoc.offset;
2475 
2476  // Apply VMA_DEBUG_MARGIN at the beginning.
2477  if((VMA_DEBUG_MARGIN > 0) && freeSuballocItem != m_Suballocations.cbegin())
2478  *pOffset += VMA_DEBUG_MARGIN;
2479 
2480  // Apply alignment.
2481  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
2482  *pOffset = VmaAlignUp(*pOffset, alignment);
2483 
2484  // Check previous suballocations for BufferImageGranularity conflicts.
2485  // Make bigger alignment if necessary.
2486  if(bufferImageGranularity > 1)
2487  {
2488  bool bufferImageGranularityConflict = false;
2489  VmaSuballocationList::const_iterator prevSuballocItem = freeSuballocItem;
2490  while(prevSuballocItem != m_Suballocations.cbegin())
2491  {
2492  --prevSuballocItem;
2493  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
2494  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
2495  {
2496  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
2497  {
2498  bufferImageGranularityConflict = true;
2499  break;
2500  }
2501  }
2502  else
2503  // Already on previous page.
2504  break;
2505  }
2506  if(bufferImageGranularityConflict)
2507  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
2508  }
2509 
2510  // Calculate padding at the beginning based on current offset.
2511  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
2512 
2513  // Calculate required margin at the end if this is not last suballocation.
2514  VmaSuballocationList::const_iterator next = freeSuballocItem;
2515  ++next;
2516  const VkDeviceSize requiredEndMargin =
2517  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
2518 
2519  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
2520  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
2521  return false;
2522 
2523  // Check next suballocations for BufferImageGranularity conflicts.
2524  // If conflict exists, allocation cannot be made here.
2525  if(bufferImageGranularity > 1)
2526  {
2527  VmaSuballocationList::const_iterator nextSuballocItem = freeSuballocItem;
2528  ++nextSuballocItem;
2529  while(nextSuballocItem != m_Suballocations.cend())
2530  {
2531  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
2532  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
2533  {
2534  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
2535  return false;
2536  }
2537  else
2538  // Already on next page.
2539  break;
2540  ++nextSuballocItem;
2541  }
2542  }
2543 
2544  // All tests passed: Success. pOffset is already filled.
2545  return true;
2546 }
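// Note (editorial): VmaAlignUp used above rounds an offset up to a multiple
// of the alignment. A minimal equivalent, assuming `alignment` is a power of
// two as Vulkan guarantees for VkMemoryRequirements::alignment:
/*
static inline VkDeviceSize AlignUpPow2(VkDeviceSize offset, VkDeviceSize alignment)
{
    return (offset + alignment - 1) & ~(alignment - 1);
}
// AlignUpPow2(13, 8) == 16, AlignUpPow2(16, 8) == 16.
*/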
2547 
2548 bool VmaAllocation::IsEmpty() const
2549 {
2550  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
2551 }
2552 
2553 void VmaAllocation::Alloc(
2554  const VmaAllocationRequest& request,
2555  VmaSuballocationType type,
2556  VkDeviceSize allocSize)
2557 {
2558  VMA_ASSERT(request.freeSuballocationItem != m_Suballocations.end());
2559  VmaSuballocation& suballoc = *request.freeSuballocationItem;
2560  // Given suballocation is a free block.
2561  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
2562  // Given offset is inside this suballocation.
2563  VMA_ASSERT(request.offset >= suballoc.offset);
2564  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
2565  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
2566  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
2567 
2568  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
2569  // it to become used.
2570  UnregisterFreeSuballocation(request.freeSuballocationItem);
2571 
2572  suballoc.offset = request.offset;
2573  suballoc.size = allocSize;
2574  suballoc.type = type;
2575 
2576  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
2577  if(paddingEnd)
2578  {
2579  VmaSuballocation paddingSuballoc = {};
2580  paddingSuballoc.offset = request.offset + allocSize;
2581  paddingSuballoc.size = paddingEnd;
2582  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2583  VmaSuballocationList::iterator next = request.freeSuballocationItem;
2584  ++next;
2585  const VmaSuballocationList::iterator paddingEndItem =
2586  m_Suballocations.insert(next, paddingSuballoc);
2587  RegisterFreeSuballocation(paddingEndItem);
2588  }
2589 
2590  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
2591  if(paddingBegin)
2592  {
2593  VmaSuballocation paddingSuballoc = {};
2594  paddingSuballoc.offset = request.offset - paddingBegin;
2595  paddingSuballoc.size = paddingBegin;
2596  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2597  const VmaSuballocationList::iterator paddingBeginItem =
2598  m_Suballocations.insert(request.freeSuballocationItem, paddingSuballoc);
2599  RegisterFreeSuballocation(paddingBeginItem);
2600  }
2601 
2602  // Update totals.
2603  m_FreeCount = m_FreeCount - 1;
2604  if(paddingBegin > 0)
2605  ++m_FreeCount;
2606  if(paddingEnd > 0)
2607  ++m_FreeCount;
2608  m_SumFreeSize -= allocSize;
2609 }
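// Worked example (editorial): a free range at offset 100 with size 200,
// serving request.offset == 128 (alignment raised 100 up to 128) and
// allocSize == 100, gives paddingBegin == 28 and paddingEnd == 72. Two new
// free ranges are inserted around the used one, so m_FreeCount changes by
// -1 + 2 == +1 and m_SumFreeSize decreases by exactly allocSize.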
2610 
2611 void VmaAllocation::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
2612 {
2613  // Change this suballocation to be marked as free.
2614  VmaSuballocation& suballoc = *suballocItem;
2615  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
2616 
2617  // Update totals.
2618  ++m_FreeCount;
2619  m_SumFreeSize += suballoc.size;
2620 
2621  // Merge with previous and/or next suballocation if it's also free.
2622  bool mergeWithNext = false;
2623  bool mergeWithPrev = false;
2624 
2625  VmaSuballocationList::iterator nextItem = suballocItem;
2626  ++nextItem;
2627  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
2628  mergeWithNext = true;
2629 
2630  VmaSuballocationList::iterator prevItem = suballocItem;
2631  if(suballocItem != m_Suballocations.begin())
2632  {
2633  --prevItem;
2634  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
2635  mergeWithPrev = true;
2636  }
2637 
2638  if(mergeWithNext)
2639  {
2640  UnregisterFreeSuballocation(nextItem);
2641  MergeFreeWithNext(suballocItem);
2642  }
2643 
2644  if(mergeWithPrev)
2645  {
2646  UnregisterFreeSuballocation(prevItem);
2647  MergeFreeWithNext(prevItem);
2648  RegisterFreeSuballocation(prevItem);
2649  }
2650  else
2651  RegisterFreeSuballocation(suballocItem);
2652 }
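// Worked example (editorial): freeing a used range that sits between two free
// ranges triggers both merges above: [free 64][used 32][free 128] collapses
// into a single [free 224] range, for a net m_FreeCount change of
// +1 (freed) - 1 (merge next) - 1 (merge prev) == -1.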
2653 
2654 void VmaAllocation::Free(const VkMappedMemoryRange* pMemory)
2655 {
2656  // If suballocation to free has offset smaller than half of allocation size, search forward.
2657  // Otherwise search backward.
2658  const bool forwardDirection = pMemory->offset < (m_Size / 2);
2659  if(forwardDirection)
2660  {
2661  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
2662  suballocItem != m_Suballocations.end();
2663  ++suballocItem)
2664  {
2665  VmaSuballocation& suballoc = *suballocItem;
2666  if(suballoc.offset == pMemory->offset)
2667  {
2668  FreeSuballocation(suballocItem);
2669  VMA_HEAVY_ASSERT(Validate());
2670  return;
2671  }
2672  }
2673  VMA_ASSERT(0 && "Not found!");
2674  }
2675  else
2676  {
2677  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
2678  suballocItem != m_Suballocations.begin(); )
2679  {
2680  --suballocItem;
2681  VmaSuballocation& suballoc = *suballocItem;
2682  if(suballoc.offset == pMemory->offset)
2683  {
2684  FreeSuballocation(suballocItem);
2685  VMA_HEAVY_ASSERT(Validate());
2686  return;
2687  }
2688  }
2689  VMA_ASSERT(0 && "Not found!");
2690  }
2691 }
2692 
2693 #if VMA_STATS_STRING_ENABLED
2694 
2695 void VmaAllocation::PrintDetailedMap(class VmaStringBuilder& sb) const
2696 {
2697  sb.Add("{\n\t\t\t\"Bytes\": ");
2698  sb.AddNumber(m_Size);
2699  sb.Add(",\n\t\t\t\"FreeBytes\": ");
2700  sb.AddNumber(m_SumFreeSize);
2701  sb.Add(",\n\t\t\t\"Suballocations\": ");
2702  sb.AddNumber(m_Suballocations.size());
2703  sb.Add(",\n\t\t\t\"FreeSuballocations\": ");
2704  sb.AddNumber(m_FreeCount);
2705  sb.Add(",\n\t\t\t\"SuballocationList\": [");
2706 
2707  size_t i = 0;
2708  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
2709  suballocItem != m_Suballocations.cend();
2710  ++suballocItem, ++i)
2711  {
2712  if(i > 0)
2713  sb.Add(",\n\t\t\t\t{ \"Type\": ");
2714  else
2715  sb.Add("\n\t\t\t\t{ \"Type\": ");
2716  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
2717  sb.Add(", \"Size\": ");
2718  sb.AddNumber(suballocItem->size);
2719  sb.Add(", \"Offset\": ");
2720  sb.AddNumber(suballocItem->offset);
2721  sb.Add(" }");
2722  }
2723 
2724  sb.Add("\n\t\t\t]\n\t\t}");
2725 }
2726 
2727 #endif // #if VMA_STATS_STRING_ENABLED
2728 
2729 void VmaAllocation::MergeFreeWithNext(VmaSuballocationList::iterator item)
2730 {
2731  VMA_ASSERT(item != m_Suballocations.end());
2732  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2733 
2734  VmaSuballocationList::iterator nextItem = item;
2735  ++nextItem;
2736  VMA_ASSERT(nextItem != m_Suballocations.end());
2737  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
2738 
2739  item->size += nextItem->size;
2740  --m_FreeCount;
2741  m_Suballocations.erase(nextItem);
2742 }
2743 
2744 void VmaAllocation::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
2745 {
2746  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2747  VMA_ASSERT(item->size > 0);
2748 
2749  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2750  {
2751  if(m_FreeSuballocationsBySize.empty())
2752  m_FreeSuballocationsBySize.push_back(item);
2753  else
2754  {
2755  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2756  m_FreeSuballocationsBySize.data(),
2757  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2758  item,
2759  VmaSuballocationItemSizeLess());
2760  size_t index = it - m_FreeSuballocationsBySize.data();
2761  VectorInsert(m_FreeSuballocationsBySize, index, item);
2762  }
2763  }
2764 }
2765 
2766 void VmaAllocation::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
2767 {
2768  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
2769  VMA_ASSERT(item->size > 0);
2770 
2771  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
2772  {
2773  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
2774  m_FreeSuballocationsBySize.data(),
2775  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
2776  item,
2777  VmaSuballocationItemSizeLess());
2778  for(size_t index = it - m_FreeSuballocationsBySize.data();
2779  index < m_FreeSuballocationsBySize.size();
2780  ++index)
2781  {
2782  if(m_FreeSuballocationsBySize[index] == item)
2783  {
2784  VectorRemove(m_FreeSuballocationsBySize, index);
2785  return;
2786  }
2787  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
2788  }
2789  VMA_ASSERT(0 && "Not found.");
2790  }
2791 }
2792 
2793 static void InitStatInfo(VmaStatInfo& outInfo)
2794 {
2795  memset(&outInfo, 0, sizeof(outInfo));
2796  outInfo.SuballocationSizeMin = UINT64_MAX;
2797  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2798 }
2799 
2800 static void CalcAllocationStatInfo(VmaStatInfo& outInfo, const VmaAllocation& alloc)
2801 {
2802  outInfo.AllocationCount = 1;
2803 
2804  const uint32_t rangeCount = (uint32_t)alloc.m_Suballocations.size();
2805  outInfo.SuballocationCount = rangeCount - alloc.m_FreeCount;
2806  outInfo.UnusedRangeCount = alloc.m_FreeCount;
2807 
2808  outInfo.UnusedBytes = alloc.m_SumFreeSize;
2809  outInfo.UsedBytes = alloc.m_Size - outInfo.UnusedBytes;
2810 
2811  outInfo.SuballocationSizeMin = UINT64_MAX;
2812  outInfo.SuballocationSizeMax = 0;
2813  outInfo.UnusedRangeSizeMin = UINT64_MAX;
2814  outInfo.UnusedRangeSizeMax = 0;
2815 
2816  for(VmaSuballocationList::const_iterator suballocItem = alloc.m_Suballocations.cbegin();
2817  suballocItem != alloc.m_Suballocations.cend();
2818  ++suballocItem)
2819  {
2820  const VmaSuballocation& suballoc = *suballocItem;
2821  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
2822  {
2823  outInfo.SuballocationSizeMin = VMA_MIN(outInfo.SuballocationSizeMin, suballoc.size);
2824  outInfo.SuballocationSizeMax = VMA_MAX(outInfo.SuballocationSizeMax, suballoc.size);
2825  }
2826  else
2827  {
2828  outInfo.UnusedRangeSizeMin = VMA_MIN(outInfo.UnusedRangeSizeMin, suballoc.size);
2829  outInfo.UnusedRangeSizeMax = VMA_MAX(outInfo.UnusedRangeSizeMax, suballoc.size);
2830  }
2831  }
2832 }
2833 
2834 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
2835 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
2836 {
2837  inoutInfo.AllocationCount += srcInfo.AllocationCount;
2838  inoutInfo.SuballocationCount += srcInfo.SuballocationCount;
2839  inoutInfo.UnusedRangeCount += srcInfo.UnusedRangeCount;
2840  inoutInfo.UsedBytes += srcInfo.UsedBytes;
2841  inoutInfo.UnusedBytes += srcInfo.UnusedBytes;
2842  inoutInfo.SuballocationSizeMin = VMA_MIN(inoutInfo.SuballocationSizeMin, srcInfo.SuballocationSizeMin);
2843  inoutInfo.SuballocationSizeMax = VMA_MAX(inoutInfo.SuballocationSizeMax, srcInfo.SuballocationSizeMax);
2844  inoutInfo.UnusedRangeSizeMin = VMA_MIN(inoutInfo.UnusedRangeSizeMin, srcInfo.UnusedRangeSizeMin);
2845  inoutInfo.UnusedRangeSizeMax = VMA_MAX(inoutInfo.UnusedRangeSizeMax, srcInfo.UnusedRangeSizeMax);
2846 }
2847 
2848 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
2849 {
2850  inoutInfo.SuballocationSizeAvg = (inoutInfo.SuballocationCount > 0) ?
2851  VmaRoundDiv<VkDeviceSize>(inoutInfo.UsedBytes, inoutInfo.SuballocationCount) : 0;
2852  inoutInfo.UnusedRangeSizeAvg = (inoutInfo.UnusedRangeCount > 0) ?
2853  VmaRoundDiv<VkDeviceSize>(inoutInfo.UnusedBytes, inoutInfo.UnusedRangeCount) : 0;
2854 }
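// Note (editorial): the averages above use VmaRoundDiv, i.e. integer division
// rounded to nearest. A minimal equivalent for unsigned operands (assumes
// y > 0):
/*
template<typename T>
static T RoundDivSketch(T x, T y)
{
    return (x + y / (T)2) / y;
}
// RoundDivSketch<uint64_t>(10, 4) == 3, since 2.5 rounds to 3.
*/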
2855 
2856 VmaAllocationVector::VmaAllocationVector(VmaAllocator hAllocator) :
2857  m_hAllocator(hAllocator),
2858  m_Allocations(VmaStlAllocator<VmaAllocation*>(hAllocator->GetAllocationCallbacks()))
2859 {
2860 }
2861 
2862 VmaAllocationVector::~VmaAllocationVector()
2863 {
2864  for(size_t i = m_Allocations.size(); i--; )
2865  {
2866  m_Allocations[i]->Destroy(m_hAllocator);
2867  vma_delete(m_hAllocator, m_Allocations[i]);
2868  }
2869 }
2870 
2871 size_t VmaAllocationVector::Free(const VkMappedMemoryRange* pMemory)
2872 {
2873  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2874  {
2875  VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2876  VMA_ASSERT(pAlloc);
2877  if(pAlloc->m_hMemory == pMemory->memory)
2878  {
2879  pAlloc->Free(pMemory);
2880  VMA_HEAVY_ASSERT(pAlloc->Validate());
2881  return allocIndex;
2882  }
2883  }
2884 
2885  return (size_t)-1;
2886 }
2887 
2888 void VmaAllocationVector::IncrementallySortAllocations()
2889 {
2890  // Bubble sort only until first swap.
2891  for(size_t i = 1; i < m_Allocations.size(); ++i)
2892  {
2893  if(m_Allocations[i - 1]->m_SumFreeSize > m_Allocations[i]->m_SumFreeSize)
2894  {
2895  VMA_SWAP(m_Allocations[i - 1], m_Allocations[i]);
2896  return;
2897  }
2898  }
2899 }
2900 
2901 #if VMA_STATS_STRING_ENABLED
2902 
2903 void VmaAllocationVector::PrintDetailedMap(class VmaStringBuilder& sb) const
2904 {
2905  for(size_t i = 0; i < m_Allocations.size(); ++i)
2906  {
2907  if(i > 0)
2908  sb.Add(",\n\t\t");
2909  else
2910  sb.Add("\n\t\t");
2911  m_Allocations[i]->PrintDetailedMap(sb);
2912  }
2913 }
2914 
2915 #endif // #if VMA_STATS_STRING_ENABLED
2916 
2917 void VmaAllocationVector::AddStats(VmaStats* pStats, uint32_t memTypeIndex, uint32_t memHeapIndex) const
2918 {
2919  for(uint32_t allocIndex = 0; allocIndex < m_Allocations.size(); ++allocIndex)
2920  {
2921  const VmaAllocation* const pAlloc = m_Allocations[allocIndex];
2922  VMA_ASSERT(pAlloc);
2923  VMA_HEAVY_ASSERT(pAlloc->Validate());
2924  VmaStatInfo allocationStatInfo;
2925  CalcAllocationStatInfo(allocationStatInfo, *pAlloc);
2926  VmaAddStatInfo(pStats->total, allocationStatInfo);
2927  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
2928  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
2929  }
2930 }
2931 
2932 ////////////////////////////////////////////////////////////////////////////////
2933 // VmaAllocator_T
2934 
2935 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
2936  m_PhysicalDevice(pCreateInfo->physicalDevice),
2937  m_hDevice(pCreateInfo->device),
2938  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
2939  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
2940  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
2941  m_PreferredLargeHeapBlockSize(0),
2942  m_PreferredSmallHeapBlockSize(0),
2943  m_BufferToMemoryMap(VmaStlAllocator< VmaPair<VkBuffer, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks)),
2944  m_ImageToMemoryMap(VmaStlAllocator< VmaPair<VkImage, VkMappedMemoryRange> >(pCreateInfo->pAllocationCallbacks))
2945 {
2946  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
2947 
2948  memset(&m_MemProps, 0, sizeof(m_MemProps));
2949  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
2950 
2951  memset(&m_pAllocations, 0, sizeof(m_pAllocations));
2952  memset(&m_HasEmptyAllocation, 0, sizeof(m_HasEmptyAllocation));
2953  memset(&m_pOwnAllocations, 0, sizeof(m_pOwnAllocations));
2954 
2955  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
2956  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
2957  m_PreferredSmallHeapBlockSize = (pCreateInfo->preferredSmallHeapBlockSize != 0) ?
2958  pCreateInfo->preferredSmallHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_SMALL_HEAP_BLOCK_SIZE);
2959 
2960  vkGetPhysicalDeviceProperties(m_PhysicalDevice, &m_PhysicalDeviceProperties);
2961  vkGetPhysicalDeviceMemoryProperties(m_PhysicalDevice, &m_MemProps);
2962 
2963  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
2964  {
2965  m_pAllocations[i] = vma_new(this, VmaAllocationVector)(this);
2966  m_pOwnAllocations[i] = vma_new(this, OwnAllocationVectorType)(VmaStlAllocator<VmaOwnAllocation>(GetAllocationCallbacks()));
2967  }
2968 }
2969 
2970 VmaAllocator_T::~VmaAllocator_T()
2971 {
2972  for(VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = m_ImageToMemoryMap.begin();
2973  it != m_ImageToMemoryMap.end();
2974  ++it)
2975  {
2976  vkDestroyImage(m_hDevice, it->first, GetAllocationCallbacks());
2977  }
2978 
2979  for(VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = m_BufferToMemoryMap.begin();
2980  it != m_BufferToMemoryMap.end();
2981  ++it)
2982  {
2983  vkDestroyBuffer(m_hDevice, it->first, GetAllocationCallbacks());
2984  }
2985 
2986  for(uint32_t typeIndex = 0; typeIndex < GetMemoryTypeCount(); ++typeIndex)
2987  {
2988  OwnAllocationVectorType* pOwnAllocations = m_pOwnAllocations[typeIndex];
2989  VMA_ASSERT(pOwnAllocations);
2990  for(size_t allocationIndex = 0; allocationIndex < pOwnAllocations->size(); ++allocationIndex)
2991  {
2992  const VmaOwnAllocation& ownAlloc = (*pOwnAllocations)[allocationIndex];
2993  vkFreeMemory(m_hDevice, ownAlloc.m_hMemory, GetAllocationCallbacks());
2994  }
2995  }
2996 
2997  for(size_t i = GetMemoryTypeCount(); i--; )
2998  {
2999  vma_delete(this, m_pAllocations[i]);
3000  vma_delete(this, m_pOwnAllocations[i]);
3001  }
3002 }
3003 
3004 VkDeviceSize VmaAllocator_T::GetPreferredBlockSize(uint32_t memTypeIndex) const
3005 {
3006  VkDeviceSize heapSize = m_MemProps.memoryHeaps[m_MemProps.memoryTypes[memTypeIndex].heapIndex].size;
3007  return (heapSize <= VMA_SMALL_HEAP_MAX_SIZE) ?
3008  m_PreferredSmallHeapBlockSize : m_PreferredLargeHeapBlockSize;
3009 }
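// Example (editorial): with this file's defaults, a 256 MB heap
// (<= VMA_SMALL_HEAP_MAX_SIZE) gets m_PreferredSmallHeapBlockSize as its
// preferred block size, while a large device-local heap (e.g. 8 GB) gets
// m_PreferredLargeHeapBlockSize.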
3010 
3011 VkResult VmaAllocator_T::AllocateMemoryOfType(
3012  const VkMemoryRequirements& vkMemReq,
3013  const VmaMemoryRequirements& vmaMemReq,
3014  uint32_t memTypeIndex,
3015  VmaSuballocationType suballocType,
3016  VkMappedMemoryRange* pMemory)
3017 {
3018  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
3019 
3020  pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
3021  pMemory->pNext = VMA_NULL;
3022  pMemory->size = vkMemReq.size;
3023 
3024  const VkDeviceSize preferredBlockSize = GetPreferredBlockSize(memTypeIndex);
3025  // Heuristics: Allocate own memory if requested size is greater than half of preferred block size.
3026  const bool ownMemory =
3027  vmaMemReq.ownMemory ||
3028  VMA_DEBUG_ALWAYS_OWN_MEMORY ||
3029  ((vmaMemReq.neverAllocate == false) && (vkMemReq.size > preferredBlockSize / 2));
3030 
3031  if(ownMemory)
3032  {
3033  if(vmaMemReq.neverAllocate)
3034  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3035  else
3036  return AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
3037  }
3038  else
3039  {
3040  VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
3041  VmaAllocationVector* const allocationVector = m_pAllocations[memTypeIndex];
3042  VMA_ASSERT(allocationVector);
3043 
3044  // 1. Search existing allocations.
3045  // Forward order - prefer blocks with smallest amount of free space.
3046  for(size_t allocIndex = 0; allocIndex < allocationVector->m_Allocations.size(); ++allocIndex )
3047  {
3048  VmaAllocation* const pAlloc = allocationVector->m_Allocations[allocIndex];
3049  VMA_ASSERT(pAlloc);
3050  VmaAllocationRequest allocRequest = {};
3051  // Check if can allocate from pAlloc.
3052  if(pAlloc->CreateAllocationRequest(
3053  GetBufferImageGranularity(),
3054  vkMemReq.size,
3055  vkMemReq.alignment,
3056  suballocType,
3057  &allocRequest))
3058  {
3059  // We no longer have an empty Allocation.
3060  if(pAlloc->IsEmpty())
3061  m_HasEmptyAllocation[memTypeIndex] = false;
3062  // Allocate from this pAlloc.
3063  pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
3064  // Return VkDeviceMemory and offset (size already filled above).
3065  pMemory->memory = pAlloc->m_hMemory;
3066  pMemory->offset = allocRequest.offset;
3067  VMA_HEAVY_ASSERT(pAlloc->Validate());
3068  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)allocIndex);
3069  return VK_SUCCESS;
3070  }
3071  }
3072 
3073  // 2. Create new Allocation.
3074  if(vmaMemReq.neverAllocate)
3075  {
3076  VMA_DEBUG_LOG(" FAILED due to VmaMemoryRequirements::neverAllocate");
3077  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3078  }
3079  else
3080  {
3081  // Start with full preferredBlockSize.
3082  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
3083  allocInfo.memoryTypeIndex = memTypeIndex;
3084  allocInfo.allocationSize = preferredBlockSize;
3085  VkDeviceMemory mem = VK_NULL_HANDLE;
3086  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3087  if(res < 0)
3088  {
3089  // 3. Try half the size.
3090  allocInfo.allocationSize /= 2;
3091  if(allocInfo.allocationSize >= vkMemReq.size)
3092  {
3093  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3094  if(res < 0)
3095  {
3096  // 4. Try quarter the size.
3097  allocInfo.allocationSize /= 2;
3098  if(allocInfo.allocationSize >= vkMemReq.size)
3099  {
3100  res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &mem);
3101  }
3102  }
3103  }
3104  }
3105  if(res < 0)
3106  {
3107  // 5. Try OwnAlloc.
3108  res = AllocateOwnMemory(vkMemReq.size, suballocType, memTypeIndex, pMemory);
3109  if(res == VK_SUCCESS)
3110  {
3111  // Succeeded: AllocateOwnMemory already filled pMemory, nothing more to do here.
3112  VMA_DEBUG_LOG(" Allocated as OwnMemory");
3113  return VK_SUCCESS;
3114  }
3115  else
3116  {
3117  // Everything failed: Return error code.
3118  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
3119  return res;
3120  }
3121  }
3122 
3123  // New VkDeviceMemory successfully created. Create new Allocation for it.
3124  VmaAllocation* const pAlloc = vma_new(this, VmaAllocation)(this);
3125  pAlloc->Init(mem, allocInfo.allocationSize);
3126 
3127  allocationVector->m_Allocations.push_back(pAlloc);
3128 
3129  // Allocate from pAlloc. Because it is empty, allocRequest can be trivially filled.
3130  VmaAllocationRequest allocRequest = {};
3131  allocRequest.freeSuballocationItem = pAlloc->m_Suballocations.begin();
3132  allocRequest.offset = 0;
3133  pAlloc->Alloc(allocRequest, suballocType, vkMemReq.size);
3134  pMemory->memory = mem;
3135  pMemory->offset = allocRequest.offset;
3136  VMA_HEAVY_ASSERT(pAlloc->Validate());
3137  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
3138  return VK_SUCCESS;
3139  }
3140  }
3141 }
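// Example (editorial) of the fallback cascade above: with
// preferredBlockSize == 256 MB and vkMemReq.size == 10 MB, a failed
// vkAllocateMemory is retried at 128 MB, then at 64 MB, and finally the
// request falls back to a dedicated allocation of exactly 10 MB via
// AllocateOwnMemory.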
3142 
3143 VkResult VmaAllocator_T::AllocateOwnMemory(
3144  VkDeviceSize size,
3145  VmaSuballocationType suballocType,
3146  uint32_t memTypeIndex,
3147  VkMappedMemoryRange* pMemory)
3148 {
3149  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
3150  allocInfo.memoryTypeIndex = memTypeIndex;
3151  allocInfo.allocationSize = size;
3152 
3153  // Allocate VkDeviceMemory.
3154  VmaOwnAllocation ownAlloc = {};
3155  ownAlloc.m_Size = size;
3156  ownAlloc.m_Type = suballocType;
3157  VkResult res = vkAllocateMemory(m_hDevice, &allocInfo, GetAllocationCallbacks(), &ownAlloc.m_hMemory);
3158  if(res < 0)
3159  {
3160  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
3161  return res;
3162  }
3163 
3164  // Register it in m_pOwnAllocations.
3165  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3166  OwnAllocationVectorType* ownAllocations = m_pOwnAllocations[memTypeIndex];
3167  VMA_ASSERT(ownAllocations);
3168  VmaOwnAllocation* const pOwnAllocationsBeg = ownAllocations->data();
3169  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + ownAllocations->size();
3170  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3171  pOwnAllocationsBeg,
3172  pOwnAllocationsEnd,
3173  ownAlloc,
3174  VmaOwnAllocationMemoryHandleLess()) - pOwnAllocationsBeg;
3175  VectorInsert(*ownAllocations, indexToInsert, ownAlloc);
3176 
3177  // Return parameters of the allocation.
3178  pMemory->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
3179  pMemory->pNext = VMA_NULL;
3180  pMemory->memory = ownAlloc.m_hMemory;
3181  pMemory->offset = 0;
3182  pMemory->size = size;
3183 
3184  VMA_DEBUG_LOG(" Allocated OwnMemory MemoryTypeIndex=#%u", memTypeIndex);
3185 
3186  return VK_SUCCESS;
3187 }
3188 
3189 VkResult VmaAllocator_T::AllocateMemory(
3190  const VkMemoryRequirements& vkMemReq,
3191  const VmaMemoryRequirements& vmaMemReq,
3192  VmaSuballocationType suballocType,
3193  VkMappedMemoryRange* pMemory,
3194  uint32_t* pMemoryTypeIndex)
3195 {
3196  if(vmaMemReq.ownMemory && vmaMemReq.neverAllocate)
3197  {
3198  VMA_ASSERT(0 && "Specifying VmaMemoryRequirements::ownMemory && VmaMemoryRequirements::neverAllocate makes no sense.");
3199  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3200  }
3201 
3202  // Bit mask of Vulkan memory types acceptable for this allocation.
3203  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
3204  uint32_t memTypeIndex = UINT32_MAX;
3205  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
3206  if(res == VK_SUCCESS)
3207  {
3208  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
3209  // Succeeded on first try.
3210  if(res == VK_SUCCESS)
3211  {
3212  if(pMemoryTypeIndex != VMA_NULL)
3213  *pMemoryTypeIndex = memTypeIndex;
3214  return res;
3215  }
3216  // Allocation from this memory type failed. Try other compatible memory types.
3217  else
3218  {
3219  for(;;)
3220  {
3221  // Remove old memTypeIndex from list of possibilities.
3222  memoryTypeBits &= ~(1u << memTypeIndex);
3223  // Find alternative memTypeIndex.
3224  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &vmaMemReq, &memTypeIndex);
3225  if(res == VK_SUCCESS)
3226  {
3227  res = AllocateMemoryOfType(vkMemReq, vmaMemReq, memTypeIndex, suballocType, pMemory);
3228  // Allocation from this alternative memory type succeeded.
3229  if(res == VK_SUCCESS)
3230  {
3231  if(pMemoryTypeIndex != VMA_NULL)
3232  *pMemoryTypeIndex = memTypeIndex;
3233  return res;
3234  }
3235  // else: Allocation from this memory type failed. Try next one - next loop iteration.
3236  }
3237  // No other matching memory type index could be found.
3238  else
3239  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
3240  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3241  }
3242  }
3243  }
3244  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
3245  else
3246  return res;
3247 }
3248 
3249 void VmaAllocator_T::FreeMemory(const VkMappedMemoryRange* pMemory)
3250 {
3251  uint32_t memTypeIndex = 0;
3252  bool found = false;
3253  VmaAllocation* allocationToDelete = VMA_NULL;
3254  // Check all memory types because we don't know which one pMemory comes from.
3255  for(; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3256  {
3257  VmaMutexLock lock(m_AllocationsMutex[memTypeIndex]);
3258  VmaAllocationVector* const pAllocationVector = m_pAllocations[memTypeIndex];
3259  VMA_ASSERT(pAllocationVector);
3260  // Try to free pMemory from pAllocationVector.
3261  const size_t allocIndex = pAllocationVector->Free(pMemory);
3262  if(allocIndex != (size_t)-1)
3263  {
3264  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
3265  found = true;
3266  VmaAllocation* const pAlloc = pAllocationVector->m_Allocations[allocIndex];
3267  VMA_ASSERT(pAlloc);
3268  // pAlloc became empty after this deallocation.
3269  if(pAlloc->IsEmpty())
3270  {
3271  // Already has empty Allocation. We don't want to have two, so delete this one.
3272  if(m_HasEmptyAllocation[memTypeIndex])
3273  {
3274  allocationToDelete = pAlloc;
3275  VectorRemove(pAllocationVector->m_Allocations, allocIndex);
3276  break;
3277  }
3278  // We now have first empty Allocation.
3279  else
3280  m_HasEmptyAllocation[memTypeIndex] = true;
3281  }
3282  // Must be called after allocIndex is used, because later it may become invalid!
3283  pAllocationVector->IncrementallySortAllocations();
3284  break;
3285  }
3286  }
3287  if(found)
3288  {
3289  // Destruction of a free Allocation. Deferred until this point, outside of the
3290  // mutex lock, for performance reasons.
3291  if(allocationToDelete != VMA_NULL)
3292  {
3293  VMA_DEBUG_LOG(" Deleted empty allocation");
3294  allocationToDelete->Destroy(this);
3295  vma_delete(this, allocationToDelete);
3296  }
3297  return;
3298  }
3299 
3300  // pMemory not found in allocations. Try to free it as Own Memory.
3301  if(FreeOwnMemory(pMemory))
3302  return;
3303 
3304  // pMemory not found as Own Memory either.
3305  VMA_ASSERT(0 && "Not found. Trying to free memory not allocated using this allocator (or some other bug).");
3306 }
3307 
3308 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
3309 {
3310  InitStatInfo(pStats->total);
3311  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
3312  InitStatInfo(pStats->memoryType[i]);
3313  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
3314  InitStatInfo(pStats->memoryHeap[i]);
3315 
3316  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3317  {
3318  VmaMutexLock allocationsLock(m_AllocationsMutex[memTypeIndex]);
3319  const uint32_t heapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
3320  const VmaAllocationVector* const allocVector = m_pAllocations[memTypeIndex];
3321  VMA_ASSERT(allocVector);
3322  allocVector->AddStats(pStats, memTypeIndex, heapIndex);
3323  }
3324 
3325  VmaPostprocessCalcStatInfo(pStats->total);
3326  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
3327  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
3328  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
3329  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
3330 }
3331 
3332 bool VmaAllocator_T::FreeOwnMemory(const VkMappedMemoryRange* pMemory)
3333 {
3334  VkDeviceMemory vkMemory = VK_NULL_HANDLE;
3335 
3336  // Check all memory types because we don't know which one pMemory comes from.
3337  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3338  {
3339  VmaMutexLock lock(m_OwnAllocationsMutex[memTypeIndex]);
3340  OwnAllocationVectorType* const pOwnAllocations = m_pOwnAllocations[memTypeIndex];
3341  VMA_ASSERT(pOwnAllocations);
3342  VmaOwnAllocation* const pOwnAllocationsBeg = pOwnAllocations->data();
3343  VmaOwnAllocation* const pOwnAllocationsEnd = pOwnAllocationsBeg + pOwnAllocations->size();
3344  VmaOwnAllocation* const pOwnAllocationIt = VmaBinaryFindFirstNotLess(
3345  pOwnAllocationsBeg,
3346  pOwnAllocationsEnd,
3347  pMemory->memory,
3348  VmaOwnAllocationMemoryHandleLess());
3349  if((pOwnAllocationIt != pOwnAllocationsEnd) &&
3350  (pOwnAllocationIt->m_hMemory == pMemory->memory))
3351  {
3352  VMA_ASSERT(pMemory->size == pOwnAllocationIt->m_Size && pMemory->offset == 0);
3353  vkMemory = pOwnAllocationIt->m_hMemory;
3354  const size_t ownAllocationIndex = pOwnAllocationIt - pOwnAllocationsBeg;
3355  VectorRemove(*pOwnAllocations, ownAllocationIndex);
3356  VMA_DEBUG_LOG(" Freed OwnMemory MemoryTypeIndex=%u", memTypeIndex);
3357  break;
3358  }
3359  }
3360 
3361  // If found, freeing of the VkDeviceMemory is deferred until this point, outside
3362  // of the mutex lock, for performance reasons.
3363  if(vkMemory != VK_NULL_HANDLE)
3364  {
3365  vkFreeMemory(m_hDevice, vkMemory, GetAllocationCallbacks());
3366  return true;
3367  }
3368  else
3369  return false;
3370 }
3371 
3372 #if VMA_STATS_STRING_ENABLED
3373 
3374 void VmaAllocator_T::PrintDetailedMap(VmaStringBuilder& sb)
3375 {
3376  bool ownAllocationsStarted = false;
3377  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3378  {
3379  VmaMutexLock ownAllocationsLock(m_OwnAllocationsMutex[memTypeIndex]);
3380  OwnAllocationVectorType* const pOwnAllocVector = m_pOwnAllocations[memTypeIndex];
3381  VMA_ASSERT(pOwnAllocVector);
3382  if(pOwnAllocVector->empty() == false)
3383  {
3384  if(ownAllocationsStarted)
3385  sb.Add(",\n\t\"Type ");
3386  else
3387  {
3388  sb.Add(",\n\"OwnAllocations\": {\n\t\"Type ");
3389  ownAllocationsStarted = true;
3390  }
3391  sb.AddNumber(memTypeIndex);
3392  sb.Add("\": [");
3393 
3394  for(size_t i = 0; i < pOwnAllocVector->size(); ++i)
3395  {
3396  const VmaOwnAllocation& ownAlloc = (*pOwnAllocVector)[i];
3397  if(i > 0)
3398  sb.Add(",\n\t\t{ \"Size\": ");
3399  else
3400  sb.Add("\n\t\t{ \"Size\": ");
3401  sb.AddNumber(ownAlloc.m_Size);
3402  sb.Add(", \"Type\": ");
3403  sb.AddString(VMA_SUBALLOCATION_TYPE_NAMES[ownAlloc.m_Type]);
3404  sb.Add(" }");
3405  }
3406 
3407  sb.Add("\n\t]");
3408  }
3409  }
3410  if(ownAllocationsStarted)
3411  sb.Add("\n}");
3412 
3413  {
3414  bool allocationsStarted = false;
3415  for(size_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
3416  {
3417  VmaMutexLock globalAllocationsLock(m_AllocationsMutex[memTypeIndex]);
3418  if(m_pAllocations[memTypeIndex]->IsEmpty() == false)
3419  {
3420  if(allocationsStarted)
3421  sb.Add(",\n\t\"Type ");
3422  else
3423  {
3424  sb.Add(",\n\"Allocations\": {\n\t\"Type ");
3425  allocationsStarted = true;
3426  }
3427  sb.AddNumber(memTypeIndex);
3428  sb.Add("\": [");
3429 
3430  m_pAllocations[memTypeIndex]->PrintDetailedMap(sb);
3431 
3432  sb.Add("\n\t]");
3433  }
3434  }
3435  if(allocationsStarted)
3436  sb.Add("\n}");
3437  }
3438 }
3439 
3440 #endif // #if VMA_STATS_STRING_ENABLED
3441 
3442 static VkResult AllocateMemoryForImage(
3443  VmaAllocator allocator,
3444  VkImage image,
3445  const VmaMemoryRequirements* pMemoryRequirements,
3446  VmaSuballocationType suballocType,
3447  VkMappedMemoryRange* pMemory,
3448  uint32_t* pMemoryTypeIndex)
3449 {
3450  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3451 
3452  VkMemoryRequirements vkMemReq = {};
3453  vkGetImageMemoryRequirements(allocator->m_hDevice, image, &vkMemReq);
3454 
3455  return allocator->AllocateMemory(
3456  vkMemReq,
3457  *pMemoryRequirements,
3458  suballocType,
3459  pMemory,
3460  pMemoryTypeIndex);
3461 }
3462 
3463 ////////////////////////////////////////////////////////////////////////////////
3464 // Public interface
3465 
3466 VkResult vmaCreateAllocator(
3467  const VmaAllocatorCreateInfo* pCreateInfo,
3468  VmaAllocator* pAllocator)
3469 {
3470  VMA_ASSERT(pCreateInfo && pAllocator);
3471  VMA_DEBUG_LOG("vmaCreateAllocator");
3472  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
3473  return VK_SUCCESS;
3474 }
3475 
3476 void vmaDestroyAllocator(
3477  VmaAllocator allocator)
3478 {
3479  if(allocator != VK_NULL_HANDLE)
3480  {
3481  VMA_DEBUG_LOG("vmaDestroyAllocator");
3482  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
3483  vma_delete(&allocationCallbacks, allocator);
3484  }
3485 }
3486 
3487 void vmaGetPhysicalDeviceProperties(
3488  VmaAllocator allocator,
3489  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
3490 {
3491  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
3492  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
3493 }
3494 
3495 void vmaGetMemoryProperties(
3496  VmaAllocator allocator,
3497  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
3498 {
3499  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
3500  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
3501 }
3502 
3503 void vmaGetMemoryTypeProperties(
3504  VmaAllocator allocator,
3505  uint32_t memoryTypeIndex,
3506  VkMemoryPropertyFlags* pFlags)
3507 {
3508  VMA_ASSERT(allocator && pFlags);
3509  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
3510  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
3511 }
3512 
3513 void vmaCalculateStats(
3514  VmaAllocator allocator,
3515  VmaStats* pStats)
3516 {
3517  VMA_ASSERT(allocator && pStats);
3518  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3519  allocator->CalculateStats(pStats);
3520 }
3521 
3522 #if VMA_STATS_STRING_ENABLED
3523 
3524 void vmaBuildStatsString(
3525  VmaAllocator allocator,
3526  char** ppStatsString,
3527  VkBool32 detailedMap)
3528 {
3529  VMA_ASSERT(allocator && ppStatsString);
3530  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3531 
3532  VmaStringBuilder sb(allocator);
3533  {
3534  VmaStats stats;
3535  allocator->CalculateStats(&stats);
3536 
3537  sb.Add("{\n\"Total\": ");
3538  VmaPrintStatInfo(sb, stats.total);
3539 
3540  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
3541  {
3542  sb.Add(",\n\"Heap ");
3543  sb.AddNumber(heapIndex);
3544  sb.Add("\": {\n\t\"Size\": ");
3545  sb.AddNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
3546  sb.Add(",\n\t\"Flags\": ");
3547  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
3548  sb.AddString("DEVICE_LOCAL");
3549  else
3550  sb.AddString("");
3551  if(stats.memoryHeap[heapIndex].AllocationCount > 0)
3552  {
3553  sb.Add(",\n\t\"Stats\": ");
3554  VmaPrintStatInfo(sb, stats.memoryHeap[heapIndex]);
3555  }
3556 
3557  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
3558  {
3559  if(allocator->m_MemProps.memoryTypes[typeIndex].heapIndex == heapIndex)
3560  {
3561  sb.Add(",\n\t\"Type ");
3562  sb.AddNumber(typeIndex);
3563  sb.Add("\": {\n\t\t\"Flags\": \"");
3564  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
3565  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
3566  sb.Add(" DEVICE_LOCAL");
3567  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
3568  sb.Add(" HOST_VISIBLE");
3569  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
3570  sb.Add(" HOST_COHERENT");
3571  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
3572  sb.Add(" HOST_CACHED");
3573  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
3574  sb.Add(" LAZILY_ALLOCATED");
3575  sb.Add("\"");
3576  if(stats.memoryType[typeIndex].AllocationCount > 0)
3577  {
3578  sb.Add(",\n\t\t\"Stats\": ");
3579  VmaPrintStatInfo(sb, stats.memoryType[typeIndex]);
3580  }
3581  sb.Add("\n\t}");
3582  }
3583  }
3584  sb.Add("\n}");
3585  }
3586  if(detailedMap == VK_TRUE)
3587  allocator->PrintDetailedMap(sb);
3588  sb.Add("\n}\n");
3589  }
3590 
3591  const size_t len = sb.GetLength();
3592  char* const pChars = vma_new_array(allocator, char, len + 1);
3593  if(len > 0)
3594  memcpy(pChars, sb.GetData(), len);
3595  pChars[len] = '\0';
3596  *ppStatsString = pChars;
3597 }
3598 
3599 void vmaFreeStatsString(
3600  VmaAllocator allocator,
3601  char* pStatsString)
3602 {
3603  if(pStatsString != VMA_NULL)
3604  {
3605  VMA_ASSERT(allocator);
3606  size_t len = strlen(pStatsString);
3607  vma_delete_array(allocator, pStatsString, len + 1);
3608  }
3609 }
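// Usage sketch (editorial; assumes a valid VmaAllocator `allocator` and
// <cstdio> available): build the JSON statistics string, print it, then
// return it to the allocator.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/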
3610 
3611 #endif // #if VMA_STATS_STRING_ENABLED
3612 
3615 VkResult vmaFindMemoryTypeIndex(
3616  VmaAllocator allocator,
3617  uint32_t memoryTypeBits,
3618  const VmaMemoryRequirements* pMemoryRequirements,
3619  uint32_t* pMemoryTypeIndex)
3620 {
3621  VMA_ASSERT(allocator != VK_NULL_HANDLE);
3622  VMA_ASSERT(pMemoryRequirements != VMA_NULL);
3623  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
3624 
3625  uint32_t requiredFlags = pMemoryRequirements->requiredFlags;
3626  uint32_t preferredFlags = pMemoryRequirements->preferredFlags;
3627  if(preferredFlags == 0)
3628  preferredFlags = requiredFlags;
3629  // preferredFlags, if not 0, must be a superset of requiredFlags.
3630  VMA_ASSERT((requiredFlags & ~preferredFlags) == 0);
3631 
3632  // Convert usage to requiredFlags and preferredFlags.
3633  switch(pMemoryRequirements->usage)
3634  {
3635  case VMA_MEMORY_USAGE_UNKNOWN:
3636  break;
3637  case VMA_MEMORY_USAGE_GPU_ONLY:
3638  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3639  break;
3640  case VMA_MEMORY_USAGE_CPU_ONLY:
3641  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3642  break;
3643  case VMA_MEMORY_USAGE_CPU_TO_GPU:
3644  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3645  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3646  break;
3647  case VMA_MEMORY_USAGE_GPU_TO_CPU:
3648  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3649  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3650  break;
3651  default:
3652  break;
3653  }
3654 
3655  *pMemoryTypeIndex = UINT32_MAX;
3656  uint32_t minCost = UINT32_MAX;
3657  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
3658  memTypeIndex < allocator->GetMemoryTypeCount();
3659  ++memTypeIndex, memTypeBit <<= 1)
3660  {
3661  // This memory type is acceptable according to memoryTypeBits bitmask.
3662  if((memTypeBit & memoryTypeBits) != 0)
3663  {
3664  const VkMemoryPropertyFlags currFlags =
3665  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
3666  // This memory type contains requiredFlags.
3667  if((requiredFlags & ~currFlags) == 0)
3668  {
3669  // Calculate cost as number of bits from preferredFlags not present in this memory type.
3670  uint32_t currCost = CountBitsSet(preferredFlags & ~currFlags);
3671  // Remember memory type with lowest cost.
3672  if(currCost < minCost)
3673  {
3674  *pMemoryTypeIndex = memTypeIndex;
3675  if(currCost == 0)
3676  return VK_SUCCESS;
3677  minCost = currCost;
3678  }
3679  }
3680  }
3681  }
3682  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
3683 }
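// Usage sketch (editorial): pick a memory type for a buffer. Assumes a valid
// VmaAllocator `allocator` and a VkMemoryRequirements `vkMemReq` filled by
// vkGetBufferMemoryRequirements.
/*
VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, vkMemReq.memoryTypeBits, &memReq, &memTypeIndex);
if(res == VK_SUCCESS)
{
    // memTypeIndex can be used as VkMemoryAllocateInfo::memoryTypeIndex.
}
*/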
3684 
3685 VkResult vmaAllocateMemory(
3686  VmaAllocator allocator,
3687  const VkMemoryRequirements* pVkMemoryRequirements,
3688  const VmaMemoryRequirements* pVmaMemoryRequirements,
3689  VkMappedMemoryRange* pMemory,
3690  uint32_t* pMemoryTypeIndex)
3691 {
3692  VMA_ASSERT(allocator && pVkMemoryRequirements && pVmaMemoryRequirements && pMemory);
3693 
3694  VMA_DEBUG_LOG("vmaAllocateMemory");
3695 
3696  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3697 
3698  return allocator->AllocateMemory(
3699  *pVkMemoryRequirements,
3700  *pVmaMemoryRequirements,
3701  VMA_SUBALLOCATION_TYPE_UNKNOWN,
3702  pMemory,
3703  pMemoryTypeIndex);
3704 }
3705 
3706 VkResult vmaAllocateMemoryForBuffer(
3707  VmaAllocator allocator,
3708  VkBuffer buffer,
3709  const VmaMemoryRequirements* pMemoryRequirements,
3710  VkMappedMemoryRange* pMemory,
3711  uint32_t* pMemoryTypeIndex)
3712 {
3713  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pMemoryRequirements && pMemory);
3714 
3715  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
3716 
3717  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3718 
3719  VkMemoryRequirements vkMemReq = {};
3720  vkGetBufferMemoryRequirements(allocator->m_hDevice, buffer, &vkMemReq);
3721 
3722  return allocator->AllocateMemory(
3723  vkMemReq,
3724  *pMemoryRequirements,
3725  VMA_SUBALLOCATION_TYPE_BUFFER,
3726  pMemory,
3727  pMemoryTypeIndex);
3728 }
3729 
3730 VkResult vmaAllocateMemoryForImage(
3731  VmaAllocator allocator,
3732  VkImage image,
3733  const VmaMemoryRequirements* pMemoryRequirements,
3734  VkMappedMemoryRange* pMemory,
3735  uint32_t* pMemoryTypeIndex)
3736 {
3737  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pMemoryRequirements);
3738 
3739  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
3740 
3741  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3742 
3743  return AllocateMemoryForImage(
3744  allocator,
3745  image,
3746  pMemoryRequirements,
3747  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
3748  pMemory,
3749  pMemoryTypeIndex);
3750 }
3751 
3752 void vmaFreeMemory(
3753  VmaAllocator allocator,
3754  const VkMappedMemoryRange* pMemory)
3755 {
3756  VMA_ASSERT(allocator && pMemory);
3757 
3758  VMA_DEBUG_LOG("vmaFreeMemory");
3759 
3760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3761 
3762  allocator->FreeMemory(pMemory);
3763 }
3764 
3765 VkResult vmaMapMemory(
3766  VmaAllocator allocator,
3767  const VkMappedMemoryRange* pMemory,
3768  void** ppData)
3769 {
3770  VMA_ASSERT(allocator && pMemory && ppData);
3771 
3772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3773 
3774  return vkMapMemory(allocator->m_hDevice, pMemory->memory,
3775  pMemory->offset, pMemory->size, 0, ppData);
3776 }
3777 
3778 void vmaUnmapMemory(
3779  VmaAllocator allocator,
3780  const VkMappedMemoryRange* pMemory)
3781 {
3782  VMA_ASSERT(allocator && pMemory);
3783 
3784  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3785 
3786  vkUnmapMemory(allocator->m_hDevice, pMemory->memory);
3787 }
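// Usage sketch (editorial): upload data through a host-visible allocation.
// Assumes `mem` came from vmaAllocateMemoryForBuffer with
// VMA_MEMORY_USAGE_CPU_TO_GPU, and `srcData`/`srcSize` are the source bytes.
/*
void* pMapped = VMA_NULL;
if(vmaMapMemory(allocator, &mem, &pMapped) == VK_SUCCESS)
{
    memcpy(pMapped, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, &mem);
}
*/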
3788 
3789 VkResult vmaCreateBuffer(
3790  VmaAllocator allocator,
3791  const VkBufferCreateInfo* pCreateInfo,
3792  const VmaMemoryRequirements* pMemoryRequirements,
3793  VkBuffer* pBuffer,
3794  VkMappedMemoryRange* pMemory,
3795  uint32_t* pMemoryTypeIndex)
3796 {
3797  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3798 
3799  VMA_DEBUG_LOG("vmaCreateBuffer");
3800 
3801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3802 
3803  // 1. Create VkBuffer.
3804  VkResult res = vkCreateBuffer(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pBuffer);
3805  if(res >= 0)
3806  {
3807  VkMappedMemoryRange mem = {};
3808 
3809  // 2. vkGetBufferMemoryRequirements.
3810  VkMemoryRequirements vkMemReq = {};
3811  vkGetBufferMemoryRequirements(allocator->m_hDevice, *pBuffer, &vkMemReq);
3812 
3813  // 3. Allocate memory using allocator.
3814  res = allocator->AllocateMemory(
3815  vkMemReq,
3816  *pMemoryRequirements,
3817  VMA_SUBALLOCATION_TYPE_BUFFER,
3818  &mem,
3819  pMemoryTypeIndex);
3820  if(res >= 0)
3821  {
3822  if(pMemory != VMA_NULL)
3823  {
3824  *pMemory = mem;
3825  }
3826  // 3. Bind buffer with memory.
3827  res = vkBindBufferMemory(allocator->m_hDevice, *pBuffer, mem.memory, mem.offset);
3828  if(res >= 0)
3829  {
3830  // All steps succeeded.
3831  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3832  allocator->m_BufferToMemoryMap.insert(VmaPair<VkBuffer, VkMappedMemoryRange>(*pBuffer, mem));
3833  return VK_SUCCESS;
3834  }
3835  allocator->FreeMemory(&mem);
3836  return res;
3837  }
3838  vkDestroyBuffer(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
3839  return res;
3840  }
3841  return res;
3842 }
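// Usage sketch (editorial): create a device-local vertex buffer and its
// memory in one call. Assumes a valid VmaAllocator `allocator`.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaMemoryRequirements memReq = {};
memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &memReq, &buffer, VMA_NULL, VMA_NULL);
// ... use the buffer, then:
vmaDestroyBuffer(allocator, buffer);
*/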
3843 
3844 void vmaDestroyBuffer(
3845  VmaAllocator allocator,
3846  VkBuffer buffer)
3847 {
3848  if(buffer != VK_NULL_HANDLE)
3849  {
3850  VMA_ASSERT(allocator);
3851 
3852  VMA_DEBUG_LOG("vmaDestroyBuffer");
3853 
3854  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3855 
3856  VkMappedMemoryRange mem = {};
3857  {
3858  VmaMutexLock lock(allocator->m_BufferToMemoryMapMutex);
3859  VMA_MAP_TYPE(VkBuffer, VkMappedMemoryRange)::iterator it = allocator->m_BufferToMemoryMap.find(buffer);
3860  if(it == allocator->m_BufferToMemoryMap.end())
3861  {
3862  VMA_ASSERT(0 && "Trying to destroy buffer that was not created using vmaCreateBuffer or already freed.");
3863  return;
3864  }
3865  mem = it->second;
3866  allocator->m_BufferToMemoryMap.erase(it);
3867  }
3868 
3869  vkDestroyBuffer(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
3870 
3871  allocator->FreeMemory(&mem);
3872  }
3873 }
3874 
3875 VkResult vmaCreateImage(
3876  VmaAllocator allocator,
3877  const VkImageCreateInfo* pCreateInfo,
3878  const VmaMemoryRequirements* pMemoryRequirements,
3879  VkImage* pImage,
3880  VkMappedMemoryRange* pMemory,
3881  uint32_t* pMemoryTypeIndex)
3882 {
3883  VMA_ASSERT(allocator && pCreateInfo && pMemoryRequirements);
3884 
3885  VMA_DEBUG_LOG("vmaCreateImage");
3886 
3887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3888 
3889  // 1. Create VkImage.
3890  VkResult res = vkCreateImage(allocator->m_hDevice, pCreateInfo, allocator->GetAllocationCallbacks(), pImage);
3891  if(res >= 0)
3892  {
3893  VkMappedMemoryRange mem = {};
3894  VmaSuballocationType suballocType = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
3895  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
3896  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
3897 
3898  // 2. Allocate memory using allocator.
3899  res = AllocateMemoryForImage(allocator, *pImage, pMemoryRequirements, suballocType, &mem, pMemoryTypeIndex);
3900  if(res >= 0)
3901  {
3902  if(pMemory != VMA_NULL)
3903  *pMemory = mem;
3904  // 3. Bind image with memory.
3905  res = vkBindImageMemory(allocator->m_hDevice, *pImage, mem.memory, mem.offset);
3906  if(res >= 0)
3907  {
3908  // All steps succeeded.
3909  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3910  allocator->m_ImageToMemoryMap.insert(VmaPair<VkImage, VkMappedMemoryRange>(*pImage, mem));
3911  return VK_SUCCESS;
3912  }
3913  allocator->FreeMemory(&mem);
3914  return res;
3915  }
3916  vkDestroyImage(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
3917  return res;
3918  }
3919  return res;
3920 }
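
// Usage sketch (not part of the library source): creating a device-local,
// optimally tiled 2D image. `allocator` is again an assumed, previously
// created VmaAllocator. Note that VK_IMAGE_TILING_OPTIMAL makes the code
// above pick VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL.
//
//     VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//     imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//     imgCreateInfo.extent.width = 1024;
//     imgCreateInfo.extent.height = 1024;
//     imgCreateInfo.extent.depth = 1;
//     imgCreateInfo.mipLevels = 1;
//     imgCreateInfo.arrayLayers = 1;
//     imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//     imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//     imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
//     imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//     VmaMemoryRequirements memReq = {};
//     memReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkImage image = VK_NULL_HANDLE;
//     VkMappedMemoryRange memRange = {};
//     uint32_t memTypeIndex = 0;
//     VkResult res = vmaCreateImage(
//         allocator, &imgCreateInfo, &memReq, &image, &memRange, &memTypeIndex);
//     if(res == VK_SUCCESS)
//     {
//         // ... use the image ...
//         vmaDestroyImage(allocator, image); // also frees the allocation
//     }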
3921 
3922 void vmaDestroyImage(
3923  VmaAllocator allocator,
3924  VkImage image)
3925 {
3926  if(image != VK_NULL_HANDLE)
3927  {
3928  VMA_ASSERT(allocator);
3929 
3930  VMA_DEBUG_LOG("vmaDestroyImage");
3931 
3932  VMA_DEBUG_GLOBAL_MUTEX_LOCK
3933 
3934  VkMappedMemoryRange mem = {};
3935  {
3936  VmaMutexLock lock(allocator->m_ImageToMemoryMapMutex);
3937  VMA_MAP_TYPE(VkImage, VkMappedMemoryRange)::iterator it = allocator->m_ImageToMemoryMap.find(image);
3938  if(it == allocator->m_ImageToMemoryMap.end())
3939  {
3940  VMA_ASSERT(0 && "Trying to destroy image that was not created using vmaCreateImage or already freed.");
3941  return;
3942  }
3943  mem = it->second;
3944  allocator->m_ImageToMemoryMap.erase(it);
3945  }
3946 
3947  vkDestroyImage(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
3948 
3949  allocator->FreeMemory(&mem);
3950  }
3951 }
3952 
3953 #endif // #ifdef VMA_IMPLEMENTATION
3954 
3955 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H