Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1343 #include <vulkan/vulkan.h>
1344 
1345 #if !defined(VMA_DEDICATED_ALLOCATION)
1346  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1347  #define VMA_DEDICATED_ALLOCATION 1
1348  #else
1349  #define VMA_DEDICATED_ALLOCATION 0
1350  #endif
1351 #endif
1352 
1362 VK_DEFINE_HANDLE(VmaAllocator)
1363 
1364 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1366  VmaAllocator allocator,
1367  uint32_t memoryType,
1368  VkDeviceMemory memory,
1369  VkDeviceSize size);
1371 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1372  VmaAllocator allocator,
1373  uint32_t memoryType,
1374  VkDeviceMemory memory,
1375  VkDeviceSize size);
1376 
1390 
1420 
1423 typedef VkFlags VmaAllocatorCreateFlags;
1424 
1429 typedef struct VmaVulkanFunctions {
1430  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1431  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1432  PFN_vkAllocateMemory vkAllocateMemory;
1433  PFN_vkFreeMemory vkFreeMemory;
1434  PFN_vkMapMemory vkMapMemory;
1435  PFN_vkUnmapMemory vkUnmapMemory;
1436  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1437  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1438  PFN_vkBindBufferMemory vkBindBufferMemory;
1439  PFN_vkBindImageMemory vkBindImageMemory;
1440  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1441  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1442  PFN_vkCreateBuffer vkCreateBuffer;
1443  PFN_vkDestroyBuffer vkDestroyBuffer;
1444  PFN_vkCreateImage vkCreateImage;
1445  PFN_vkDestroyImage vkDestroyImage;
1446 #if VMA_DEDICATED_ALLOCATION
1447  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1448  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1449 #endif
1451 
1453 typedef enum VmaRecordFlagBits {
1460 
1463 typedef VkFlags VmaRecordFlags;
1464 
1465 /*
1466 Define this macro to 0/1 to disable/enable support for recording functionality,
1467 available through VmaAllocatorCreateInfo::pRecordSettings.
1468 */
1469 #ifndef VMA_RECORDING_ENABLED
1470  #ifdef _WIN32
1471  #define VMA_RECORDING_ENABLED 1
1472  #else
1473  #define VMA_RECORDING_ENABLED 0
1474  #endif
1475 #endif
1476 
1478 typedef struct VmaRecordSettings
1479 {
1481  VmaRecordFlags flags;
1489  const char* pFilePath;
1491 
1494 {
1496  VmaAllocatorCreateFlags flags;
1498 
1499  VkPhysicalDevice physicalDevice;
1501 
1502  VkDevice device;
1504 
1507 
1508  const VkAllocationCallbacks* pAllocationCallbacks;
1510 
1549  const VkDeviceSize* pHeapSizeLimit;
1570 
1572 VkResult vmaCreateAllocator(
1573  const VmaAllocatorCreateInfo* pCreateInfo,
1574  VmaAllocator* pAllocator);
1575 
1577 void vmaDestroyAllocator(
1578  VmaAllocator allocator);
1579 
1585  VmaAllocator allocator,
1586  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1587 
1593  VmaAllocator allocator,
1594  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1595 
1603  VmaAllocator allocator,
1604  uint32_t memoryTypeIndex,
1605  VkMemoryPropertyFlags* pFlags);
1606 
1616  VmaAllocator allocator,
1617  uint32_t frameIndex);
1618 
1621 typedef struct VmaStatInfo
1622 {
1624  uint32_t blockCount;
1630  VkDeviceSize usedBytes;
1632  VkDeviceSize unusedBytes;
1633  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1634  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1635 } VmaStatInfo;
1636 
1638 typedef struct VmaStats
1639 {
1640  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1641  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1643 } VmaStats;
1644 
1646 void vmaCalculateStats(
1647  VmaAllocator allocator,
1648  VmaStats* pStats);
1649 
1650 #define VMA_STATS_STRING_ENABLED 1
1651 
1652 #if VMA_STATS_STRING_ENABLED
1653 
1655 
1657 void vmaBuildStatsString(
1658  VmaAllocator allocator,
1659  char** ppStatsString,
1660  VkBool32 detailedMap);
1661 
1662 void vmaFreeStatsString(
1663  VmaAllocator allocator,
1664  char* pStatsString);
1665 
1666 #endif // #if VMA_STATS_STRING_ENABLED
1667 
1676 VK_DEFINE_HANDLE(VmaPool)
1677 
1678 typedef enum VmaMemoryUsage
1679 {
1728 } VmaMemoryUsage;
1729 
1744 
1799 
1803 
1805 {
1807  VmaAllocationCreateFlags flags;
1818  VkMemoryPropertyFlags requiredFlags;
1823  VkMemoryPropertyFlags preferredFlags;
1831  uint32_t memoryTypeBits;
1844  void* pUserData;
1846 
1863 VkResult vmaFindMemoryTypeIndex(
1864  VmaAllocator allocator,
1865  uint32_t memoryTypeBits,
1866  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1867  uint32_t* pMemoryTypeIndex);
1868 
1882  VmaAllocator allocator,
1883  const VkBufferCreateInfo* pBufferCreateInfo,
1884  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1885  uint32_t* pMemoryTypeIndex);
1886 
1900  VmaAllocator allocator,
1901  const VkImageCreateInfo* pImageCreateInfo,
1902  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1903  uint32_t* pMemoryTypeIndex);
1904 
1925 
1940 
1943 typedef VkFlags VmaPoolCreateFlags;
1944 
1947 typedef struct VmaPoolCreateInfo {
1953  VmaPoolCreateFlags flags;
1958  VkDeviceSize blockSize;
1988 
1991 typedef struct VmaPoolStats {
1994  VkDeviceSize size;
1997  VkDeviceSize unusedSize;
2010  VkDeviceSize unusedRangeSizeMax;
2011 } VmaPoolStats;
2012 
2019 VkResult vmaCreatePool(
2020  VmaAllocator allocator,
2021  const VmaPoolCreateInfo* pCreateInfo,
2022  VmaPool* pPool);
2023 
2026 void vmaDestroyPool(
2027  VmaAllocator allocator,
2028  VmaPool pool);
2029 
2036 void vmaGetPoolStats(
2037  VmaAllocator allocator,
2038  VmaPool pool,
2039  VmaPoolStats* pPoolStats);
2040 
2048  VmaAllocator allocator,
2049  VmaPool pool,
2050  size_t* pLostAllocationCount);
2051 
2066 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2067 
2092 VK_DEFINE_HANDLE(VmaAllocation)
2093 
2094 
2096 typedef struct VmaAllocationInfo {
2101  uint32_t memoryType;
2110  VkDeviceMemory deviceMemory;
2115  VkDeviceSize offset;
2120  VkDeviceSize size;
2134  void* pUserData;
2136 
2147 VkResult vmaAllocateMemory(
2148  VmaAllocator allocator,
2149  const VkMemoryRequirements* pVkMemoryRequirements,
2150  const VmaAllocationCreateInfo* pCreateInfo,
2151  VmaAllocation* pAllocation,
2152  VmaAllocationInfo* pAllocationInfo);
2153 
2161  VmaAllocator allocator,
2162  VkBuffer buffer,
2163  const VmaAllocationCreateInfo* pCreateInfo,
2164  VmaAllocation* pAllocation,
2165  VmaAllocationInfo* pAllocationInfo);
2166 
2168 VkResult vmaAllocateMemoryForImage(
2169  VmaAllocator allocator,
2170  VkImage image,
2171  const VmaAllocationCreateInfo* pCreateInfo,
2172  VmaAllocation* pAllocation,
2173  VmaAllocationInfo* pAllocationInfo);
2174 
2176 void vmaFreeMemory(
2177  VmaAllocator allocator,
2178  VmaAllocation allocation);
2179 
2197  VmaAllocator allocator,
2198  VmaAllocation allocation,
2199  VmaAllocationInfo* pAllocationInfo);
2200 
2215 VkBool32 vmaTouchAllocation(
2216  VmaAllocator allocator,
2217  VmaAllocation allocation);
2218 
2233  VmaAllocator allocator,
2234  VmaAllocation allocation,
2235  void* pUserData);
2236 
2248  VmaAllocator allocator,
2249  VmaAllocation* pAllocation);
2250 
2285 VkResult vmaMapMemory(
2286  VmaAllocator allocator,
2287  VmaAllocation allocation,
2288  void** ppData);
2289 
2294 void vmaUnmapMemory(
2295  VmaAllocator allocator,
2296  VmaAllocation allocation);
2297 
2310 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2311 
2324 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2325 
2342 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2343 
2345 typedef struct VmaDefragmentationInfo {
2350  VkDeviceSize maxBytesToMove;
2357 
2359 typedef struct VmaDefragmentationStats {
2361  VkDeviceSize bytesMoved;
2363  VkDeviceSize bytesFreed;
2369 
2456 VkResult vmaDefragment(
2457  VmaAllocator allocator,
2458  VmaAllocation* pAllocations,
2459  size_t allocationCount,
2460  VkBool32* pAllocationsChanged,
2461  const VmaDefragmentationInfo *pDefragmentationInfo,
2462  VmaDefragmentationStats* pDefragmentationStats);
2463 
2476 VkResult vmaBindBufferMemory(
2477  VmaAllocator allocator,
2478  VmaAllocation allocation,
2479  VkBuffer buffer);
2480 
2493 VkResult vmaBindImageMemory(
2494  VmaAllocator allocator,
2495  VmaAllocation allocation,
2496  VkImage image);
2497 
2524 VkResult vmaCreateBuffer(
2525  VmaAllocator allocator,
2526  const VkBufferCreateInfo* pBufferCreateInfo,
2527  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2528  VkBuffer* pBuffer,
2529  VmaAllocation* pAllocation,
2530  VmaAllocationInfo* pAllocationInfo);
2531 
2543 void vmaDestroyBuffer(
2544  VmaAllocator allocator,
2545  VkBuffer buffer,
2546  VmaAllocation allocation);
2547 
2549 VkResult vmaCreateImage(
2550  VmaAllocator allocator,
2551  const VkImageCreateInfo* pImageCreateInfo,
2552  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2553  VkImage* pImage,
2554  VmaAllocation* pAllocation,
2555  VmaAllocationInfo* pAllocationInfo);
2556 
2568 void vmaDestroyImage(
2569  VmaAllocator allocator,
2570  VkImage image,
2571  VmaAllocation allocation);
2572 
2573 #ifdef __cplusplus
2574 }
2575 #endif
2576 
2577 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2578 
2579 // For Visual Studio IntelliSense.
2580 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2581 #define VMA_IMPLEMENTATION
2582 #endif
2583 
2584 #ifdef VMA_IMPLEMENTATION
2585 #undef VMA_IMPLEMENTATION
2586 
2587 #include <cstdint>
2588 #include <cstdlib>
2589 #include <cstring>
2590 
2591 /*******************************************************************************
2592 CONFIGURATION SECTION
2593 
2594 Define some of these macros before each #include of this header or change them
2595 here if you need other than default behavior depending on your environment.
2596 */
2597 
2598 /*
2599 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2600 internally, like:
2601 
2602  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2603 
2604 Define to 0 if you are going to provide your own pointers to Vulkan functions via
2605 VmaAllocatorCreateInfo::pVulkanFunctions.
2606 */
2607 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2608 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2609 #endif
2610 
2611 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2612 //#define VMA_USE_STL_CONTAINERS 1
2613 
2614 /* Set this macro to 1 to make the library including and using STL containers:
2615 std::pair, std::vector, std::list, std::unordered_map.
2616 
2617 Set it to 0 or undefined to make the library using its own implementation of
2618 the containers.
2619 */
2620 #if VMA_USE_STL_CONTAINERS
2621  #define VMA_USE_STL_VECTOR 1
2622  #define VMA_USE_STL_UNORDERED_MAP 1
2623  #define VMA_USE_STL_LIST 1
2624 #endif
2625 
2626 #if VMA_USE_STL_VECTOR
2627  #include <vector>
2628 #endif
2629 
2630 #if VMA_USE_STL_UNORDERED_MAP
2631  #include <unordered_map>
2632 #endif
2633 
2634 #if VMA_USE_STL_LIST
2635  #include <list>
2636 #endif
2637 
2638 /*
2639 Following headers are used in this CONFIGURATION section only, so feel free to
2640 remove them if not needed.
2641 */
2642 #include <cassert> // for assert
2643 #include <algorithm> // for min, max
2644 #include <mutex> // for std::mutex
2645 #include <atomic> // for std::atomic
2646 
2647 #ifndef VMA_NULL
2648  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2649  #define VMA_NULL nullptr
2650 #endif
2651 
2652 #if defined(__APPLE__) || defined(__ANDROID__)
2653 #include <cstdlib>
2654 void *aligned_alloc(size_t alignment, size_t size)
2655 {
2656  // alignment must be >= sizeof(void*)
2657  if(alignment < sizeof(void*))
2658  {
2659  alignment = sizeof(void*);
2660  }
2661 
2662  void *pointer;
2663  if(posix_memalign(&pointer, alignment, size) == 0)
2664  return pointer;
2665  return VMA_NULL;
2666 }
2667 #endif
2668 
2669 // If your compiler is not compatible with C++11 and definition of
2670 // aligned_alloc() function is missing, uncommenting following line may help:
2671 
2672 //#include <malloc.h>
2673 
2674 // Normal assert to check for programmer's errors, especially in Debug configuration.
2675 #ifndef VMA_ASSERT
2676  #ifdef _DEBUG
2677  #define VMA_ASSERT(expr) assert(expr)
2678  #else
2679  #define VMA_ASSERT(expr)
2680  #endif
2681 #endif
2682 
2683 // Assert that will be called very often, like inside data structures e.g. operator[].
2684 // Making it non-empty can make program slow.
2685 #ifndef VMA_HEAVY_ASSERT
2686  #ifdef _DEBUG
2687  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2688  #else
2689  #define VMA_HEAVY_ASSERT(expr)
2690  #endif
2691 #endif
2692 
2693 #ifndef VMA_ALIGN_OF
2694  #define VMA_ALIGN_OF(type) (__alignof(type))
2695 #endif
2696 
2697 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2698  #if defined(_WIN32)
2699  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2700  #else
2701  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2702  #endif
2703 #endif
2704 
2705 #ifndef VMA_SYSTEM_FREE
2706  #if defined(_WIN32)
2707  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2708  #else
2709  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2710  #endif
2711 #endif
2712 
2713 #ifndef VMA_MIN
2714  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2715 #endif
2716 
2717 #ifndef VMA_MAX
2718  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2719 #endif
2720 
2721 #ifndef VMA_SWAP
2722  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2723 #endif
2724 
2725 #ifndef VMA_SORT
2726  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2727 #endif
2728 
2729 #ifndef VMA_DEBUG_LOG
2730  #define VMA_DEBUG_LOG(format, ...)
2731  /*
2732  #define VMA_DEBUG_LOG(format, ...) do { \
2733  printf(format, __VA_ARGS__); \
2734  printf("\n"); \
2735  } while(false)
2736  */
2737 #endif
2738 
2739 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2740 #if VMA_STATS_STRING_ENABLED
2741  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2742  {
2743  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2744  }
2745  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2746  {
2747  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2748  }
2749  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2750  {
2751  snprintf(outStr, strLen, "%p", ptr);
2752  }
2753 #endif
2754 
2755 #ifndef VMA_MUTEX
2756  class VmaMutex
2757  {
2758  public:
2759  VmaMutex() { }
2760  ~VmaMutex() { }
2761  void Lock() { m_Mutex.lock(); }
2762  void Unlock() { m_Mutex.unlock(); }
2763  private:
2764  std::mutex m_Mutex;
2765  };
2766  #define VMA_MUTEX VmaMutex
2767 #endif
2768 
2769 /*
2770 If providing your own implementation, you need to implement a subset of std::atomic:
2771 
2772 - Constructor(uint32_t desired)
2773 - uint32_t load() const
2774 - void store(uint32_t desired)
2775 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2776 */
2777 #ifndef VMA_ATOMIC_UINT32
2778  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2779 #endif
2780 
2781 #ifndef VMA_BEST_FIT
2782 
2794  #define VMA_BEST_FIT (1)
2795 #endif
2796 
2797 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2798 
2802  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2803 #endif
2804 
2805 #ifndef VMA_DEBUG_ALIGNMENT
2806 
2810  #define VMA_DEBUG_ALIGNMENT (1)
2811 #endif
2812 
2813 #ifndef VMA_DEBUG_MARGIN
2814 
2818  #define VMA_DEBUG_MARGIN (0)
2819 #endif
2820 
2821 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2822 
2826  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2827 #endif
2828 
2829 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2830 
2835  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2836 #endif
2837 
2838 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2839 
2843  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2844 #endif
2845 
2846 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2847 
2851  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2852 #endif
2853 
2854 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2855  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2857 #endif
2858 
2859 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2860  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2862 #endif
2863 
2864 #ifndef VMA_CLASS_NO_COPY
2865  #define VMA_CLASS_NO_COPY(className) \
2866  private: \
2867  className(const className&) = delete; \
2868  className& operator=(const className&) = delete;
2869 #endif
2870 
// Sentinel frame index — presumably marks allocations that became lost;
// NOTE(review): name-based inference, verify at use sites.
static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
// Written into debug margins by VmaWriteMagicValue() and checked by
// VmaValidateMagicValue() to detect out-of-bounds writes.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Byte fill patterns — presumably applied on allocation creation/destruction
// when VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled; confirm against the
// implementation (not visible in this chunk).
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2878 
2879 /*******************************************************************************
2880 END OF CONFIGURATION
2881 */
2882 
2883 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2884  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2885 
// Returns number of bits set to 1 in (v) — population count.
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2896 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T rest = val % align;
    return (rest != 0) ? (val + (align - rest)) : val;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val - (val % align);
}
2911 
// Division with mathematical rounding to nearest number (x/y rounded, halves round up).
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
2918 
2919 static inline bool VmaStrIsEmpty(const char* pStr)
2920 {
2921  return pStr == VMA_NULL || *pStr == '\0';
2922 }
2923 
2924 #ifndef VMA_SORT
2925 
// Lomuto partition step using the last element as pivot: after the call,
// everything left of the returned iterator compares less than the pivot,
// which now sits at the returned position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator pivot = end;
    --pivot;
    Iterator store = beg;
    for(Iterator cur = beg; cur < pivot; ++cur)
    {
        if(cmp(*cur, *pivot))
        {
            if(store != cur)
            {
                VMA_SWAP(*cur, *store);
            }
            ++store;
        }
    }
    if(store != pivot)
    {
        VMA_SWAP(*store, *pivot);
    }
    return store;
}
2948 
2949 template<typename Iterator, typename Compare>
2950 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2951 {
2952  if(beg < end)
2953  {
2954  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2955  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2956  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2957  }
2958 }
2959 
2960 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2961 
2962 #endif // #ifndef VMA_SORT
2963 
2964 /*
2965 Returns true if two memory blocks occupy overlapping pages.
2966 ResourceA must be in less memory offset than ResourceB.
2967 
2968 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2969 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2970 */
2971 static inline bool VmaBlocksOnSamePage(
2972  VkDeviceSize resourceAOffset,
2973  VkDeviceSize resourceASize,
2974  VkDeviceSize resourceBOffset,
2975  VkDeviceSize pageSize)
2976 {
2977  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2978  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2979  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2980  VkDeviceSize resourceBStart = resourceBOffset;
2981  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2982  return resourceAEndPage == resourceBStartPage;
2983 }
2984 
// Kind of content a suballocation holds. The numeric order matters:
// VmaIsBufferImageGranularityConflict() sorts a pair of these by value
// before testing them, so keep the ordering stable.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // unused region
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // content unknown: treated conservatively
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // image with unknown tiling
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF // Vulkan-style sentinel
};
2995 
2996 /*
2997 Returns true if given suballocation types could conflict and must respect
2998 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2999 or linear image and another one is optimal image. If type is unknown, behave
3000 conservatively.
3001 */
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so that suballocType1 <= suballocType2; the switch below
    // relies on this invariant to enumerate each unordered pair exactly once.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: behave conservatively, assume conflict.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with optimal images (or images that might be optimal).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown-tiling image: conflicts with any image (and optimal in particular).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        // Linear vs. optimal image: the classic bufferImageGranularity conflict.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs. optimal never conflicts.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
3036 
3037 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3038 {
3039  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3040  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3041  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3042  {
3043  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3044  }
3045 }
3046 
3047 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3048 {
3049  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3050  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3051  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3052  {
3053  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3054  {
3055  return false;
3056  }
3057  }
3058  return true;
3059 }
3060 
3061 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3062 struct VmaMutexLock
3063 {
3064  VMA_CLASS_NO_COPY(VmaMutexLock)
3065 public:
3066  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3067  m_pMutex(useMutex ? &mutex : VMA_NULL)
3068  {
3069  if(m_pMutex)
3070  {
3071  m_pMutex->Lock();
3072  }
3073  }
3074 
3075  ~VmaMutexLock()
3076  {
3077  if(m_pMutex)
3078  {
3079  m_pMutex->Unlock();
3080  }
3081  }
3082 
3083 private:
3084  VMA_MUTEX* m_pMutex;
3085 };
3086 
3087 #if VMA_DEBUG_GLOBAL_MUTEX
3088  static VMA_MUTEX gDebugGlobalMutex;
3089  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3090 #else
3091  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3092 #endif
3093 
3094 // Minimum size of a free suballocation to register it in the free suballocation collection.
3095 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3096 
3097 /*
3098 Performs binary search and returns iterator to first element that is greater or
3099 equal to (key), according to comparison (cmp).
3100 
3101 Cmp should return true if first argument is less than second argument.
3102 
3103 Returned value is the found element, if present in the collection or place where
3104 new element with value (key) should be inserted.
3105 */
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    // Returns iterator to the first element that is NOT less than key, i.e.
    // the first element for which cmp(elem, key) is false — exactly the
    // contract of std::lower_bound. <algorithm> is already included by this
    // file's configuration section, so prefer the standard implementation
    // over the hand-rolled binary search.
    return std::lower_bound(beg, end, key, cmp);
}
3124 
3126 // Memory allocation
3127 
3128 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3129 {
3130  if((pAllocationCallbacks != VMA_NULL) &&
3131  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3132  {
3133  return (*pAllocationCallbacks->pfnAllocation)(
3134  pAllocationCallbacks->pUserData,
3135  size,
3136  alignment,
3137  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3138  }
3139  else
3140  {
3141  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3142  }
3143 }
3144 
3145 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3146 {
3147  if((pAllocationCallbacks != VMA_NULL) &&
3148  (pAllocationCallbacks->pfnFree != VMA_NULL))
3149  {
3150  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3151  }
3152  else
3153  {
3154  VMA_SYSTEM_FREE(ptr);
3155  }
3156 }
3157 
3158 template<typename T>
3159 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3160 {
3161  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3162 }
3163 
3164 template<typename T>
3165 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3166 {
3167  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3168 }
3169 
3170 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3171 
3172 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3173 
3174 template<typename T>
3175 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3176 {
3177  ptr->~T();
3178  VmaFree(pAllocationCallbacks, ptr);
3179 }
3180 
3181 template<typename T>
3182 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3183 {
3184  if(ptr != VMA_NULL)
3185  {
3186  for(size_t i = count; i--; )
3187  {
3188  ptr[i].~T();
3189  }
3190  VmaFree(pAllocationCallbacks, ptr);
3191  }
3192 }
3193 
// STL-compatible allocator that routes all allocations through the
// VkAllocationCallbacks held in m_pCallbacks (may be null, in which case
// VmaMalloc/VmaFree fall back to the system allocator).
template<typename T>
class VmaStlAllocator
{
public:
    // Exposed publicly so rebinding copies (and VmaVector) can reuse it.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two instances are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Non-assignable: m_pCallbacks is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
3221 
3222 #if VMA_USE_STL_VECTOR
3223 
3224 #define VmaVector std::vector
3225 
// Inserts item into vec before position index (std::vector flavor of the
// VmaVector helper API).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin() + index;
    vec.insert(pos, item);
}
3231 
// Erases the element at position index from vec (std::vector flavor of the
// VmaVector helper API).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    typename std::vector<T, allocatorT>::iterator pos = vec.begin();
    pos += index;
    vec.erase(pos);
}
3237 
3238 #else // #if VMA_USE_STL_VECTOR
3239 
3240 /* Class with interface compatible with subset of std::vector.
3241 T must be POD because constructors and destructors are not called and memcpy is
3242 used for these objects. */
3243 template<typename T, typename AllocatorT>
3244 class VmaVector
3245 {
3246 public:
3247  typedef T value_type;
3248 
    // Constructs an empty vector; all future allocations go through `allocator`.
    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }
3256 
    // Constructs a vector holding `count` uninitialized elements.
    // T must be POD (per the class contract): no constructors are run.
    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }
3264 
    // Copy constructor: capacity is trimmed to the source's count and the
    // contents are cloned with memcpy (T is POD, so raw byte copy is valid).
    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }
3276 
    // Releases the backing storage. Element destructors are NOT called (T is POD).
    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }
3281 
    // Copy assignment: resizes to match rhs, then copies raw bytes (T is POD).
    // Self-assignment is a no-op. The allocator is NOT copied.
    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }
3294 
    // Size queries and raw access to the backing array (null when never allocated).
    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }
3299 
    // Unchecked element access; bounds are verified only by VMA_HEAVY_ASSERT
    // (compiled out by default).
    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
3310 
    // First/last element access. The vector must be non-empty; this is
    // checked only by VMA_HEAVY_ASSERT (compiled out by default).
    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
3331 
3332  void reserve(size_t newCapacity, bool freeMemory = false)
3333  {
3334  newCapacity = VMA_MAX(newCapacity, m_Count);
3335 
3336  if((newCapacity < m_Capacity) && !freeMemory)
3337  {
3338  newCapacity = m_Capacity;
3339  }
3340 
3341  if(newCapacity != m_Capacity)
3342  {
3343  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3344  if(m_Count != 0)
3345  {
3346  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3347  }
3348  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3349  m_Capacity = newCapacity;
3350  m_pArray = newArray;
3351  }
3352  }
3353 
3354  void resize(size_t newCount, bool freeMemory = false)
3355  {
3356  size_t newCapacity = m_Capacity;
3357  if(newCount > m_Capacity)
3358  {
3359  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3360  }
3361  else if(freeMemory)
3362  {
3363  newCapacity = newCount;
3364  }
3365 
3366  if(newCapacity != m_Capacity)
3367  {
3368  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3369  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3370  if(elementsToCopy != 0)
3371  {
3372  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3373  }
3374  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3375  m_Capacity = newCapacity;
3376  m_pArray = newArray;
3377  }
3378 
3379  m_Count = newCount;
3380  }
3381 
3382  void clear(bool freeMemory = false)
3383  {
3384  resize(0, freeMemory);
3385  }
3386 
3387  void insert(size_t index, const T& src)
3388  {
3389  VMA_HEAVY_ASSERT(index <= m_Count);
3390  const size_t oldCount = size();
3391  resize(oldCount + 1);
3392  if(index < oldCount)
3393  {
3394  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3395  }
3396  m_pArray[index] = src;
3397  }
3398 
3399  void remove(size_t index)
3400  {
3401  VMA_HEAVY_ASSERT(index < m_Count);
3402  const size_t oldCount = size();
3403  if(index < oldCount - 1)
3404  {
3405  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3406  }
3407  resize(oldCount - 1);
3408  }
3409 
3410  void push_back(const T& src)
3411  {
3412  const size_t newIndex = size();
3413  resize(newIndex + 1);
3414  m_pArray[newIndex] = src;
3415  }
3416 
3417  void pop_back()
3418  {
3419  VMA_HEAVY_ASSERT(m_Count > 0);
3420  resize(size() - 1);
3421  }
3422 
3423  void push_front(const T& src)
3424  {
3425  insert(0, src);
3426  }
3427 
3428  void pop_front()
3429  {
3430  VMA_HEAVY_ASSERT(m_Count > 0);
3431  remove(0);
3432  }
3433 
3434  typedef T* iterator;
3435 
3436  iterator begin() { return m_pArray; }
3437  iterator end() { return m_pArray + m_Count; }
3438 
3439 private:
3440  AllocatorT m_Allocator;
3441  T* m_pArray;
3442  size_t m_Count;
3443  size_t m_Capacity;
3444 };
3445 
// Free-function adapter so generic code can insert into a VmaVector the same
// way it would into std::vector (see VMA_USE_STL_VECTOR alternative).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
3451 
// Free-function adapter for element removal, mirroring VmaVectorInsert.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
3457 
3458 #endif // #if VMA_USE_STL_VECTOR
3459 
// Inserts value into a vector kept sorted under CmpLess and returns the index
// at which it was placed. Equivalent elements end up before the new one.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBeg = vector.data();
    typename VectorT::value_type* const pEnd = pBeg + vector.size();
    // Position of the first element not less than value = stable insert point.
    const size_t insertIndex = (size_t)(VmaBinaryFindFirstNotLess(pBeg, pEnd, value, CmpLess()) - pBeg);
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3471 
// Removes one element equivalent to value from a sorted vector.
// Returns true if such an element was found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // A match exists only when *it is equivalent to value under the ordering
    // (neither compares less than the other).
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    return true;
}
3489 
3490 template<typename CmpLess, typename IterT, typename KeyT>
3491 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3492 {
3493  CmpLess comparator;
3494  typename IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3495  beg, end, value, comparator);
3496  if(it == end ||
3497  !comparator(*it, value) && !comparator(value, *it))
3498  {
3499  return it;
3500  }
3501  return end;
3502 }
3503 
3505 // class VmaPoolAllocator
3506 
3507 /*
3508 Allocator for objects of type T using a list of arrays (pools) to speed up
3509 allocation. Number of elements that can be allocated is not bounded because
3510 allocator can create multiple blocks.
3511 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Any items still handed out become dangling.
    void Clear();
    // Returns an uninitialized slot. T's constructor is NOT called.
    T* Alloc();
    // Returns the slot to its pool's free list. T's destructor is NOT called.
    void Free(T* ptr);

private:
    // A slot is either a live T or, while free, a link in the block's
    // singly-linked free list (index of the next free slot).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of m_ItemsPerBlock slots plus its free-list head.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX means the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
3542 
// Stores the callbacks and block size; no memory is allocated until the
// first Alloc() call creates a block.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3551 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all item blocks; outstanding items must no longer be in use.
    Clear();
}
3557 
3558 template<typename T>
3559 void VmaPoolAllocator<T>::Clear()
3560 {
3561  for(size_t i = m_ItemBlocks.size(); i--; )
3562  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3563  m_ItemBlocks.clear();
3564 }
3565 
3566 template<typename T>
3567 T* VmaPoolAllocator<T>::Alloc()
3568 {
3569  for(size_t i = m_ItemBlocks.size(); i--; )
3570  {
3571  ItemBlock& block = m_ItemBlocks[i];
3572  // This block has some free items: Use first one.
3573  if(block.FirstFreeIndex != UINT32_MAX)
3574  {
3575  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3576  block.FirstFreeIndex = pItem->NextFreeIndex;
3577  return &pItem->Value;
3578  }
3579  }
3580 
3581  // No block has free item: Create new one and use it.
3582  ItemBlock& newBlock = CreateNewBlock();
3583  Item* const pItem = &newBlock.pItems[0];
3584  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3585  return &pItem->Value;
3586 }
3587 
3588 template<typename T>
3589 void VmaPoolAllocator<T>::Free(T* ptr)
3590 {
3591  // Search all memory blocks to find ptr.
3592  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
3593  {
3594  ItemBlock& block = m_ItemBlocks[i];
3595 
3596  // Casting to union.
3597  Item* pItemPtr;
3598  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
3599 
3600  // Check if pItemPtr is in address range of this block.
3601  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
3602  {
3603  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
3604  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
3605  block.FirstFreeIndex = index;
3606  return;
3607  }
3608  }
3609  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
3610 }
3611 
3612 template<typename T>
3613 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3614 {
3615  ItemBlock newBlock = {
3616  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3617 
3618  m_ItemBlocks.push_back(newBlock);
3619 
3620  // Setup singly-linked list of all free items in this block.
3621  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3622  newBlock.pItems[i].NextFreeIndex = i + 1;
3623  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3624  return m_ItemBlocks.back();
3625 }
3626 
3628 // class VmaRawList, VmaList
3629 
3630 #if VMA_USE_STL_LIST
3631 
3632 #define VmaList std::list
3633 
3634 #else // #if VMA_USE_STL_LIST
3635 
// Node of VmaRawList: intrusive doubly-linked list element holding one T.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Previous node, or null for the front node.
    VmaListItem* pNext; // Next node, or null for the back node.
    T Value;
};
3643 
3644 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front()/Back() return null when the list is empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push*/Insert* return the new node; the value-less overloads leave
    // Value uninitialized for the caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Nodes come from this pool allocator rather than individual heap allocations.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
3688 
// Constructs an empty list. Item nodes are pooled in blocks of 128 to avoid
// a heap allocation per node.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3698 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor frees the backing blocks wholesale.
}
3705 
3706 template<typename T>
3707 void VmaRawList<T>::Clear()
3708 {
3709  if(IsEmpty() == false)
3710  {
3711  ItemType* pItem = m_pBack;
3712  while(pItem != VMA_NULL)
3713  {
3714  ItemType* const pPrevItem = pItem->pPrev;
3715  m_ItemAllocator.Free(pItem);
3716  pItem = pPrevItem;
3717  }
3718  m_pFront = VMA_NULL;
3719  m_pBack = VMA_NULL;
3720  m_Count = 0;
3721  }
3722 }
3723 
3724 template<typename T>
3725 VmaListItem<T>* VmaRawList<T>::PushBack()
3726 {
3727  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3728  pNewItem->pNext = VMA_NULL;
3729  if(IsEmpty())
3730  {
3731  pNewItem->pPrev = VMA_NULL;
3732  m_pFront = pNewItem;
3733  m_pBack = pNewItem;
3734  m_Count = 1;
3735  }
3736  else
3737  {
3738  pNewItem->pPrev = m_pBack;
3739  m_pBack->pNext = pNewItem;
3740  m_pBack = pNewItem;
3741  ++m_Count;
3742  }
3743  return pNewItem;
3744 }
3745 
3746 template<typename T>
3747 VmaListItem<T>* VmaRawList<T>::PushFront()
3748 {
3749  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3750  pNewItem->pPrev = VMA_NULL;
3751  if(IsEmpty())
3752  {
3753  pNewItem->pNext = VMA_NULL;
3754  m_pFront = pNewItem;
3755  m_pBack = pNewItem;
3756  m_Count = 1;
3757  }
3758  else
3759  {
3760  pNewItem->pNext = m_pFront;
3761  m_pFront->pPrev = pNewItem;
3762  m_pFront = pNewItem;
3763  ++m_Count;
3764  }
3765  return pNewItem;
3766 }
3767 
3768 template<typename T>
3769 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
3770 {
3771  ItemType* const pNewItem = PushBack();
3772  pNewItem->Value = value;
3773  return pNewItem;
3774 }
3775 
3776 template<typename T>
3777 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
3778 {
3779  ItemType* const pNewItem = PushFront();
3780  pNewItem->Value = value;
3781  return pNewItem;
3782 }
3783 
3784 template<typename T>
3785 void VmaRawList<T>::PopBack()
3786 {
3787  VMA_HEAVY_ASSERT(m_Count > 0);
3788  ItemType* const pBackItem = m_pBack;
3789  ItemType* const pPrevItem = pBackItem->pPrev;
3790  if(pPrevItem != VMA_NULL)
3791  {
3792  pPrevItem->pNext = VMA_NULL;
3793  }
3794  m_pBack = pPrevItem;
3795  m_ItemAllocator.Free(pBackItem);
3796  --m_Count;
3797 }
3798 
3799 template<typename T>
3800 void VmaRawList<T>::PopFront()
3801 {
3802  VMA_HEAVY_ASSERT(m_Count > 0);
3803  ItemType* const pFrontItem = m_pFront;
3804  ItemType* const pNextItem = pFrontItem->pNext;
3805  if(pNextItem != VMA_NULL)
3806  {
3807  pNextItem->pPrev = VMA_NULL;
3808  }
3809  m_pFront = pNextItem;
3810  m_ItemAllocator.Free(pFrontItem);
3811  --m_Count;
3812 }
3813 
3814 template<typename T>
3815 void VmaRawList<T>::Remove(ItemType* pItem)
3816 {
3817  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
3818  VMA_HEAVY_ASSERT(m_Count > 0);
3819 
3820  if(pItem->pPrev != VMA_NULL)
3821  {
3822  pItem->pPrev->pNext = pItem->pNext;
3823  }
3824  else
3825  {
3826  VMA_HEAVY_ASSERT(m_pFront == pItem);
3827  m_pFront = pItem->pNext;
3828  }
3829 
3830  if(pItem->pNext != VMA_NULL)
3831  {
3832  pItem->pNext->pPrev = pItem->pPrev;
3833  }
3834  else
3835  {
3836  VMA_HEAVY_ASSERT(m_pBack == pItem);
3837  m_pBack = pItem->pPrev;
3838  }
3839 
3840  m_ItemAllocator.Free(pItem);
3841  --m_Count;
3842 }
3843 
3844 template<typename T>
3845 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3846 {
3847  if(pItem != VMA_NULL)
3848  {
3849  ItemType* const prevItem = pItem->pPrev;
3850  ItemType* const newItem = m_ItemAllocator.Alloc();
3851  newItem->pPrev = prevItem;
3852  newItem->pNext = pItem;
3853  pItem->pPrev = newItem;
3854  if(prevItem != VMA_NULL)
3855  {
3856  prevItem->pNext = newItem;
3857  }
3858  else
3859  {
3860  VMA_HEAVY_ASSERT(m_pFront == pItem);
3861  m_pFront = newItem;
3862  }
3863  ++m_Count;
3864  return newItem;
3865  }
3866  else
3867  return PushBack();
3868 }
3869 
3870 template<typename T>
3871 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3872 {
3873  if(pItem != VMA_NULL)
3874  {
3875  ItemType* const nextItem = pItem->pNext;
3876  ItemType* const newItem = m_ItemAllocator.Alloc();
3877  newItem->pNext = nextItem;
3878  newItem->pPrev = pItem;
3879  pItem->pNext = newItem;
3880  if(nextItem != VMA_NULL)
3881  {
3882  nextItem->pPrev = newItem;
3883  }
3884  else
3885  {
3886  VMA_HEAVY_ASSERT(m_pBack == pItem);
3887  m_pBack = newItem;
3888  }
3889  ++m_Count;
3890  return newItem;
3891  }
3892  else
3893  return PushFront();
3894 }
3895 
3896 template<typename T>
3897 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3898 {
3899  ItemType* const newItem = InsertBefore(pItem);
3900  newItem->Value = value;
3901  return newItem;
3902 }
3903 
3904 template<typename T>
3905 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3906 {
3907  ItemType* const newItem = InsertAfter(pItem);
3908  newItem->Value = value;
3909  return newItem;
3910 }
3911 
// Wrapper around VmaRawList with an interface compatible with a subset of
// std::list. Non-copyable. Iterators remain valid until their item is erased;
// end() is represented by a null m_pItem.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator over mutable elements.
    class iterator
    {
    public:
        // Default-constructed iterator is singular (not bound to any list).
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is asserted against.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Bidirectional iterator over const elements; implicitly convertible
    // from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it`; a null-item iterator (end()) appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4096 
4097 #endif // #if VMA_USE_STL_LIST
4098 
4100 // class VmaMap
4101 
4102 // Unused in this version.
4103 #if 0
4104 
4105 #if VMA_USE_STL_UNORDERED_MAP
4106 
4107 #define VmaPair std::pair
4108 
4109 #define VMA_MAP_TYPE(KeyT, ValueT) \
4110  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4111 
4112 #else // #if VMA_USE_STL_UNORDERED_MAP
4113 
// Minimal stand-in for std::pair, used as the element type of VmaMap.
// NOTE: this whole region is disabled via `#if 0` ("Unused in this version").
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4123 
4124 /* Class compatible with subset of interface of std::unordered_map.
4125 KeyT, ValueT must be POD because they will be stored in VmaVector.
4126 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying sorted vector;
    // they are invalidated by insert() and erase().
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key so lookups can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4146 
4147 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4148 
// Orders pairs (or a pair against a bare key) by the `first` member only.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    // Heterogeneous overload: lets binary search compare pairs against a key.
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4161 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search for the first element not less than `pair` and insert
    // there, keeping m_Vector sorted by key. Duplicate keys are not rejected.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4172 
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Binary search by key using the heterogeneous comparator overload.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // Only an exact key match counts as found; otherwise return end().
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4190 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Convert the pointer-iterator to an index for the vector helper.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4196 
4197 #endif // #if VMA_USE_STL_UNORDERED_MAP
4198 
4199 #endif // #if 0
4200 
4202 
class VmaDeviceMemoryBlock;

// Direction of a cache-maintenance operation on mapped memory: flush
// (host writes -> device) or invalidate (device writes -> host).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4206 
// Internal representation of a single allocation (the object behind the
// public VmaAllocation handle). It is a tagged union: m_Type selects between
// a suballocation of a VmaDeviceMemoryBlock and a dedicated VkDeviceMemory.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData is an owned string copy.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    // Constructs an uninitialized (TYPE_NONE) allocation; one of the Init*
    // methods must be called before use.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation inside `block`.
    // Must be called on a TYPE_NONE object exactly once.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost block allocation
    // (null block, frame index must already be VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Weak CAS on the last-use frame index; `expected` is updated on failure.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a one-allocation
    // "block" of its own. Valid only for ALLOCATION_TYPE_DEDICATED.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX; // No unused ranges: min stays at sentinel.
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4423 
4424 /*
4425 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4426 allocated memory block or free.
4427 */
struct VmaSuballocation
{
    VkDeviceSize offset;      // Byte offset within the VkDeviceMemory block.
    VkDeviceSize size;        // Byte size of this region.
    VmaAllocation hAllocation; // Owning allocation; null for a FREE region.
    VmaSuballocationType type;
};
4435 
4436 // Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
4451 
// Ordered sequence of suballocations covering one VkDeviceMemory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4456 
4457 /*
4458 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4459 
4460 If canMakeOtherLost was false:
4461 - item points to a FREE suballocation.
4462 - itemsToMakeLostCount is 0.
4463 
4464 If canMakeOtherLost was true:
4465 - item points to first of sequence of suballocations, which are either FREE,
4466  or point to VmaAllocations that can become lost.
4467 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4468  the requested allocation to succeed.
4469 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;  // First suballocation of the request; see the comment above this struct.
    size_t itemsToMakeLostCount;          // Number of allocations to make lost; 0 when canMakeOtherLost was false.

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // sacrificed, with each lost allocation charged an additional
    // VMA_LOST_ALLOCATION_COST bytes. Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4483 
4484 /*
4485 Data structure used for bookkeeping of allocations and unused ranges of memory
4486 in a single VkDeviceMemory block.
4487 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata() : m_Size(0) { }
    virtual ~VmaBlockMetadata() { }
    // Must be called once, right after construction, before any other method.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    // Total number of free bytes in this block.
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Size of the largest contiguous free range in this block.
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    // Accumulates statistics about this block into outInfo.
    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Adds this block's statistics to inoutStats.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by pAllocationRequest (see the
    // VmaAllocationRequest comment). Returns false on failure - presumably when
    // they can no longer be made lost; confirm in the implementation.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in this block that can become lost and returns their count.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks the block's memory contents (pBlockData) for corruption of this
    // metadata's regions - presumably via the magic values written around
    // allocations (see VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation).
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes to emit the common JSON structure of a detailed map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block, in bytes. Set by Init().
};
4564 
// General-purpose block metadata: keeps all regions in an ordered list of
// suballocations plus a size-sorted index of the larger free ones.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Allocated regions = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;           // Number of suballocations that are free.
    VkDeviceSize m_SumFreeSize;     // Sum of sizes of all free suballocations.
    VmaSuballocationList m_Suballocations; // All regions of the block, presumably ordered by offset - confirm in implementation.
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Validates invariants of m_FreeSuballocationsBySize; returns false when broken.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4654 
4655 /*
4656 Allocations and their references in internal data structure look like this:
4657 
4658 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4659 
4660  0 +-------+
4661  | |
4662  | |
4663  | |
4664  +-------+
4665  | Alloc | 1st[m_1stNullItemsBeginCount]
4666  +-------+
4667  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4668  +-------+
4669  | ... |
4670  +-------+
4671  | Alloc | 1st[1st.size() - 1]
4672  +-------+
4673  | |
4674  | |
4675  | |
4676 GetSize() +-------+
4677 
4678 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4679 
4680  0 +-------+
4681  | Alloc | 2nd[0]
4682  +-------+
4683  | Alloc | 2nd[1]
4684  +-------+
4685  | ... |
4686  +-------+
4687  | Alloc | 2nd[2nd.size() - 1]
4688  +-------+
4689  | |
4690  | |
4691  | |
4692  +-------+
4693  | Alloc | 1st[m_1stNullItemsBeginCount]
4694  +-------+
4695  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4696  +-------+
4697  | ... |
4698  +-------+
4699  | Alloc | 1st[1st.size() - 1]
4700  +-------+
4701  | |
4702 GetSize() +-------+
4703 
4704 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4705 
4706  0 +-------+
4707  | |
4708  | |
4709  | |
4710  +-------+
4711  | Alloc | 1st[m_1stNullItemsBeginCount]
4712  +-------+
4713  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4714  +-------+
4715  | ... |
4716  +-------+
4717  | Alloc | 1st[1st.size() - 1]
4718  +-------+
4719  | |
4720  | |
4721  | |
4722  +-------+
4723  | Alloc | 2nd[2nd.size() - 1]
4724  +-------+
4725  | ... |
4726  +-------+
4727  | Alloc | 2nd[1]
4728  +-------+
4729  | Alloc | 2nd[0]
4730 GetSize() +-------+
4731 
4732 */
// Linear-algorithm block metadata: allocations are appended sequentially,
// supporting ring-buffer and double-stack modes. See the diagram above for the
// layout of the 1st/2nd vectors in each SECOND_VECTOR_MODE.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;                               // Sum of sizes of all free ranges.
    SuballocationVectorType m_Suballocations0, m_Suballocations1; // Physical storage for the 1st/2nd vectors.
    uint32_t m_1stVectorIndex;                                // Which of the two vectors currently plays the role of "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;                       // Mode of operation of the 2nd vector; see enum above.

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Whether the 1st vector has accumulated enough null items to be worth compacting.
    bool ShouldCompact1st() const;
    // Housekeeping after Free/FreeAtOffset: updates null-item counters and compacts/swaps vectors as needed.
    void CleanupAfterFree();
};
4830 
4831 /*
4832 Represents a single block of device memory (`VkDeviceMemory`) with all the
4833 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4834 
4835 Thread-safety: This class must be externally synchronized.
4836 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of this block's regions; concrete type chosen in Init()
    // (generic vs. linear, per the linearAlgorithm flag).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        bool linearAlgorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // count is added to the internal map reference count (m_MapCount);
    // Unmap with a matching count to balance.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/check corruption-detection markers around the given allocation range -
    // presumably magic values before and after it; confirm in implementation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    // Bind helpers serialized through m_Mutex (vkBindBufferMemory/vkBindImageMemory
    // must not race on the same VkDeviceMemory).
    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;    // Map() reference count; must be 0 at destruction (asserted above).
    void* m_pMappedData;    // Host pointer while mapped; see GetMappedData().
};
4899 
// Strict ordering for raw pointers, used to keep pointer vectors sorted for
// binary search.
// Note: applying `<` directly to pointers into different objects is
// unspecified in C++; comparing their integer representations instead yields a
// consistent total order on all supported platforms (same intent as
// std::less<const void*>).
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return reinterpret_cast<uintptr_t>(lhs) < reinterpret_cast<uintptr_t>(rhs);
    }
};
4907 
4908 class VmaDefragmentator;
4909 
4910 /*
4911 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4912 Vulkan memory type.
4913 
4914 Synchronized internally with a mutex.
4915 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool linearAlgorithm);
    ~VmaBlockVector();

    // Presumably pre-creates m_MinBlockCount blocks up front - confirm in implementation.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    bool UsesLinearAlgorithm() const { return m_LinearAlgorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one (within
    // min/max block count limits).
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the defragmentator for this vector, creating it on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_LinearAlgorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related state (this struct is synchronized internally).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId; // Id assigned to the next block created; see VmaDeviceMemoryBlock::GetId().

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5013 
// A custom memory pool: a dedicated VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once after creation (asserts it was still 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id; // 0 until SetId() is called - presumably assigned by the allocator; confirm at call sites.
};
5035 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Candidates are collected via AddAllocation(); the moves are
// then performed by Defragment() within the given byte/count budgets.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;  // The block vector being defragmented.
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;      // Running total, reported via GetBytesMoved().
    uint32_t m_AllocationsMoved;    // Running total, reported via GetAllocationsMoved().

    // An allocation selected for defragmentation, plus an optional out-flag
    // supplied by the caller (see AddAllocation).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo entries by descending allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state used during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it holds more allocations
        // than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name misspells "Descending"; renaming would break
        // callers elsewhere in this file, so it is left as-is.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries by underlying block pointer; the heterogeneous
    // overload allows searching a BlockInfo vector by VmaDeviceMemoryBlock*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves within the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic: whether moving from (srcBlockIndex, srcOffset) to
    // (dstBlockIndex, dstOffset) would improve the layout.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged may be null;
    // presumably it receives VK_TRUE when the allocation was moved - confirm
    // in the implementation.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5165 
5166 #if VMA_RECORDING_ENABLED
5167 
// Records VMA API calls to a file for later analysis/replay.
// Only compiled in when VMA_RECORDING_ENABLED is nonzero.
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file per settings. useMutex enables locking of
    // m_FileMutex so calls from multiple threads can be recorded safely.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device/environment the recording was made on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public VMA entry point; each logs the call with
    // its frame index and parameters.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call metadata written with every record.
    struct CallParams
    {
        uint32_t threadId;
        double time;  // Seconds since recording start, derived from m_Freq/m_StartCounter - TODO confirm units.
    };

    // Renders a user-data value as a printable string: either the user's own
    // string or the pointer formatted into m_PtrStr - confirm against the
    // VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING flag handling.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];    // Scratch buffer: 16 hex digits + NUL for a formatted pointer.
        const char* m_Str;    // Points at m_PtrStr or at the caller-provided string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;             // Output file opened by Init().
    VMA_MUTEX m_FileMutex;    // Serializes writes to m_File when m_UseMutex is set.
    int64_t m_Freq;           // Timer frequency used to compute CallParams::time.
    int64_t m_StartCounter;   // Timer value captured at Init(), baseline for CallParams::time.

    // Fills outParams with the current thread id and timestamp.
    void GetBasicParams(CallParams& outParams);
    void Flush();
};
5263 
5264 #endif // #if VMA_RECORDING_ENABLED
5265 
5266 // Main allocator object.
5267 struct VmaAllocator_T
5268 {
5269  VMA_CLASS_NO_COPY(VmaAllocator_T)
5270 public:
5271  bool m_UseMutex;
5272  bool m_UseKhrDedicatedAllocation;
5273  VkDevice m_hDevice;
5274  bool m_AllocationCallbacksSpecified;
5275  VkAllocationCallbacks m_AllocationCallbacks;
5276  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5277 
5278  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
5279  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5280  VMA_MUTEX m_HeapSizeLimitMutex;
5281 
5282  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5283  VkPhysicalDeviceMemoryProperties m_MemProps;
5284 
5285  // Default pools.
5286  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5287 
5288  // Each vector is sorted by memory (handle value).
5289  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5290  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5291  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5292 
5293  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5294  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5295  ~VmaAllocator_T();
5296 
5297  const VkAllocationCallbacks* GetAllocationCallbacks() const
5298  {
5299  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5300  }
5301  const VmaVulkanFunctions& GetVulkanFunctions() const
5302  {
5303  return m_VulkanFunctions;
5304  }
5305 
5306  VkDeviceSize GetBufferImageGranularity() const
5307  {
5308  return VMA_MAX(
5309  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5310  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5311  }
5312 
5313  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5314  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5315 
5316  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5317  {
5318  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5319  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5320  }
5321  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5322  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5323  {
5324  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5325  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5326  }
5327  // Minimum alignment for all allocations in specific memory type.
5328  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5329  {
5330  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5331  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5332  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5333  }
5334 
5335  bool IsIntegratedGpu() const
5336  {
5337  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5338  }
5339 
5340 #if VMA_RECORDING_ENABLED
5341  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5342 #endif
5343 
    // Queries memory requirements of hBuffer; also reports whether a dedicated
    // allocation is required or preferred for it.
    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    // Same as GetBufferMemoryRequirements, but for an image.
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    // Fills pStats with statistics of this whole allocator.
    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    // Writes a detailed JSON dump of the allocator's state.
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    // Custom pool management.
    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers around vkAllocateMemory / vkFreeMemory.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    // Updated by SetCurrentFrameIndex; read lock-free via load().
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    // Dispatch table of Vulkan entry points used by this allocator.
    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    // Allocates from one specific, already chosen memory type.
    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees memory of a dedicated allocation.
    void FreeDedicatedMemory(VmaAllocation allocation);
5462 };
5463 
5465 // Memory allocation #2 after VmaAllocator_T definition
5466 
5467 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5468 {
5469  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5470 }
5471 
5472 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5473 {
5474  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5475 }
5476 
5477 template<typename T>
5478 static T* VmaAllocate(VmaAllocator hAllocator)
5479 {
5480  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
5481 }
5482 
5483 template<typename T>
5484 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
5485 {
5486  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
5487 }
5488 
5489 template<typename T>
5490 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5491 {
5492  if(ptr != VMA_NULL)
5493  {
5494  ptr->~T();
5495  VmaFree(hAllocator, ptr);
5496  }
5497 }
5498 
5499 template<typename T>
5500 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5501 {
5502  if(ptr != VMA_NULL)
5503  {
5504  for(size_t i = count; i--; )
5505  ptr[i].~T();
5506  VmaFree(hAllocator, ptr);
5507  }
5508 }
5509 
5511 // VmaStringBuilder
5512 
5513 #if VMA_STATS_STRING_ENABLED
5514 
// Growable character buffer used to build the statistics string.
// The buffer is NOT null-terminated; use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    // Appends a single character.
    void Add(char ch) { m_Data.push_back(ch); }
    // Appends a null-terminated string (without its terminator).
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Append decimal representations of numbers.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends a textual representation of a pointer value.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5532 
5533 void VmaStringBuilder::Add(const char* pStr)
5534 {
5535  const size_t strLen = strlen(pStr);
5536  if(strLen > 0)
5537  {
5538  const size_t oldCount = m_Data.size();
5539  m_Data.resize(oldCount + strLen);
5540  memcpy(m_Data.data() + oldCount, pStr, strLen);
5541  }
5542 }
5543 
5544 void VmaStringBuilder::AddNumber(uint32_t num)
5545 {
5546  char buf[11];
5547  VmaUint32ToStr(buf, sizeof(buf), num);
5548  Add(buf);
5549 }
5550 
5551 void VmaStringBuilder::AddNumber(uint64_t num)
5552 {
5553  char buf[21];
5554  VmaUint64ToStr(buf, sizeof(buf), num);
5555  Add(buf);
5556 }
5557 
5558 void VmaStringBuilder::AddPointer(const void* ptr)
5559 {
5560  char buf[21];
5561  VmaPtrToStr(buf, sizeof(buf), ptr);
5562  Add(buf);
5563 }
5564 
5565 #endif // #if VMA_STATS_STRING_ENABLED
5566 
5568 // VmaJsonWriter
5569 
5570 #if VMA_STATS_STRING_ENABLED
5571 
// Helper that writes JSON text into a VmaStringBuilder. Maintains a stack of
// open objects/arrays and takes care of separators, indentation and string
// escaping. Inside an object, values at even positions are keys (and must be
// strings); values at odd positions are the corresponding mapped values.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begin/End calls must be balanced before the writer is destroyed.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call. Alternatively, build one
    // incrementally with BeginString / ContinueString* / EndString.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Written once per nesting level at the start of each pretty-printed line.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written into this collection so far; inside an
        // object its parity distinguishes keys (even) from values (odd).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Emits the separator/indentation required before the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
5620 
5621 const char* const VmaJsonWriter::INDENT = " ";
5622 
// The writer starts with an empty collection stack, outside any string.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
5629 
// On destruction the produced JSON must be complete: no unterminated string
// and no unclosed object/array.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5635 
5636 void VmaJsonWriter::BeginObject(bool singleLine)
5637 {
5638  VMA_ASSERT(!m_InsideString);
5639 
5640  BeginValue(false);
5641  m_SB.Add('{');
5642 
5643  StackItem item;
5644  item.type = COLLECTION_TYPE_OBJECT;
5645  item.valueCount = 0;
5646  item.singleLineMode = singleLine;
5647  m_Stack.push_back(item);
5648 }
5649 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // Closing brace goes on its own line, indented one level shallower.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
5660 
5661 void VmaJsonWriter::BeginArray(bool singleLine)
5662 {
5663  VMA_ASSERT(!m_InsideString);
5664 
5665  BeginValue(false);
5666  m_SB.Add('[');
5667 
5668  StackItem item;
5669  item.type = COLLECTION_TYPE_ARRAY;
5670  item.valueCount = 0;
5671  item.singleLineMode = singleLine;
5672  m_Stack.push_back(item);
5673 }
5674 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // Closing bracket goes on its own line, indented one level shallower.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
5685 
// Writes a complete JSON string value (with escaping) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
5691 
// Starts a string value; an optional initial fragment can be passed in pStr.
// Must be finished with EndString before any other write call.
void VmaJsonWriter::BeginString(const char* pStr)
{
    VMA_ASSERT(!m_InsideString);

    BeginValue(true);
    m_SB.Add('"');
    m_InsideString = true;
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
}
5704 
// Appends pStr to the string being built, escaping backslashes, quotes and
// the common control characters as required by JSON.
void VmaJsonWriter::ContinueString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);

    const size_t strLen = strlen(pStr);
    for(size_t i = 0; i < strLen; ++i)
    {
        char ch = pStr[i];
        if(ch == '\\')
        {
            m_SB.Add("\\\\");
        }
        else if(ch == '"')
        {
            m_SB.Add("\\\"");
        }
        else if(ch >= 32)
        {
            // Printable characters pass through unchanged.
            m_SB.Add(ch);
        }
        else switch(ch)
        {
        case '\b':
            m_SB.Add("\\b");
            break;
        case '\f':
            m_SB.Add("\\f");
            break;
        case '\n':
            m_SB.Add("\\n");
            break;
        case '\r':
            m_SB.Add("\\r");
            break;
        case '\t':
            m_SB.Add("\\t");
            break;
        default:
            // NOTE(review): other control characters are unsupported. When
            // `char` is signed, bytes >= 0x80 also land here — looks like
            // inputs are expected to be ASCII; confirm with callers.
            VMA_ASSERT(0 && "Character not currently supported.");
            break;
        }
    }
}
5748 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
5754 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
5760 
// Appends a pointer's textual representation to the string being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
5766 
// Finishes the current string value, optionally appending a final fragment.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
5777 
// Writes a JSON number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
5784 
// Writes a JSON number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
5791 
5792 void VmaJsonWriter::WriteBool(bool b)
5793 {
5794  VMA_ASSERT(!m_InsideString);
5795  BeginValue(false);
5796  m_SB.Add(b ? "true" : "false");
5797 }
5798 
// Writes a JSON null value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
5805 
// Called before any value is written. Emits the separator appropriate for
// the current position in the enclosing collection: ": " between an object
// key and its value, ", " plus newline between sibling values, or just
// indentation before the first value. Top-level values get no decoration.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            // Even position inside an object means this value is a key;
            // JSON keys must be strings.
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
5834 
5835 void VmaJsonWriter::WriteIndent(bool oneLess)
5836 {
5837  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5838  {
5839  m_SB.AddNewLine();
5840 
5841  size_t count = m_Stack.size();
5842  if(count > 0 && oneLess)
5843  {
5844  --count;
5845  }
5846  for(size_t i = 0; i < count; ++i)
5847  {
5848  m_SB.Add(INDENT);
5849  }
5850  }
5851 }
5852 
5853 #endif // #if VMA_STATS_STRING_ENABLED
5854 
5856 
5857 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
5858 {
5859  if(IsUserDataString())
5860  {
5861  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5862 
5863  FreeUserDataString(hAllocator);
5864 
5865  if(pUserData != VMA_NULL)
5866  {
5867  const char* const newStrSrc = (char*)pUserData;
5868  const size_t newStrLen = strlen(newStrSrc);
5869  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5870  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5871  m_pUserData = newStrDst;
5872  }
5873  }
5874  else
5875  {
5876  m_pUserData = pUserData;
5877  }
5878 }
5879 
// Re-points this block allocation at a different block/offset (used when the
// allocation is moved, e.g. by defragmentation).
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra mapping reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
5901 
5902 VkDeviceSize VmaAllocation_T::GetOffset() const
5903 {
5904  switch(m_Type)
5905  {
5906  case ALLOCATION_TYPE_BLOCK:
5907  return m_BlockAllocation.m_Offset;
5908  case ALLOCATION_TYPE_DEDICATED:
5909  return 0;
5910  default:
5911  VMA_ASSERT(0);
5912  return 0;
5913  }
5914 }
5915 
5916 VkDeviceMemory VmaAllocation_T::GetMemory() const
5917 {
5918  switch(m_Type)
5919  {
5920  case ALLOCATION_TYPE_BLOCK:
5921  return m_BlockAllocation.m_Block->GetDeviceMemory();
5922  case ALLOCATION_TYPE_DEDICATED:
5923  return m_DedicatedAllocation.m_hMemory;
5924  default:
5925  VMA_ASSERT(0);
5926  return VK_NULL_HANDLE;
5927  }
5928 }
5929 
5930 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5931 {
5932  switch(m_Type)
5933  {
5934  case ALLOCATION_TYPE_BLOCK:
5935  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5936  case ALLOCATION_TYPE_DEDICATED:
5937  return m_DedicatedAllocation.m_MemoryTypeIndex;
5938  default:
5939  VMA_ASSERT(0);
5940  return UINT32_MAX;
5941  }
5942 }
5943 
5944 void* VmaAllocation_T::GetMappedData() const
5945 {
5946  switch(m_Type)
5947  {
5948  case ALLOCATION_TYPE_BLOCK:
5949  if(m_MapCount != 0)
5950  {
5951  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5952  VMA_ASSERT(pBlockData != VMA_NULL);
5953  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5954  }
5955  else
5956  {
5957  return VMA_NULL;
5958  }
5959  break;
5960  case ALLOCATION_TYPE_DEDICATED:
5961  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5962  return m_DedicatedAllocation.m_pMappedData;
5963  default:
5964  VMA_ASSERT(0);
5965  return VMA_NULL;
5966  }
5967 }
5968 
5969 bool VmaAllocation_T::CanBecomeLost() const
5970 {
5971  switch(m_Type)
5972  {
5973  case ALLOCATION_TYPE_BLOCK:
5974  return m_BlockAllocation.m_CanBecomeLost;
5975  case ALLOCATION_TYPE_DEDICATED:
5976  return false;
5977  default:
5978  VMA_ASSERT(0);
5979  return false;
5980  }
5981 }
5982 
// Returns the pool this allocation belongs to. Valid only for block
// allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
5988 
// Tries to atomically mark this allocation as lost. Succeeds only when the
// allocation has not been used within the last `frameInUseCount` frames
// relative to `currentFrameIndex`. Returns true when the allocation became
// lost; the caller must then unregister it in its owning block.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free retry loop around a compare-exchange on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — unexpected for an allocation we were asked to make lost.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently — cannot be made lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
6020 
6021 #if VMA_STATS_STRING_ENABLED
6022 
// Correspond to values of enum VmaSuballocationType.
// Order must stay in sync with that enum - entries are indexed by it.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6032 
// Writes this allocation's parameters as key/value pairs into an already
// open JSON object.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data: dump the pointer value itself.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6068 
6069 #endif
6070 
6071 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6072 {
6073  VMA_ASSERT(IsUserDataString());
6074  if(m_pUserData != VMA_NULL)
6075  {
6076  char* const oldStr = (char*)m_pUserData;
6077  const size_t oldStrLen = strlen(oldStr);
6078  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6079  m_pUserData = VMA_NULL;
6080  }
6081 }
6082 
6083 void VmaAllocation_T::BlockAllocMap()
6084 {
6085  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6086 
6087  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
6088  {
6089  ++m_MapCount;
6090  }
6091  else
6092  {
6093  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
6094  }
6095 }
6096 
6097 void VmaAllocation_T::BlockAllocUnmap()
6098 {
6099  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
6100 
6101  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
6102  {
6103  --m_MapCount;
6104  }
6105  else
6106  {
6107  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
6108  }
6109 }
6110 
// Maps dedicated-allocation memory and returns the pointer in *ppData.
// Repeated calls only bump the reference count; vkMapMemory is invoked
// only for the first mapping.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer if the 7-bit counter has room.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the whole VkDeviceMemory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
6147 
// Decrements the mapping reference count of a dedicated allocation and
// calls vkUnmapMemory when the count reaches zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    // Only the low 7 bits count Map() calls; the persistent-map flag is excluded.
    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
6168 
6169 #if VMA_STATS_STRING_ENABLED
6170 
// Writes a VmaStatInfo as a JSON object. Min/Avg/Max size statistics are
// emitted only when there is more than one allocation / unused range.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
6218 
6219 #endif // #if VMA_STATS_STRING_ENABLED
6220 
6221 struct VmaSuballocationItemSizeLess
6222 {
6223  bool operator()(
6224  const VmaSuballocationList::iterator lhs,
6225  const VmaSuballocationList::iterator rhs) const
6226  {
6227  return lhs->size < rhs->size;
6228  }
6229  bool operator()(
6230  const VmaSuballocationList::iterator lhs,
6231  VkDeviceSize rhsSize) const
6232  {
6233  return lhs->size < rhsSize;
6234  }
6235 };
6236 
6237 
6239 // class VmaBlockMetadata
6240 
6241 #if VMA_STATS_STRING_ENABLED
6242 
// Writes the header of a detailed JSON dump for one block and opens the
// "Suballocations" array; must be paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
6265 
// Writes one used suballocation as a single-line JSON object.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Remaining key/value pairs come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
6279 
// Writes one free range as a single-line JSON object.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
6297 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6303 
6304 #endif // #if VMA_STATS_STRING_ENABLED
6305 
6307 // class VmaBlockMetadata_Generic
6308 
// Metadata starts empty; call Init() to describe a block of a given size.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
6316 
// Nothing to release explicitly; members clean up themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6320 
6321 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
6322 {
6323  VmaBlockMetadata::Init(size);
6324  m_FreeCount = 1;
6325  m_SumFreeSize = size;
6326 
6327  VmaSuballocation suballoc = {};
6328  suballoc.offset = 0;
6329  suballoc.size = size;
6330  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6331  suballoc.hAllocation = VK_NULL_HANDLE;
6332 
6333  m_Suballocations.push_back(suballoc);
6334  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
6335  --suballocItem;
6336  m_FreeSuballocationsBySize.push_back(suballocItem);
6337 }
6338 
// Checks internal consistency of all metadata structures; returns false on
// the first violated invariant. Intended for VMA_HEAVY_ASSERT(Validate()).
bool VmaBlockMetadata_Generic::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // Free suballocations have no VmaAllocation attached; used ones must have one.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            if(subAlloc.size < VMA_DEBUG_MARGIN)
            {
                return false;
            }
        }
        else
        {
            // The attached allocation must agree with this suballocation's placement.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }

            // Margin required between allocations - previous allocation must be free.
            if(VMA_DEBUG_MARGIN > 0 && !prevFree)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != GetSize()) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
6456 
6457 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6458 {
6459  if(!m_FreeSuballocationsBySize.empty())
6460  {
6461  return m_FreeSuballocationsBySize.back()->size;
6462  }
6463  else
6464  {
6465  return 0;
6466  }
6467 }
6468 
6469 bool VmaBlockMetadata_Generic::IsEmpty() const
6470 {
6471  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6472 }
6473 
6474 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
6475 {
6476  outInfo.blockCount = 1;
6477 
6478  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6479  outInfo.allocationCount = rangeCount - m_FreeCount;
6480  outInfo.unusedRangeCount = m_FreeCount;
6481 
6482  outInfo.unusedBytes = m_SumFreeSize;
6483  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
6484 
6485  outInfo.allocationSizeMin = UINT64_MAX;
6486  outInfo.allocationSizeMax = 0;
6487  outInfo.unusedRangeSizeMin = UINT64_MAX;
6488  outInfo.unusedRangeSizeMax = 0;
6489 
6490  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6491  suballocItem != m_Suballocations.cend();
6492  ++suballocItem)
6493  {
6494  const VmaSuballocation& suballoc = *suballocItem;
6495  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6496  {
6497  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
6498  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
6499  }
6500  else
6501  {
6502  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
6503  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
6504  }
6505  }
6506 }
6507 
6508 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
6509 {
6510  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6511 
6512  inoutStats.size += GetSize();
6513  inoutStats.unusedSize += m_SumFreeSize;
6514  inoutStats.allocationCount += rangeCount - m_FreeCount;
6515  inoutStats.unusedRangeCount += m_FreeCount;
6516  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
6517 }
6518 
6519 #if VMA_STATS_STRING_ENABLED
6520 
6521 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
6522 {
6523  PrintDetailedMap_Begin(json,
6524  m_SumFreeSize, // unusedBytes
6525  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
6526  m_FreeCount); // unusedRangeCount
6527 
6528  size_t i = 0;
6529  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
6530  suballocItem != m_Suballocations.cend();
6531  ++suballocItem, ++i)
6532  {
6533  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6534  {
6535  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
6536  }
6537  else
6538  {
6539  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
6540  }
6541  }
6542 
6543  PrintDetailedMap_End(json);
6544 }
6545 
6546 #endif // #if VMA_STATS_STRING_ENABLED
6547 
6548 /*
6549 How many suitable free suballocations to analyze before choosing best one.
6550 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
6551  be chosen.
6552 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
  suballocations will be analyzed and the best one will be chosen.
6554 - Any other value is also acceptable.
6555 */
6556 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
6557 
/*
Tries to find a place for a new allocation of `allocSize` bytes aligned to
`allocAlignment` inside this block.

Search strategy:
1. If there is any registered free suballocation, search m_FreeSuballocationsBySize:
   - VMA_BEST_FIT: binary-search for the smallest free range that can fit, then
     scan upward until CheckAllocation succeeds.
   - otherwise (worst-fit): scan from the biggest free range downward.
2. If that fails and `canMakeOtherLost` is true, brute-force over every
   suballocation, considering making existing lost-capable allocations lost,
   and keep the candidate with the lowest CalcCost().

Returns true and fills *pAllocationRequest on success; false if no placement
is possible. `upperAddress` is not supported by this metadata class (asserted).
*/
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fullfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // A big-enough range may still fail CheckAllocation (alignment,
            // bufferImageGranularity), so keep scanning toward bigger ranges.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search staring from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE sentinels make the first successful candidate always
        // cheaper than the initial state (see CalcCost comparison below) and
        // mark "no candidate found yet" for the final check.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Only free ranges or allocations that may be sacrificed can start
            // a candidate placement.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest candidate (fewest bytes of live
                    // allocations sacrificed).
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize was overwritten iff at least one candidate was accepted.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
6688 
/*
Executes the "make lost" part of an allocation request produced by
CreateAllocationRequest with canMakeOtherLost == true: marks the
`itemsToMakeLostCount` allocations following (and including) the request's
item as lost and frees their suballocations.

On success, pAllocationRequest->item points at a free suballocation where the
new allocation can be placed. Returns false if any allocation refuses to
become lost (e.g. still in use within frameInUseCount frames) — the request
may then be left partially applied.
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next item to sacrifice.
        // (FreeSuballocation merges adjacent free ranges, so a single step
        // suffices — two consecutive free items cannot exist.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors and returns the
            // resulting free item, which becomes the new anchor.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
6720 
6721 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
6722 {
6723  uint32_t lostAllocationCount = 0;
6724  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6725  it != m_Suballocations.end();
6726  ++it)
6727  {
6728  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
6729  it->hAllocation->CanBecomeLost() &&
6730  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
6731  {
6732  it = FreeSuballocation(it);
6733  ++lostAllocationCount;
6734  }
6735  }
6736  return lostAllocationCount;
6737 }
6738 
6739 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
6740 {
6741  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6742  it != m_Suballocations.end();
6743  ++it)
6744  {
6745  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6746  {
6747  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6748  {
6749  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6750  return VK_ERROR_VALIDATION_FAILED_EXT;
6751  }
6752  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6753  {
6754  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6755  return VK_ERROR_VALIDATION_FAILED_EXT;
6756  }
6757  }
6758  }
6759 
6760  return VK_SUCCESS;
6761 }
6762 
/*
Commits an allocation into the free suballocation chosen by a previously
successful CreateAllocationRequest. Carves the free range at request.item
into (optional) leading padding, the allocation itself, and (optional)
trailing padding, registering new free padding ranges as needed.

Note the strict ordering: request.item must be unregistered from
m_FreeSuballocationsBySize BEFORE its fields are overwritten, because
unregistration searches by the current size.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // The consumed free range is gone (-1), each created padding adds one
    // free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only the allocated bytes leave the free pool; padding bytes stay free.
    m_SumFreeSize -= allocSize;
}
6828 
6829 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
6830 {
6831  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6832  suballocItem != m_Suballocations.end();
6833  ++suballocItem)
6834  {
6835  VmaSuballocation& suballoc = *suballocItem;
6836  if(suballoc.hAllocation == allocation)
6837  {
6838  FreeSuballocation(suballocItem);
6839  VMA_HEAVY_ASSERT(Validate());
6840  return;
6841  }
6842  }
6843  VMA_ASSERT(0 && "Not found!");
6844 }
6845 
6846 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
6847 {
6848  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6849  suballocItem != m_Suballocations.end();
6850  ++suballocItem)
6851  {
6852  VmaSuballocation& suballoc = *suballocItem;
6853  if(suballoc.offset == offset)
6854  {
6855  FreeSuballocation(suballocItem);
6856  return;
6857  }
6858  }
6859  VMA_ASSERT(0 && "Not found!");
6860 }
6861 
6862 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
6863 {
6864  VkDeviceSize lastSize = 0;
6865  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
6866  {
6867  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
6868 
6869  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6870  {
6871  VMA_ASSERT(0);
6872  return false;
6873  }
6874  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6875  {
6876  VMA_ASSERT(0);
6877  return false;
6878  }
6879  if(it->size < lastSize)
6880  {
6881  VMA_ASSERT(0);
6882  return false;
6883  }
6884 
6885  lastSize = it->size;
6886  }
6887  return true;
6888 }
6889 
/*
Checks whether an allocation of `allocSize` bytes aligned to `allocAlignment`
can be placed starting at `suballocItem`, returning the resulting offset in
*pOffset.

Two modes:
- canMakeOtherLost == false: suballocItem must be a free suballocation and the
  allocation (plus debug margins and any bufferImageGranularity adjustment)
  must fit entirely inside it.
- canMakeOtherLost == true: the placement may span multiple consecutive
  suballocations; used ones are counted in *itemsToMakeLostCount and
  *pSumItemSize if their allocations are allowed to become lost
  (CanBecomeLost() and old enough w.r.t. currentFrameIndex/frameInUseCount).

Outputs (always written): *pOffset (only meaningful on success),
*itemsToMakeLostCount, *pSumFreeSize (free bytes in the spanned range),
*pSumItemSize (bytes of allocations that would be sacrificed).
Returns true if the placement is possible.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account the starting suballocation: either free space, or an
        // allocation that must be sacrificed (fail if it cannot be).
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the whole placement must fit inside this single free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7163 
7164 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7165 {
7166  VMA_ASSERT(item != m_Suballocations.end());
7167  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7168 
7169  VmaSuballocationList::iterator nextItem = item;
7170  ++nextItem;
7171  VMA_ASSERT(nextItem != m_Suballocations.end());
7172  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7173 
7174  item->size += nextItem->size;
7175  --m_FreeCount;
7176  m_Suballocations.erase(nextItem);
7177 }
7178 
/*
Converts the suballocation at `suballocItem` into a free range, merging it
with a free predecessor and/or successor so that no two adjacent free
suballocations ever exist. Updates m_FreeCount / m_SumFreeSize and the
size-sorted registry. Returns an iterator to the resulting (possibly merged)
free suballocation.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Neighbors must be unregistered from the size-sorted vector BEFORE the
    // merge changes their sizes; the final merged range is re-registered with
    // its new size afterwards.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7230 
7231 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7232 {
7233  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7234  VMA_ASSERT(item->size > 0);
7235 
7236  // You may want to enable this validation at the beginning or at the end of
7237  // this function, depending on what do you want to check.
7238  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7239 
7240  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7241  {
7242  if(m_FreeSuballocationsBySize.empty())
7243  {
7244  m_FreeSuballocationsBySize.push_back(item);
7245  }
7246  else
7247  {
7248  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7249  }
7250  }
7251 
7252  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7253 }
7254 
7255 
/*
Removes `item` from the size-sorted registry m_FreeSuballocationsBySize.
Must be called with the item's CURRENT size still intact (before any resize
or merge), because the lookup is a binary search by size followed by a linear
scan over entries of equal size. Items below the registration threshold were
never registered and are skipped.
*/
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search finds the first entry with size >= item->size...
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // ...then scan forward through the run of equal-size entries until
        // the exact iterator is found.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
7288 
7290 // class VmaBlockMetadata_Linear
7291 
/*
Constructs empty linear-allocator metadata. Both suballocation vectors use
the allocator's allocation callbacks; vector 0 starts as the "1st" vector
(m_1stVectorIndex == 0) and the 2nd vector begins empty.
*/
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7303 
// Trivial destructor; member vectors release their storage automatically.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7307 
// Initializes metadata for a block of `size` bytes: the whole block starts
// as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7313 
7314 bool VmaBlockMetadata_Linear::Validate() const
7315 {
7316  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7317  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7318 
7319  if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))
7320  {
7321  return false;
7322  }
7323  if(suballocations1st.empty() && !suballocations2nd.empty() &&
7324  m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7325  {
7326  return false;
7327  }
7328  if(!suballocations1st.empty())
7329  {
7330  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
7331  if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
7332  {
7333  return false;
7334  }
7335  // Null item at the end should be just pop_back().
7336  if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)
7337  {
7338  return false;
7339  }
7340  }
7341  if(!suballocations2nd.empty())
7342  {
7343  // Null item at the end should be just pop_back().
7344  if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
7345  {
7346  return false;
7347  }
7348  }
7349 
7350  if(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount > suballocations1st.size())
7351  {
7352  return false;
7353  }
7354  if(m_2ndNullItemsCount > suballocations2nd.size())
7355  {
7356  return false;
7357  }
7358 
7359  VkDeviceSize sumUsedSize = 0;
7360  const size_t suballoc1stCount = suballocations1st.size();
7361  VkDeviceSize offset = VMA_DEBUG_MARGIN;
7362 
7363  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7364  {
7365  const size_t suballoc2ndCount = suballocations2nd.size();
7366  size_t nullItem2ndCount = 0;
7367  for(size_t i = 0; i < suballoc2ndCount; ++i)
7368  {
7369  const VmaSuballocation& suballoc = suballocations2nd[i];
7370  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7371 
7372  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7373  {
7374  return false;
7375  }
7376  if(suballoc.offset < offset)
7377  {
7378  return false;
7379  }
7380 
7381  if(!currFree)
7382  {
7383  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7384  {
7385  return false;
7386  }
7387  if(suballoc.hAllocation->GetSize() != suballoc.size)
7388  {
7389  return false;
7390  }
7391  sumUsedSize += suballoc.size;
7392  }
7393  else
7394  {
7395  ++nullItem2ndCount;
7396  }
7397 
7398  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7399  }
7400 
7401  if(nullItem2ndCount != m_2ndNullItemsCount)
7402  {
7403  return false;
7404  }
7405  }
7406 
7407  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
7408  {
7409  const VmaSuballocation& suballoc = suballocations1st[i];
7410  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||
7411  suballoc.hAllocation != VK_NULL_HANDLE)
7412  {
7413  return false;
7414  }
7415  }
7416 
7417  size_t nullItem1stCount = m_1stNullItemsBeginCount;
7418 
7419  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
7420  {
7421  const VmaSuballocation& suballoc = suballocations1st[i];
7422  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7423 
7424  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7425  {
7426  return false;
7427  }
7428  if(suballoc.offset < offset)
7429  {
7430  return false;
7431  }
7432  if(i < m_1stNullItemsBeginCount && !currFree)
7433  {
7434  return false;
7435  }
7436 
7437  if(!currFree)
7438  {
7439  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7440  {
7441  return false;
7442  }
7443  if(suballoc.hAllocation->GetSize() != suballoc.size)
7444  {
7445  return false;
7446  }
7447  sumUsedSize += suballoc.size;
7448  }
7449  else
7450  {
7451  ++nullItem1stCount;
7452  }
7453 
7454  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7455  }
7456  if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)
7457  {
7458  return false;
7459  }
7460 
7461  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7462  {
7463  const size_t suballoc2ndCount = suballocations2nd.size();
7464  size_t nullItem2ndCount = 0;
7465  for(size_t i = suballoc2ndCount; i--; )
7466  {
7467  const VmaSuballocation& suballoc = suballocations2nd[i];
7468  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7469 
7470  if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
7471  {
7472  return false;
7473  }
7474  if(suballoc.offset < offset)
7475  {
7476  return false;
7477  }
7478 
7479  if(!currFree)
7480  {
7481  if(suballoc.hAllocation->GetOffset() != suballoc.offset)
7482  {
7483  return false;
7484  }
7485  if(suballoc.hAllocation->GetSize() != suballoc.size)
7486  {
7487  return false;
7488  }
7489  sumUsedSize += suballoc.size;
7490  }
7491  else
7492  {
7493  ++nullItem2ndCount;
7494  }
7495 
7496  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
7497  }
7498 
7499  if(nullItem2ndCount != m_2ndNullItemsCount)
7500  {
7501  return false;
7502  }
7503  }
7504 
7505  if(offset > GetSize())
7506  {
7507  return false;
7508  }
7509  if(m_SumFreeSize != GetSize() - sumUsedSize)
7510  {
7511  return false;
7512  }
7513 
7514  return true;
7515 }
7516 
7517 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7518 {
7519  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7520  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7521 }
7522 
/*
Returns the size of the largest contiguous region currently available for a
NEW allocation, which depends on the 2nd-vector mode:
- SECOND_VECTOR_EMPTY: space after the end of 1st, or before its beginning.
- SECOND_VECTOR_RING_BUFFER: the gap between the end of 2nd and the start of 1st.
- SECOND_VECTOR_DOUBLE_STACK: the gap between the end of 1st and the top of 2nd.
*/
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        whould make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live (non-null) suballocation and the very last one bound
            // the occupied region.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd vector grows downward from the end of the block; its back()
            // is the lowest-offset (top) element.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
7586 
7587 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7588 {
7589  const VkDeviceSize size = GetSize();
7590  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7591  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7592  const size_t suballoc1stCount = suballocations1st.size();
7593  const size_t suballoc2ndCount = suballocations2nd.size();
7594 
7595  outInfo.blockCount = 1;
7596  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7597  outInfo.unusedRangeCount = 0;
7598  outInfo.usedBytes = 0;
7599  outInfo.allocationSizeMin = UINT64_MAX;
7600  outInfo.allocationSizeMax = 0;
7601  outInfo.unusedRangeSizeMin = UINT64_MAX;
7602  outInfo.unusedRangeSizeMax = 0;
7603 
7604  VkDeviceSize lastOffset = 0;
7605 
7606  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7607  {
7608  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7609  size_t nextAlloc2ndIndex = 0;
7610  while(lastOffset < freeSpace2ndTo1stEnd)
7611  {
7612  // Find next non-null allocation or move nextAllocIndex to the end.
7613  while(nextAlloc2ndIndex < suballoc2ndCount &&
7614  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7615  {
7616  ++nextAlloc2ndIndex;
7617  }
7618 
7619  // Found non-null allocation.
7620  if(nextAlloc2ndIndex < suballoc2ndCount)
7621  {
7622  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7623 
7624  // 1. Process free space before this allocation.
7625  if(lastOffset < suballoc.offset)
7626  {
7627  // There is free space from lastOffset to suballoc.offset.
7628  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7629  ++outInfo.unusedRangeCount;
7630  outInfo.unusedBytes += unusedRangeSize;
7631  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7632  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7633  }
7634 
7635  // 2. Process this allocation.
7636  // There is allocation with suballoc.offset, suballoc.size.
7637  outInfo.usedBytes += suballoc.size;
7638  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7639  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7640 
7641  // 3. Prepare for next iteration.
7642  lastOffset = suballoc.offset + suballoc.size;
7643  ++nextAlloc2ndIndex;
7644  }
7645  // We are at the end.
7646  else
7647  {
7648  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7649  if(lastOffset < freeSpace2ndTo1stEnd)
7650  {
7651  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7652  ++outInfo.unusedRangeCount;
7653  outInfo.unusedBytes += unusedRangeSize;
7654  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7655  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7656  }
7657 
7658  // End of loop.
7659  lastOffset = freeSpace2ndTo1stEnd;
7660  }
7661  }
7662  }
7663 
7664  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7665  const VkDeviceSize freeSpace1stTo2ndEnd =
7666  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7667  while(lastOffset < freeSpace1stTo2ndEnd)
7668  {
7669  // Find next non-null allocation or move nextAllocIndex to the end.
7670  while(nextAlloc1stIndex < suballoc1stCount &&
7671  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7672  {
7673  ++nextAlloc1stIndex;
7674  }
7675 
7676  // Found non-null allocation.
7677  if(nextAlloc1stIndex < suballoc1stCount)
7678  {
7679  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7680 
7681  // 1. Process free space before this allocation.
7682  if(lastOffset < suballoc.offset)
7683  {
7684  // There is free space from lastOffset to suballoc.offset.
7685  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7686  ++outInfo.unusedRangeCount;
7687  outInfo.unusedBytes += unusedRangeSize;
7688  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7689  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7690  }
7691 
7692  // 2. Process this allocation.
7693  // There is allocation with suballoc.offset, suballoc.size.
7694  outInfo.usedBytes += suballoc.size;
7695  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7696  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7697 
7698  // 3. Prepare for next iteration.
7699  lastOffset = suballoc.offset + suballoc.size;
7700  ++nextAlloc1stIndex;
7701  }
7702  // We are at the end.
7703  else
7704  {
7705  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7706  if(lastOffset < freeSpace1stTo2ndEnd)
7707  {
7708  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7709  ++outInfo.unusedRangeCount;
7710  outInfo.unusedBytes += unusedRangeSize;
7711  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7712  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7713  }
7714 
7715  // End of loop.
7716  lastOffset = freeSpace1stTo2ndEnd;
7717  }
7718  }
7719 
7720  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7721  {
7722  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7723  while(lastOffset < size)
7724  {
7725  // Find next non-null allocation or move nextAllocIndex to the end.
7726  while(nextAlloc2ndIndex != SIZE_MAX &&
7727  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7728  {
7729  --nextAlloc2ndIndex;
7730  }
7731 
7732  // Found non-null allocation.
7733  if(nextAlloc2ndIndex != SIZE_MAX)
7734  {
7735  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7736 
7737  // 1. Process free space before this allocation.
7738  if(lastOffset < suballoc.offset)
7739  {
7740  // There is free space from lastOffset to suballoc.offset.
7741  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7742  ++outInfo.unusedRangeCount;
7743  outInfo.unusedBytes += unusedRangeSize;
7744  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7745  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7746  }
7747 
7748  // 2. Process this allocation.
7749  // There is allocation with suballoc.offset, suballoc.size.
7750  outInfo.usedBytes += suballoc.size;
7751  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7752  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7753 
7754  // 3. Prepare for next iteration.
7755  lastOffset = suballoc.offset + suballoc.size;
7756  --nextAlloc2ndIndex;
7757  }
7758  // We are at the end.
7759  else
7760  {
7761  // There is free space from lastOffset to size.
7762  if(lastOffset < size)
7763  {
7764  const VkDeviceSize unusedRangeSize = size - lastOffset;
7765  ++outInfo.unusedRangeCount;
7766  outInfo.unusedBytes += unusedRangeSize;
7767  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7768  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7769  }
7770 
7771  // End of loop.
7772  lastOffset = size;
7773  }
7774  }
7775  }
7776 
7777  outInfo.unusedBytes = size - outInfo.usedBytes;
7778 }
7779 
7780 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
7781 {
7782  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7783  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7784  const VkDeviceSize size = GetSize();
7785  const size_t suballoc1stCount = suballocations1st.size();
7786  const size_t suballoc2ndCount = suballocations2nd.size();
7787 
7788  inoutStats.size += size;
7789 
7790  VkDeviceSize lastOffset = 0;
7791 
7792  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7793  {
7794  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7795  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
7796  while(lastOffset < freeSpace2ndTo1stEnd)
7797  {
7798  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7799  while(nextAlloc2ndIndex < suballoc2ndCount &&
7800  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7801  {
7802  ++nextAlloc2ndIndex;
7803  }
7804 
7805  // Found non-null allocation.
7806  if(nextAlloc2ndIndex < suballoc2ndCount)
7807  {
7808  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7809 
7810  // 1. Process free space before this allocation.
7811  if(lastOffset < suballoc.offset)
7812  {
7813  // There is free space from lastOffset to suballoc.offset.
7814  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7815  inoutStats.unusedSize += unusedRangeSize;
7816  ++inoutStats.unusedRangeCount;
7817  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7818  }
7819 
7820  // 2. Process this allocation.
7821  // There is allocation with suballoc.offset, suballoc.size.
7822  ++inoutStats.allocationCount;
7823 
7824  // 3. Prepare for next iteration.
7825  lastOffset = suballoc.offset + suballoc.size;
7826  ++nextAlloc2ndIndex;
7827  }
7828  // We are at the end.
7829  else
7830  {
7831  if(lastOffset < freeSpace2ndTo1stEnd)
7832  {
7833  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7834  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7835  inoutStats.unusedSize += unusedRangeSize;
7836  ++inoutStats.unusedRangeCount;
7837  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7838  }
7839 
7840  // End of loop.
7841  lastOffset = freeSpace2ndTo1stEnd;
7842  }
7843  }
7844  }
7845 
7846  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7847  const VkDeviceSize freeSpace1stTo2ndEnd =
7848  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7849  while(lastOffset < freeSpace1stTo2ndEnd)
7850  {
7851  // Find next non-null allocation or move nextAllocIndex to the end.
7852  while(nextAlloc1stIndex < suballoc1stCount &&
7853  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7854  {
7855  ++nextAlloc1stIndex;
7856  }
7857 
7858  // Found non-null allocation.
7859  if(nextAlloc1stIndex < suballoc1stCount)
7860  {
7861  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7862 
7863  // 1. Process free space before this allocation.
7864  if(lastOffset < suballoc.offset)
7865  {
7866  // There is free space from lastOffset to suballoc.offset.
7867  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7868  inoutStats.unusedSize += unusedRangeSize;
7869  ++inoutStats.unusedRangeCount;
7870  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7871  }
7872 
7873  // 2. Process this allocation.
7874  // There is allocation with suballoc.offset, suballoc.size.
7875  ++inoutStats.allocationCount;
7876 
7877  // 3. Prepare for next iteration.
7878  lastOffset = suballoc.offset + suballoc.size;
7879  ++nextAlloc1stIndex;
7880  }
7881  // We are at the end.
7882  else
7883  {
7884  if(lastOffset < freeSpace1stTo2ndEnd)
7885  {
7886  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7887  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7888  inoutStats.unusedSize += unusedRangeSize;
7889  ++inoutStats.unusedRangeCount;
7890  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7891  }
7892 
7893  // End of loop.
7894  lastOffset = freeSpace1stTo2ndEnd;
7895  }
7896  }
7897 
7898  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7899  {
7900  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7901  while(lastOffset < size)
7902  {
7903  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7904  while(nextAlloc2ndIndex != SIZE_MAX &&
7905  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7906  {
7907  --nextAlloc2ndIndex;
7908  }
7909 
7910  // Found non-null allocation.
7911  if(nextAlloc2ndIndex != SIZE_MAX)
7912  {
7913  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7914 
7915  // 1. Process free space before this allocation.
7916  if(lastOffset < suballoc.offset)
7917  {
7918  // There is free space from lastOffset to suballoc.offset.
7919  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7920  inoutStats.unusedSize += unusedRangeSize;
7921  ++inoutStats.unusedRangeCount;
7922  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7923  }
7924 
7925  // 2. Process this allocation.
7926  // There is allocation with suballoc.offset, suballoc.size.
7927  ++inoutStats.allocationCount;
7928 
7929  // 3. Prepare for next iteration.
7930  lastOffset = suballoc.offset + suballoc.size;
7931  --nextAlloc2ndIndex;
7932  }
7933  // We are at the end.
7934  else
7935  {
7936  if(lastOffset < size)
7937  {
7938  // There is free space from lastOffset to size.
7939  const VkDeviceSize unusedRangeSize = size - lastOffset;
7940  inoutStats.unusedSize += unusedRangeSize;
7941  ++inoutStats.unusedRangeCount;
7942  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7943  }
7944 
7945  // End of loop.
7946  lastOffset = size;
7947  }
7948  }
7949  }
7950 }
7951 
7952 #if VMA_STATS_STRING_ENABLED
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    /*
    Writes a detailed JSON map of this block's layout to `json`.

    Two passes over the same three-region, address-order walk used by the other
    statistics functions (ring-buffer part of 2nd vector, then 1st vector, then
    double-stack part of 2nd vector iterated backwards):
    - FIRST PASS only counts allocations, used bytes, and unused ranges, because
      PrintDetailedMap_Begin needs those totals up front.
    - SECOND PASS repeats the identical walk and emits one JSON entry per
      allocation / unused range.
    Null items (hAllocation == VK_NULL_HANDLE) are skipped; their space shows up
    as unused ranges.
    */
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // Ring-buffer part of the 2nd vector occupies the beginning of the
        // block, up to the first (possibly null) item of the 1st vector.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // 1st vector: skip its null-item prefix. In double-stack mode it may grow
    // only up to the top of the 2nd (upper) stack; otherwise to the block end.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): this checks `size` while the matching second-pass
            // branch checks `freeSpace1stTo2ndEnd`. Inside this loop
            // lastOffset < freeSpace1stTo2ndEnd <= size, so the two conditions
            // are equivalent here, but consider unifying for clarity.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack is stored top-down: iterate backwards to visit its
        // allocations in increasing address order. SIZE_MAX marks "before
        // index 0" after the unsigned wrap-around of --nextAlloc2ndIndex.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    // Emit the JSON header with the totals gathered by the first pass.
    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd from the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Same backwards walk over the upper stack as in the first pass.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
8267 #endif // #if VMA_STATS_STRING_ENABLED
8268 
8269 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8270  uint32_t currentFrameIndex,
8271  uint32_t frameInUseCount,
8272  VkDeviceSize bufferImageGranularity,
8273  VkDeviceSize allocSize,
8274  VkDeviceSize allocAlignment,
8275  bool upperAddress,
8276  VmaSuballocationType allocType,
8277  bool canMakeOtherLost,
8278  VmaAllocationRequest* pAllocationRequest)
8279 {
8280  VMA_ASSERT(allocSize > 0);
8281  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8282  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8283  VMA_HEAVY_ASSERT(Validate());
8284 
8285  const VkDeviceSize size = GetSize();
8286  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8287  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8288 
8289  if(upperAddress)
8290  {
8291  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8292  {
8293  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8294  return false;
8295  }
8296 
8297  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8298  if(allocSize > size)
8299  {
8300  return false;
8301  }
8302  VkDeviceSize resultBaseOffset = size - allocSize;
8303  if(!suballocations2nd.empty())
8304  {
8305  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8306  resultBaseOffset = lastSuballoc.offset - allocSize;
8307  if(allocSize > lastSuballoc.offset)
8308  {
8309  return false;
8310  }
8311  }
8312 
8313  // Start from offset equal to end of free space.
8314  VkDeviceSize resultOffset = resultBaseOffset;
8315 
8316  // Apply VMA_DEBUG_MARGIN at the end.
8317  if(VMA_DEBUG_MARGIN > 0)
8318  {
8319  if(resultOffset < VMA_DEBUG_MARGIN)
8320  {
8321  return false;
8322  }
8323  resultOffset -= VMA_DEBUG_MARGIN;
8324  }
8325 
8326  // Apply alignment.
8327  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8328 
8329  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8330  // Make bigger alignment if necessary.
8331  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8332  {
8333  bool bufferImageGranularityConflict = false;
8334  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8335  {
8336  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8337  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8338  {
8339  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8340  {
8341  bufferImageGranularityConflict = true;
8342  break;
8343  }
8344  }
8345  else
8346  // Already on previous page.
8347  break;
8348  }
8349  if(bufferImageGranularityConflict)
8350  {
8351  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8352  }
8353  }
8354 
8355  // There is enough free space.
8356  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8357  suballocations1st.back().offset + suballocations1st.back().size :
8358  0;
8359  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8360  {
8361  // Check previous suballocations for BufferImageGranularity conflicts.
8362  // If conflict exists, allocation cannot be made here.
8363  if(bufferImageGranularity > 1)
8364  {
8365  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8366  {
8367  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8368  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8369  {
8370  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8371  {
8372  return false;
8373  }
8374  }
8375  else
8376  {
8377  // Already on next page.
8378  break;
8379  }
8380  }
8381  }
8382 
8383  // All tests passed: Success.
8384  pAllocationRequest->offset = resultOffset;
8385  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8386  pAllocationRequest->sumItemSize = 0;
8387  // pAllocationRequest->item unused.
8388  pAllocationRequest->itemsToMakeLostCount = 0;
8389  return true;
8390  }
8391  }
8392  else // !upperAddress
8393  {
8394  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8395  {
8396  // Try to allocate at the end of 1st vector.
8397 
8398  VkDeviceSize resultBaseOffset = 0;
8399  if(!suballocations1st.empty())
8400  {
8401  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8402  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8403  }
8404 
8405  // Start from offset equal to beginning of free space.
8406  VkDeviceSize resultOffset = resultBaseOffset;
8407 
8408  // Apply VMA_DEBUG_MARGIN at the beginning.
8409  if(VMA_DEBUG_MARGIN > 0)
8410  {
8411  resultOffset += VMA_DEBUG_MARGIN;
8412  }
8413 
8414  // Apply alignment.
8415  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8416 
8417  // Check previous suballocations for BufferImageGranularity conflicts.
8418  // Make bigger alignment if necessary.
8419  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8420  {
8421  bool bufferImageGranularityConflict = false;
8422  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8423  {
8424  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8425  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8426  {
8427  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8428  {
8429  bufferImageGranularityConflict = true;
8430  break;
8431  }
8432  }
8433  else
8434  // Already on previous page.
8435  break;
8436  }
8437  if(bufferImageGranularityConflict)
8438  {
8439  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8440  }
8441  }
8442 
8443  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8444  suballocations2nd.back().offset : size;
8445 
8446  // There is enough free space at the end after alignment.
8447  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8448  {
8449  // Check next suballocations for BufferImageGranularity conflicts.
8450  // If conflict exists, allocation cannot be made here.
8451  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8452  {
8453  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8454  {
8455  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8456  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8457  {
8458  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8459  {
8460  return false;
8461  }
8462  }
8463  else
8464  {
8465  // Already on previous page.
8466  break;
8467  }
8468  }
8469  }
8470 
8471  // All tests passed: Success.
8472  pAllocationRequest->offset = resultOffset;
8473  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8474  pAllocationRequest->sumItemSize = 0;
8475  // pAllocationRequest->item unused.
8476  pAllocationRequest->itemsToMakeLostCount = 0;
8477  return true;
8478  }
8479  }
8480 
8481  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8482  // beginning of 1st vector as the end of free space.
8483  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8484  {
8485  VMA_ASSERT(!suballocations1st.empty());
8486 
8487  VkDeviceSize resultBaseOffset = 0;
8488  if(!suballocations2nd.empty())
8489  {
8490  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8491  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8492  }
8493 
8494  // Start from offset equal to beginning of free space.
8495  VkDeviceSize resultOffset = resultBaseOffset;
8496 
8497  // Apply VMA_DEBUG_MARGIN at the beginning.
8498  if(VMA_DEBUG_MARGIN > 0)
8499  {
8500  resultOffset += VMA_DEBUG_MARGIN;
8501  }
8502 
8503  // Apply alignment.
8504  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8505 
8506  // Check previous suballocations for BufferImageGranularity conflicts.
8507  // Make bigger alignment if necessary.
8508  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8509  {
8510  bool bufferImageGranularityConflict = false;
8511  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8512  {
8513  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8514  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8515  {
8516  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8517  {
8518  bufferImageGranularityConflict = true;
8519  break;
8520  }
8521  }
8522  else
8523  // Already on previous page.
8524  break;
8525  }
8526  if(bufferImageGranularityConflict)
8527  {
8528  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8529  }
8530  }
8531 
8532  pAllocationRequest->itemsToMakeLostCount = 0;
8533  pAllocationRequest->sumItemSize = 0;
8534  size_t index1st = m_1stNullItemsBeginCount;
8535 
8536  if(canMakeOtherLost)
8537  {
8538  while(index1st < suballocations1st.size() &&
8539  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8540  {
8541  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8542  const VmaSuballocation& suballoc = suballocations1st[index1st];
8543  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8544  {
8545  // No problem.
8546  }
8547  else
8548  {
8549  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8550  if(suballoc.hAllocation->CanBecomeLost() &&
8551  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8552  {
8553  ++pAllocationRequest->itemsToMakeLostCount;
8554  pAllocationRequest->sumItemSize += suballoc.size;
8555  }
8556  else
8557  {
8558  return false;
8559  }
8560  }
8561  ++index1st;
8562  }
8563 
8564  // Check next suballocations for BufferImageGranularity conflicts.
8565  // If conflict exists, we must mark more allocations lost or fail.
8566  if(bufferImageGranularity > 1)
8567  {
8568  while(index1st < suballocations1st.size())
8569  {
8570  const VmaSuballocation& suballoc = suballocations1st[index1st];
8571  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8572  {
8573  if(suballoc.hAllocation != VK_NULL_HANDLE)
8574  {
8575  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8576  if(suballoc.hAllocation->CanBecomeLost() &&
8577  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8578  {
8579  ++pAllocationRequest->itemsToMakeLostCount;
8580  pAllocationRequest->sumItemSize += suballoc.size;
8581  }
8582  else
8583  {
8584  return false;
8585  }
8586  }
8587  }
8588  else
8589  {
8590  // Already on next page.
8591  break;
8592  }
8593  ++index1st;
8594  }
8595  }
8596  }
8597 
8598  // There is enough free space at the end after alignment.
8599  if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
8600  index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
8601  {
8602  // Check next suballocations for BufferImageGranularity conflicts.
8603  // If conflict exists, allocation cannot be made here.
8604  if(bufferImageGranularity > 1)
8605  {
8606  for(size_t nextSuballocIndex = index1st;
8607  nextSuballocIndex < suballocations1st.size();
8608  nextSuballocIndex++)
8609  {
8610  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8611  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8612  {
8613  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8614  {
8615  return false;
8616  }
8617  }
8618  else
8619  {
8620  // Already on next page.
8621  break;
8622  }
8623  }
8624  }
8625 
8626  // All tests passed: Success.
8627  pAllocationRequest->offset = resultOffset;
8628  pAllocationRequest->sumFreeSize =
8629  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8630  - resultBaseOffset
8631  - pAllocationRequest->sumItemSize;
8632  // pAllocationRequest->item unused.
8633  return true;
8634  }
8635  }
8636  }
8637 
8638  return false;
8639 }
8640 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// scanning the 1st suballocation vector from its first live item.
// Returns false if any of them can no longer be made lost; in that case the
// allocation request must be abandoned.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is only supported while the 2nd vector is empty
    // or used as a ring buffer — not as the upper part of a double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free gap and keep the bookkeeping in sync.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8685 
8686 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8687 {
8688  uint32_t lostAllocationCount = 0;
8689 
8690  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8691  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8692  {
8693  VmaSuballocation& suballoc = suballocations1st[i];
8694  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8695  suballoc.hAllocation->CanBecomeLost() &&
8696  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8697  {
8698  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8699  suballoc.hAllocation = VK_NULL_HANDLE;
8700  ++m_1stNullItemsMiddleCount;
8701  m_SumFreeSize += suballoc.size;
8702  ++lostAllocationCount;
8703  }
8704  }
8705 
8706  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8707  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8708  {
8709  VmaSuballocation& suballoc = suballocations2nd[i];
8710  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8711  suballoc.hAllocation->CanBecomeLost() &&
8712  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8713  {
8714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8715  suballoc.hAllocation = VK_NULL_HANDLE;
8716  ++m_2ndNullItemsCount;
8717  ++lostAllocationCount;
8718  }
8719  }
8720 
8721  if(lostAllocationCount)
8722  {
8723  CleanupAfterFree();
8724  }
8725 
8726  return lostAllocationCount;
8727 }
8728 
8729 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8730 {
8731  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8732  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8733  {
8734  const VmaSuballocation& suballoc = suballocations1st[i];
8735  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8736  {
8737  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8738  {
8739  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8740  return VK_ERROR_VALIDATION_FAILED_EXT;
8741  }
8742  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8743  {
8744  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8745  return VK_ERROR_VALIDATION_FAILED_EXT;
8746  }
8747  }
8748  }
8749 
8750  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8751  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8752  {
8753  const VmaSuballocation& suballoc = suballocations2nd[i];
8754  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8755  {
8756  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8757  {
8758  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8759  return VK_ERROR_VALIDATION_FAILED_EXT;
8760  }
8761  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8762  {
8763  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8764  return VK_ERROR_VALIDATION_FAILED_EXT;
8765  }
8766  }
8767  }
8768 
8769  return VK_SUCCESS;
8770 }
8771 
// Commits an allocation previously computed by CreateAllocationRequest().
// Inserts the new suballocation into the 1st or 2nd vector depending on the
// address direction and the current ring-buffer / double-stack mode, and
// updates m_SumFreeSize accordingly.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations go to the 2nd vector, which then acts as
        // the upper part of a double stack. Incompatible with ring-buffer use.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Validate / update the mode before appending to the 2nd vector.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request does not match either insertion point — the
                // request must have been computed against different state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
8841 
8842 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
8843 {
8844  FreeAtOffset(allocation->GetOffset());
8845 }
8846 
// Frees the suballocation that starts at the given offset, trying the possible
// locations from cheapest to most expensive: first live item of the 1st vector,
// last item of the 2nd/1st vector, then binary search in the middle of each
// vector. Asserts if the offset is not found.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 1st vector is searched with ascending offset order.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a free gap in place; CleanupAfterFree() trims/compacts later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer mode searches by ascending offset, double-stack mode by descending.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
8935 
8936 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8937 {
8938  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8939  const size_t suballocCount = AccessSuballocations1st().size();
8940  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8941 }
8942 
// Restores internal invariants after one or more suballocations were freed:
// extends the leading/trailing null-item runs, optionally compacts the 1st
// vector, resets the 2nd-vector mode when it empties, and swaps the vectors
// when the 1st one becomes empty while the ring buffer still has items.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block is free: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector: move live items to the front, dropping the nulls.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip null items now at the beginning of the new 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9039 
9040 
9042 // class VmaDeviceMemoryBlock
9043 
// Constructs an uninitialized block. Real initialization happens in Init();
// until then m_hMemory stays VK_NULL_HANDLE and m_pMetadata is null.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),          // Number of outstanding Map() references.
    m_pMappedData(VMA_NULL) // Host pointer, valid only while m_MapCount > 0.
{
}
9053 
9054 void VmaDeviceMemoryBlock::Init(
9055  VmaAllocator hAllocator,
9056  uint32_t newMemoryTypeIndex,
9057  VkDeviceMemory newMemory,
9058  VkDeviceSize newSize,
9059  uint32_t id,
9060  bool linearAlgorithm)
9061 {
9062  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9063 
9064  m_MemoryTypeIndex = newMemoryTypeIndex;
9065  m_Id = id;
9066  m_hMemory = newMemory;
9067 
9068  if(linearAlgorithm)
9069  {
9070  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9071  }
9072  else
9073  {
9074  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9075  }
9076  m_pMetadata->Init(newSize);
9077 }
9078 
9079 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
9080 {
9081  // This is the most important assert in the entire library.
9082  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
9083  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
9084 
9085  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
9086  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
9087  m_hMemory = VK_NULL_HANDLE;
9088 
9089  vma_delete(allocator, m_pMetadata);
9090  m_pMetadata = VMA_NULL;
9091 }
9092 
9093 bool VmaDeviceMemoryBlock::Validate() const
9094 {
9095  if((m_hMemory == VK_NULL_HANDLE) ||
9096  (m_pMetadata->GetSize() == 0))
9097  {
9098  return false;
9099  }
9100 
9101  return m_pMetadata->Validate();
9102 }
9103 
9104 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9105 {
9106  void* pData = nullptr;
9107  VkResult res = Map(hAllocator, 1, &pData);
9108  if(res != VK_SUCCESS)
9109  {
9110  return res;
9111  }
9112 
9113  res = m_pMetadata->CheckCorruption(pData);
9114 
9115  Unmap(hAllocator, 1);
9116 
9117  return res;
9118 }
9119 
9120 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
9121 {
9122  if(count == 0)
9123  {
9124  return VK_SUCCESS;
9125  }
9126 
9127  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9128  if(m_MapCount != 0)
9129  {
9130  m_MapCount += count;
9131  VMA_ASSERT(m_pMappedData != VMA_NULL);
9132  if(ppData != VMA_NULL)
9133  {
9134  *ppData = m_pMappedData;
9135  }
9136  return VK_SUCCESS;
9137  }
9138  else
9139  {
9140  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9141  hAllocator->m_hDevice,
9142  m_hMemory,
9143  0, // offset
9144  VK_WHOLE_SIZE,
9145  0, // flags
9146  &m_pMappedData);
9147  if(result == VK_SUCCESS)
9148  {
9149  if(ppData != VMA_NULL)
9150  {
9151  *ppData = m_pMappedData;
9152  }
9153  m_MapCount = count;
9154  }
9155  return result;
9156  }
9157 }
9158 
9159 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
9160 {
9161  if(count == 0)
9162  {
9163  return;
9164  }
9165 
9166  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9167  if(m_MapCount >= count)
9168  {
9169  m_MapCount -= count;
9170  if(m_MapCount == 0)
9171  {
9172  m_pMappedData = VMA_NULL;
9173  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
9174  }
9175  }
9176  else
9177  {
9178  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
9179  }
9180 }
9181 
9182 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9183 {
9184  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9185  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9186 
9187  void* pData;
9188  VkResult res = Map(hAllocator, 1, &pData);
9189  if(res != VK_SUCCESS)
9190  {
9191  return res;
9192  }
9193 
9194  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9195  VmaWriteMagicValue(pData, allocOffset + allocSize);
9196 
9197  Unmap(hAllocator, 1);
9198 
9199  return VK_SUCCESS;
9200 }
9201 
9202 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9203 {
9204  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9205  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9206 
9207  void* pData;
9208  VkResult res = Map(hAllocator, 1, &pData);
9209  if(res != VK_SUCCESS)
9210  {
9211  return res;
9212  }
9213 
9214  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
9215  {
9216  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
9217  }
9218  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
9219  {
9220  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
9221  }
9222 
9223  Unmap(hAllocator, 1);
9224 
9225  return VK_SUCCESS;
9226 }
9227 
9228 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
9229  const VmaAllocator hAllocator,
9230  const VmaAllocation hAllocation,
9231  VkBuffer hBuffer)
9232 {
9233  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9234  hAllocation->GetBlock() == this);
9235  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9236  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9237  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
9238  hAllocator->m_hDevice,
9239  hBuffer,
9240  m_hMemory,
9241  hAllocation->GetOffset());
9242 }
9243 
9244 VkResult VmaDeviceMemoryBlock::BindImageMemory(
9245  const VmaAllocator hAllocator,
9246  const VmaAllocation hAllocation,
9247  VkImage hImage)
9248 {
9249  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
9250  hAllocation->GetBlock() == this);
9251  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
9252  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
9253  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
9254  hAllocator->m_hDevice,
9255  hImage,
9256  m_hMemory,
9257  hAllocation->GetOffset());
9258 }
9259 
// Resets a VmaStatInfo to the identity element for VmaAddStatInfo:
// everything zeroed, minima seeded with UINT64_MAX so VMA_MIN folds work.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
9266 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; min/max fields are folded with
// VMA_MIN/VMA_MAX. Average fields are NOT updated here — they are derived
// afterwards by VmaPostprocessCalcStatInfo().
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
9280 
9281 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
9282 {
9283  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
9284  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
9285  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
9286  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
9287 }
9288 
// Constructs a custom pool by forwarding createInfo into the embedded block
// vector. The IGNORE_BUFFER_IMAGE_GRANULARITY flag collapses granularity to 1;
// the LINEAR_ALGORITHM flag selects the linear (stack/ring-buffer) metadata.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        (createInfo.flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0), // linearAlgorithm
    m_Id(0)
{
}
9305 
// Trivial destructor — the embedded m_BlockVector cleans itself up.
VmaPool_T::~VmaPool_T()
{
}
9309 
9310 #if VMA_STATS_STRING_ENABLED
9311 
9312 #endif // #if VMA_STATS_STRING_ENABLED
9313 
// Stores the configuration for a vector of memory blocks of one memory type.
// No blocks are created here — they appear lazily or via CreateMinBlocks().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool linearAlgorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_LinearAlgorithm(linearAlgorithm),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL), // Non-null only while defragmentation is in progress.
    m_NextBlockId(0)
{
}
9339 
9340 VmaBlockVector::~VmaBlockVector()
9341 {
9342  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
9343 
9344  for(size_t i = m_Blocks.size(); i--; )
9345  {
9346  m_Blocks[i]->Destroy(m_hAllocator);
9347  vma_delete(m_hAllocator, m_Blocks[i]);
9348  }
9349 }
9350 
9351 VkResult VmaBlockVector::CreateMinBlocks()
9352 {
9353  for(size_t i = 0; i < m_MinBlockCount; ++i)
9354  {
9355  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
9356  if(res != VK_SUCCESS)
9357  {
9358  return res;
9359  }
9360  }
9361  return VK_SUCCESS;
9362 }
9363 
9364 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
9365 {
9366  pStats->size = 0;
9367  pStats->unusedSize = 0;
9368  pStats->allocationCount = 0;
9369  pStats->unusedRangeCount = 0;
9370  pStats->unusedRangeSizeMax = 0;
9371 
9372  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9373 
9374  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9375  {
9376  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
9377  VMA_ASSERT(pBlock);
9378  VMA_HEAVY_ASSERT(pBlock->Validate());
9379  pBlock->m_pMetadata->AddPoolStats(*pStats);
9380  }
9381 }
9382 
9383 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
9384 {
9385  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9386  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
9387  (VMA_DEBUG_MARGIN > 0) &&
9388  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
9389 }
9390 
// Upper bound on retry attempts for an allocation loop — presumably the
// make-lost retry loop in VmaBlockVector::Allocate (outside this excerpt);
// verify at the point of use.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
9392 
9393 VkResult VmaBlockVector::Allocate(
9394  VmaPool hCurrentPool,
9395  uint32_t currentFrameIndex,
9396  VkDeviceSize size,
9397  VkDeviceSize alignment,
9398  const VmaAllocationCreateInfo& createInfo,
9399  VmaSuballocationType suballocType,
9400  VmaAllocation* pAllocation)
9401 {
9402  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
9403  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
9404  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9405  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
9406  const bool canCreateNewBlock =
9407  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
9408  (m_Blocks.size() < m_MaxBlockCount);
9409 
9410  // Upper address can only be used with linear allocator.
9411  if(isUpperAddress && !m_LinearAlgorithm)
9412  {
9413  return VK_ERROR_FEATURE_NOT_PRESENT;
9414  }
9415 
9416  // Early reject: requested allocation size is larger that maximum block size for this block vector.
9417  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
9418  {
9419  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9420  }
9421 
9422  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9423 
9424  /*
9425  Under certain condition, this whole section can be skipped for optimization, so
9426  we move on directly to trying to allocate with canMakeOtherLost. That's the case
9427  e.g. for custom pools with linear algorithm.
9428  */
9429  if(!canMakeOtherLost || canCreateNewBlock)
9430  {
9431  // 1. Search existing allocations. Try to allocate without making other allocations lost.
9432  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9433  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9434  {
9435  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9436  VMA_ASSERT(pCurrBlock);
9437  VmaAllocationRequest currRequest = {};
9438  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9439  currentFrameIndex,
9440  m_FrameInUseCount,
9441  m_BufferImageGranularity,
9442  size,
9443  alignment,
9444  isUpperAddress,
9445  suballocType,
9446  false, // canMakeOtherLost
9447  &currRequest))
9448  {
9449  // Allocate from pCurrBlock.
9450  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
9451 
9452  if(mapped)
9453  {
9454  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
9455  if(res != VK_SUCCESS)
9456  {
9457  return res;
9458  }
9459  }
9460 
9461  // We no longer have an empty Allocation.
9462  if(pCurrBlock->m_pMetadata->IsEmpty())
9463  {
9464  m_HasEmptyBlock = false;
9465  }
9466 
9467  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9468  pCurrBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
9469  (*pAllocation)->InitBlockAllocation(
9470  hCurrentPool,
9471  pCurrBlock,
9472  currRequest.offset,
9473  alignment,
9474  size,
9475  suballocType,
9476  mapped,
9477  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9478  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
9479  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9480  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9481  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9482  {
9483  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9484  }
9485  if(IsCorruptionDetectionEnabled())
9486  {
9487  VkResult res = pCurrBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
9488  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9489  }
9490  return VK_SUCCESS;
9491  }
9492  }
9493 
9494  // 2. Try to create new block.
9495  if(canCreateNewBlock)
9496  {
9497  // Calculate optimal size for new block.
9498  VkDeviceSize newBlockSize = m_PreferredBlockSize;
9499  uint32_t newBlockSizeShift = 0;
9500  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
9501 
9502  // Allocating blocks of other sizes is allowed only in default pools.
9503  // In custom pools block size is fixed.
9504  if(m_IsCustomPool == false)
9505  {
9506  // Allocate 1/8, 1/4, 1/2 as first blocks.
9507  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
9508  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
9509  {
9510  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9511  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
9512  {
9513  newBlockSize = smallerNewBlockSize;
9514  ++newBlockSizeShift;
9515  }
9516  else
9517  {
9518  break;
9519  }
9520  }
9521  }
9522 
9523  size_t newBlockIndex = 0;
9524  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
9525  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
9526  if(m_IsCustomPool == false)
9527  {
9528  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
9529  {
9530  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9531  if(smallerNewBlockSize >= size)
9532  {
9533  newBlockSize = smallerNewBlockSize;
9534  ++newBlockSizeShift;
9535  res = CreateBlock(newBlockSize, &newBlockIndex);
9536  }
9537  else
9538  {
9539  break;
9540  }
9541  }
9542  }
9543 
9544  if(res == VK_SUCCESS)
9545  {
9546  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
9547  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
9548 
9549  if(mapped)
9550  {
9551  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
9552  if(res != VK_SUCCESS)
9553  {
9554  return res;
9555  }
9556  }
9557 
9558  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
9559  VmaAllocationRequest allocRequest;
9560  if(pBlock->m_pMetadata->CreateAllocationRequest(
9561  currentFrameIndex,
9562  m_FrameInUseCount,
9563  m_BufferImageGranularity,
9564  size,
9565  alignment,
9566  isUpperAddress,
9567  suballocType,
9568  false, // canMakeOtherLost
9569  &allocRequest))
9570  {
9571  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9572  pBlock->m_pMetadata->Alloc(allocRequest, suballocType, size, isUpperAddress, *pAllocation);
9573  (*pAllocation)->InitBlockAllocation(
9574  hCurrentPool,
9575  pBlock,
9576  allocRequest.offset,
9577  alignment,
9578  size,
9579  suballocType,
9580  mapped,
9581  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9582  VMA_HEAVY_ASSERT(pBlock->Validate());
9583  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
9584  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9585  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9586  {
9587  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9588  }
9589  if(IsCorruptionDetectionEnabled())
9590  {
9591  res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, allocRequest.offset, size);
9592  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9593  }
9594  return VK_SUCCESS;
9595  }
9596  else
9597  {
9598  // Allocation from empty block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
9599  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9600  }
9601  }
9602  }
9603  }
9604 
9605  // 3. Try to allocate from existing blocks with making other allocations lost.
9606  if(canMakeOtherLost)
9607  {
9608  uint32_t tryIndex = 0;
9609  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
9610  {
9611  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
9612  VmaAllocationRequest bestRequest = {};
9613  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
9614 
9615  // 1. Search existing allocations.
9616  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9617  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9618  {
9619  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9620  VMA_ASSERT(pCurrBlock);
9621  VmaAllocationRequest currRequest = {};
9622  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9623  currentFrameIndex,
9624  m_FrameInUseCount,
9625  m_BufferImageGranularity,
9626  size,
9627  alignment,
9628  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
9629  suballocType,
9630  canMakeOtherLost,
9631  &currRequest))
9632  {
9633  const VkDeviceSize currRequestCost = currRequest.CalcCost();
9634  if(pBestRequestBlock == VMA_NULL ||
9635  currRequestCost < bestRequestCost)
9636  {
9637  pBestRequestBlock = pCurrBlock;
9638  bestRequest = currRequest;
9639  bestRequestCost = currRequestCost;
9640 
9641  if(bestRequestCost == 0)
9642  {
9643  break;
9644  }
9645  }
9646  }
9647  }
9648 
9649  if(pBestRequestBlock != VMA_NULL)
9650  {
9651  if(mapped)
9652  {
9653  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
9654  if(res != VK_SUCCESS)
9655  {
9656  return res;
9657  }
9658  }
9659 
9660  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
9661  currentFrameIndex,
9662  m_FrameInUseCount,
9663  &bestRequest))
9664  {
9665  // We no longer have an empty Allocation.
9666  if(pBestRequestBlock->m_pMetadata->IsEmpty())
9667  {
9668  m_HasEmptyBlock = false;
9669  }
9670  // Allocate from this pBlock.
9671  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9672  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
9673  (*pAllocation)->InitBlockAllocation(
9674  hCurrentPool,
9675  pBestRequestBlock,
9676  bestRequest.offset,
9677  alignment,
9678  size,
9679  suballocType,
9680  mapped,
9681  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9682  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
9683  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9684  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9685  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9686  {
9687  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9688  }
9689  if(IsCorruptionDetectionEnabled())
9690  {
9691  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
9692  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9693  }
9694  return VK_SUCCESS;
9695  }
9696  // else: Some allocations must have been touched while we are here. Next try.
9697  }
9698  else
9699  {
9700  // Could not find place in any of the blocks - break outer loop.
9701  break;
9702  }
9703  }
9704  /* Maximum number of tries exceeded - a very unlike event when many other
9705  threads are simultaneously touching allocations making it impossible to make
9706  lost at the same time as we try to allocate. */
9707  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
9708  {
9709  return VK_ERROR_TOO_MANY_OBJECTS;
9710  }
9711  }
9712 
9713  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9714 }
9715 
9716 void VmaBlockVector::Free(
9717  VmaAllocation hAllocation)
9718 {
9719  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
9720 
9721  // Scope for lock.
9722  {
9723  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9724 
9725  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9726 
9727  if(IsCorruptionDetectionEnabled())
9728  {
9729  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
9730  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
9731  }
9732 
9733  if(hAllocation->IsPersistentMap())
9734  {
9735  pBlock->Unmap(m_hAllocator, 1);
9736  }
9737 
9738  pBlock->m_pMetadata->Free(hAllocation);
9739  VMA_HEAVY_ASSERT(pBlock->Validate());
9740 
9741  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
9742 
9743  // pBlock became empty after this deallocation.
9744  if(pBlock->m_pMetadata->IsEmpty())
9745  {
9746  // Already has empty Allocation. We don't want to have two, so delete this one.
9747  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
9748  {
9749  pBlockToDelete = pBlock;
9750  Remove(pBlock);
9751  }
9752  // We now have first empty block.
9753  else
9754  {
9755  m_HasEmptyBlock = true;
9756  }
9757  }
9758  // pBlock didn't become empty, but we have another empty block - find and free that one.
9759  // (This is optional, heuristics.)
9760  else if(m_HasEmptyBlock)
9761  {
9762  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
9763  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
9764  {
9765  pBlockToDelete = pLastBlock;
9766  m_Blocks.pop_back();
9767  m_HasEmptyBlock = false;
9768  }
9769  }
9770 
9771  IncrementallySortBlocks();
9772  }
9773 
9774  // Destruction of a free Allocation. Deferred until this point, outside of mutex
9775  // lock, for performance reason.
9776  if(pBlockToDelete != VMA_NULL)
9777  {
9778  VMA_DEBUG_LOG(" Deleted empty allocation");
9779  pBlockToDelete->Destroy(m_hAllocator);
9780  vma_delete(m_hAllocator, pBlockToDelete);
9781  }
9782 }
9783 
9784 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
9785 {
9786  VkDeviceSize result = 0;
9787  for(size_t i = m_Blocks.size(); i--; )
9788  {
9789  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
9790  if(result >= m_PreferredBlockSize)
9791  {
9792  break;
9793  }
9794  }
9795  return result;
9796 }
9797 
9798 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
9799 {
9800  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9801  {
9802  if(m_Blocks[blockIndex] == pBlock)
9803  {
9804  VmaVectorRemove(m_Blocks, blockIndex);
9805  return;
9806  }
9807  }
9808  VMA_ASSERT(0);
9809 }
9810 
9811 void VmaBlockVector::IncrementallySortBlocks()
9812 {
9813  // Bubble sort only until first swap.
9814  for(size_t i = 1; i < m_Blocks.size(); ++i)
9815  {
9816  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
9817  {
9818  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
9819  return;
9820  }
9821  }
9822 }
9823 
9824 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
9825 {
9826  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
9827  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
9828  allocInfo.allocationSize = blockSize;
9829  VkDeviceMemory mem = VK_NULL_HANDLE;
9830  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
9831  if(res < 0)
9832  {
9833  return res;
9834  }
9835 
9836  // New VkDeviceMemory successfully created.
9837 
9838  // Create new Allocation for it.
9839  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
9840  pBlock->Init(
9841  m_hAllocator,
9842  m_MemoryTypeIndex,
9843  mem,
9844  allocInfo.allocationSize,
9845  m_NextBlockId++,
9846  m_LinearAlgorithm);
9847 
9848  m_Blocks.push_back(pBlock);
9849  if(pNewBlockIndex != VMA_NULL)
9850  {
9851  *pNewBlockIndex = m_Blocks.size() - 1;
9852  }
9853 
9854  return VK_SUCCESS;
9855 }
9856 
9857 #if VMA_STATS_STRING_ENABLED
9858 
// Writes this block vector's state as one JSON object: pool parameters
// (different sets for custom pools vs. default per-memory-type vectors)
// followed by a "Blocks" object keyed by block ID. Takes m_Mutex for the
// whole dump so the snapshot is consistent.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools expose their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_LinearAlgorithm)
        {
            json.WriteString("LinearAlgorithm");
            json.WriteBool(true);
        }
    }
    else
    {
        // Default pool: only the preferred block size is configurable.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric ID.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
9921 
9922 #endif // #if VMA_STATS_STRING_ENABLED
9923 
9924 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
9925  VmaAllocator hAllocator,
9926  uint32_t currentFrameIndex)
9927 {
9928  if(m_pDefragmentator == VMA_NULL)
9929  {
9930  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
9931  hAllocator,
9932  this,
9933  currentFrameIndex);
9934  }
9935 
9936  return m_pDefragmentator;
9937 }
9938 
// Runs the previously created defragmentator against this block vector,
// accumulates move statistics into *pDefragmentationStats, decrements the
// caller's remaining budgets (maxBytesToMove / maxAllocationsToMove are
// in-out), and frees blocks that became empty (above m_MinBlockCount).
// No-op returning VK_SUCCESS if EnsureDefragmentator() was never called.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must respect the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the caller's remaining budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove does not
    // invalidate indices still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // At the minimum block count - keep the empty block around.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
9995 
9996 void VmaBlockVector::DestroyDefragmentator()
9997 {
9998  if(m_pDefragmentator != VMA_NULL)
9999  {
10000  vma_delete(m_hAllocator, m_pDefragmentator);
10001  m_pDefragmentator = VMA_NULL;
10002  }
10003 }
10004 
10005 void VmaBlockVector::MakePoolAllocationsLost(
10006  uint32_t currentFrameIndex,
10007  size_t* pLostAllocationCount)
10008 {
10009  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10010  size_t lostAllocationCount = 0;
10011  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10012  {
10013  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10014  VMA_ASSERT(pBlock);
10015  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10016  }
10017  if(pLostAllocationCount != VMA_NULL)
10018  {
10019  *pLostAllocationCount = lostAllocationCount;
10020  }
10021 }
10022 
10023 VkResult VmaBlockVector::CheckCorruption()
10024 {
10025  if(!IsCorruptionDetectionEnabled())
10026  {
10027  return VK_ERROR_FEATURE_NOT_PRESENT;
10028  }
10029 
10030  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10031  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10032  {
10033  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10034  VMA_ASSERT(pBlock);
10035  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10036  if(res != VK_SUCCESS)
10037  {
10038  return res;
10039  }
10040  }
10041  return VK_SUCCESS;
10042 }
10043 
10044 void VmaBlockVector::AddStats(VmaStats* pStats)
10045 {
10046  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10047  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
10048 
10049  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10050 
10051  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10052  {
10053  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10054  VMA_ASSERT(pBlock);
10055  VMA_HEAVY_ASSERT(pBlock->Validate());
10056  VmaStatInfo allocationStatInfo;
10057  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
10058  VmaAddStatInfo(pStats->total, allocationStatInfo);
10059  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
10060  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
10061  }
10062 }
10063 
10065 // VmaDefragmentator members definition
10066 
// Constructs a defragmentator bound to one block vector. Both internal
// vectors use the allocator's custom allocation callbacks. Linear-algorithm
// block vectors cannot be defragmented (asserted).
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(!pBlockVector->UsesLinearAlgorithm());
}
10081 
10082 VmaDefragmentator::~VmaDefragmentator()
10083 {
10084  for(size_t i = m_Blocks.size(); i--; )
10085  {
10086  vma_delete(m_hAllocator, m_Blocks[i]);
10087  }
10088 }
10089 
10090 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
10091 {
10092  AllocationInfo allocInfo;
10093  allocInfo.m_hAllocation = hAlloc;
10094  allocInfo.m_pChanged = pChanged;
10095  m_Allocations.push_back(allocInfo);
10096 }
10097 
10098 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
10099 {
10100  // It has already been mapped for defragmentation.
10101  if(m_pMappedDataForDefragmentation)
10102  {
10103  *ppMappedData = m_pMappedDataForDefragmentation;
10104  return VK_SUCCESS;
10105  }
10106 
10107  // It is originally mapped.
10108  if(m_pBlock->GetMappedData())
10109  {
10110  *ppMappedData = m_pBlock->GetMappedData();
10111  return VK_SUCCESS;
10112  }
10113 
10114  // Map on first usage.
10115  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
10116  *ppMappedData = m_pMappedDataForDefragmentation;
10117  return res;
10118 }
10119 
10120 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
10121 {
10122  if(m_pMappedDataForDefragmentation != VMA_NULL)
10123  {
10124  m_pBlock->Unmap(hAllocator, 1);
10125  }
10126 }
10127 
// One round of defragmentation: walks candidate allocations from the most
// "source-like" block (end of m_Blocks) backwards and tries to move each
// into an earlier ("destination-like") block, copying bytes through mapped
// pointers. Returns VK_SUCCESS when all candidates are processed, or
// VK_INCOMPLETE when a budget (maxBytesToMove / maxAllocationsToMove)
// would be exceeded.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX is a sentinel meaning "position at the last allocation of
    // the current block" - resolved by the while loop below.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        // Cache the allocation's placement parameters before any mutation.
        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (or reuse existing mappings) before copying.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-establish corruption-detection magic values at the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register in destination metadata, free from source,
                // and repoint the allocation handle at its new home.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the move to the caller that registered this allocation.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, falling back to the previous
        // block (with the SIZE_MAX sentinel) when this block is exhausted.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
10270 
// Top-level defragmentation driver. Phases:
//   1. Build a BlockInfo for every block of the bound block vector.
//   2. Distribute the registered candidate allocations to their blocks
//      (via binary search on block pointer), skipping lost allocations.
//   3. Sort blocks from most "destination" to most "source" and sort each
//      block's allocations largest-first.
//   4. Run up to 2 DefragmentRound passes within the given budgets.
//   5. Unmap anything the rounds mapped.
// Caller (VmaBlockVector::Defragment) holds the block vector's mutex.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so blocks can be found by binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
10338 
10339 bool VmaDefragmentator::MoveMakesSense(
10340  size_t dstBlockIndex, VkDeviceSize dstOffset,
10341  size_t srcBlockIndex, VkDeviceSize srcOffset)
10342 {
10343  if(dstBlockIndex < srcBlockIndex)
10344  {
10345  return true;
10346  }
10347  if(dstBlockIndex > srcBlockIndex)
10348  {
10349  return false;
10350  }
10351  if(dstOffset < srcOffset)
10352  {
10353  return true;
10354  }
10355  return false;
10356 }
10357 
10359 // VmaRecorder
10360 
10361 #if VMA_RECORDING_ENABLED
10362 
// Default-constructs the recorder in an inactive state: no file open and
// timer fields set to INT64_MAX until Init() queries the real values.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
10371 
// Opens the recording file and writes the format header. Uses fopen_s and
// QueryPerformanceFrequency/Counter, so this implementation is
// Windows/MSVC-specific. Returns VK_ERROR_INITIALIZATION_FAILED if the
// file cannot be opened for writing.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and start time; timestamps in later records
    // are computed relative to m_StartCounter.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line plus "major,minor" file-format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
10393 
10394 VmaRecorder::~VmaRecorder()
10395 {
10396  if(m_File != VMA_NULL)
10397  {
10398  fclose(m_File);
10399  }
10400 }
10401 
// Appends one CSV line recording a vmaCreateAllocator call:
// threadId,time,frameIndex,functionName.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
10411 
// Appends one CSV line recording a vmaDestroyAllocator call.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
10421 
// Appends one CSV line recording a vmaCreatePool call: all pool creation
// parameters followed by the resulting pool handle (as %p).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
10438 
// Appends one CSV line recording a vmaDestroyPool call (pool handle as %p).
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
10449 
// Appends one CSV line recording a vmaAllocateMemory call: memory
// requirements, all creation parameters, the resulting allocation handle,
// and the (possibly copied) user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10474 
// Appends one CSV line recording a vmaAllocateMemoryForBuffer call.
// Like RecordAllocateMemory but additionally logs the dedicated-allocation
// hints (requires/prefers) as 0/1 flags.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10503 
// Appends one CSV line recording a vmaAllocateMemoryForImage call.
// Same field layout as RecordAllocateMemoryForBuffer.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10532 
// Appends a CSV record for a vmaFreeMemory call (allocation handle only).
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10544 
// Appends a CSV record for a vmaSetAllocationUserData call.
// Queries the allocation itself to learn whether pUserData is a string or an
// opaque pointer, so the trace renders it the same way the allocation stores it.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
10561 
// Appends a CSV record for a vmaCreateLostAllocation call.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10573 
// Appends a CSV record for a vmaMapMemory call.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10585 
// Appends a CSV record for a vmaUnmapMemory call.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10597 
// Appends a CSV record for a vmaFlushAllocation call (handle, offset, size).
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
10611 
// Appends a CSV record for a vmaInvalidateAllocation call (handle, offset, size).
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
10625 
// Appends a CSV record for a vmaCreateBuffer call: the relevant fields of
// VkBufferCreateInfo followed by the VmaAllocationCreateInfo fields.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10651 
// Appends a CSV record for a vmaCreateImage call: the relevant fields of
// VkImageCreateInfo followed by the VmaAllocationCreateInfo fields.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10686 
// Appends a CSV record for a vmaDestroyBuffer call.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10698 
// Appends a CSV record for a vmaDestroyImage call.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10710 
// Appends a CSV record for a vmaTouchAllocation call.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10722 
// Appends a CSV record for a vmaGetAllocationInfo call.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
10734 
// Appends a CSV record for a vmaMakePoolAllocationsLost call (pool handle only).
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
10746 
10747 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
10748 {
10749  if(pUserData != VMA_NULL)
10750  {
10751  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
10752  {
10753  m_Str = (const char*)pUserData;
10754  }
10755  else
10756  {
10757  sprintf_s(m_PtrStr, "%p", pUserData);
10758  m_Str = m_PtrStr;
10759  }
10760  }
10761  else
10762  {
10763  m_Str = "";
10764  }
10765 }
10766 
// Writes the "Config,Begin" .. "Config,End" header section of the trace file:
// physical-device identity, the device limits VMA depends on, the full memory
// heap/type topology, enabled extensions, and the compile-time VMA_DEBUG_*
// macro values. A replaying tool uses this to reproduce the same environment.
// Called once right after Init(); no locking here — recording has not started.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Only the limits that influence VMA's allocation decisions.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
10812 
// Fills the per-call fields common to every trace record: the calling thread id
// and a timestamp in seconds relative to recorder creation.
// NOTE(review): Win32-only (GetCurrentThreadId / QueryPerformanceCounter);
// the enclosing VMA_RECORDING_ENABLED feature is Windows-only in this version.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    // Convert QPC ticks to seconds using the frequency captured at Init time.
    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
10821 
10822 void VmaRecorder::Flush()
10823 {
10824  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
10825  {
10826  fflush(m_File);
10827  }
10828 }
10829 
10830 #endif // #if VMA_RECORDING_ENABLED
10831 
10833 // VmaAllocator_T
10834 
10835 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
10836  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
10837  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
10838  m_hDevice(pCreateInfo->device),
10839  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
10840  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
10841  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
10842  m_PreferredLargeHeapBlockSize(0),
10843  m_PhysicalDevice(pCreateInfo->physicalDevice),
10844  m_CurrentFrameIndex(0),
10845  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
10846  m_NextPoolId(0)
10848  ,m_pRecorder(VMA_NULL)
10849 #endif
10850 {
10851  if(VMA_DEBUG_DETECT_CORRUPTION)
10852  {
10853  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
10854  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
10855  }
10856 
10857  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
10858 
10859 #if !(VMA_DEDICATED_ALLOCATION)
10861  {
10862  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
10863  }
10864 #endif
10865 
10866  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
10867  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
10868  memset(&m_MemProps, 0, sizeof(m_MemProps));
10869 
10870  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
10871  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
10872 
10873  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
10874  {
10875  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
10876  }
10877 
10878  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
10879  {
10880  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
10881  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
10882  }
10883 
10884  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
10885 
10886  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
10887  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
10888 
10889  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
10890  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10891 
10892  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
10893  {
10894  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
10895  {
10896  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
10897  if(limit != VK_WHOLE_SIZE)
10898  {
10899  m_HeapSizeLimit[heapIndex] = limit;
10900  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
10901  {
10902  m_MemProps.memoryHeaps[heapIndex].size = limit;
10903  }
10904  }
10905  }
10906  }
10907 
10908  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
10909  {
10910  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
10911 
10912  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
10913  this,
10914  memTypeIndex,
10915  preferredBlockSize,
10916  0,
10917  SIZE_MAX,
10918  GetBufferImageGranularity(),
10919  pCreateInfo->frameInUseCount,
10920  false, // isCustomPool
10921  false); // linearAlgorithm
10922  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
10923  // becase minBlockCount is 0.
10924  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
10925 
10926  }
10927 }
10928 
// Second-phase initialization, separate from the constructor so it can return
// a VkResult: sets up call recording if pRecordSettings names a file.
// Returns VK_SUCCESS when recording is disabled or starts correctly,
// the recorder's error on failure, or VK_ERROR_FEATURE_NOT_PRESENT when
// recording is requested but compiled out (VMA_RECORDING_ENABLED != 1).
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the environment header, then the record for allocator creation
        // itself, so the trace is self-describing from the first line.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
10956 
10957 VmaAllocator_T::~VmaAllocator_T()
10958 {
10959 #if VMA_RECORDING_ENABLED
10960  if(m_pRecorder != VMA_NULL)
10961  {
10962  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
10963  vma_delete(this, m_pRecorder);
10964  }
10965 #endif
10966 
10967  VMA_ASSERT(m_Pools.empty());
10968 
10969  for(size_t i = GetMemoryTypeCount(); i--; )
10970  {
10971  vma_delete(this, m_pDedicatedAllocations[i]);
10972  vma_delete(this, m_pBlockVectors[i]);
10973  }
10974 }
10975 
// Fills m_VulkanFunctions in two passes:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the statically
//    linked Vulkan entry points (and fetch the KHR dedicated-allocation pair
//    via vkGetDeviceProcAddr, since those are extension functions).
// 2. Overwrite any entry the user supplied explicitly via pVulkanFunctions.
// Finally asserts every required pointer is non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// User-provided pointers take precedence over the statically imported ones.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are only required when the extension is in use.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
11061 
11062 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
11063 {
11064  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11065  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
11066  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
11067  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
11068 }
11069 
11070 VkResult VmaAllocator_T::AllocateMemoryOfType(
11071  VkDeviceSize size,
11072  VkDeviceSize alignment,
11073  bool dedicatedAllocation,
11074  VkBuffer dedicatedBuffer,
11075  VkImage dedicatedImage,
11076  const VmaAllocationCreateInfo& createInfo,
11077  uint32_t memTypeIndex,
11078  VmaSuballocationType suballocType,
11079  VmaAllocation* pAllocation)
11080 {
11081  VMA_ASSERT(pAllocation != VMA_NULL);
11082  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
11083 
11084  VmaAllocationCreateInfo finalCreateInfo = createInfo;
11085 
11086  // If memory type is not HOST_VISIBLE, disable MAPPED.
11087  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11088  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
11089  {
11090  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
11091  }
11092 
11093  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
11094  VMA_ASSERT(blockVector);
11095 
11096  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
11097  bool preferDedicatedMemory =
11098  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
11099  dedicatedAllocation ||
11100  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
11101  size > preferredBlockSize / 2;
11102 
11103  if(preferDedicatedMemory &&
11104  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
11105  finalCreateInfo.pool == VK_NULL_HANDLE)
11106  {
11108  }
11109 
11110  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
11111  {
11112  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11113  {
11114  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11115  }
11116  else
11117  {
11118  return AllocateDedicatedMemory(
11119  size,
11120  suballocType,
11121  memTypeIndex,
11122  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11123  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11124  finalCreateInfo.pUserData,
11125  dedicatedBuffer,
11126  dedicatedImage,
11127  pAllocation);
11128  }
11129  }
11130  else
11131  {
11132  VkResult res = blockVector->Allocate(
11133  VK_NULL_HANDLE, // hCurrentPool
11134  m_CurrentFrameIndex.load(),
11135  size,
11136  alignment,
11137  finalCreateInfo,
11138  suballocType,
11139  pAllocation);
11140  if(res == VK_SUCCESS)
11141  {
11142  return res;
11143  }
11144 
11145  // 5. Try dedicated memory.
11146  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11147  {
11148  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11149  }
11150  else
11151  {
11152  res = AllocateDedicatedMemory(
11153  size,
11154  suballocType,
11155  memTypeIndex,
11156  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11157  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11158  finalCreateInfo.pUserData,
11159  dedicatedBuffer,
11160  dedicatedImage,
11161  pAllocation);
11162  if(res == VK_SUCCESS)
11163  {
11164  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
11165  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
11166  return VK_SUCCESS;
11167  }
11168  else
11169  {
11170  // Everything failed: Return error code.
11171  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11172  return res;
11173  }
11174  }
11175  }
11176 }
11177 
// Allocates a whole VkDeviceMemory block for a single allocation (no
// sub-allocation), optionally chaining VkMemoryDedicatedAllocateInfoKHR for
// the given buffer or image, optionally persistently mapping it, and registers
// the result in m_pDedicatedAllocations[memTypeIndex].
// On vkMapMemory failure the freshly allocated memory is released before
// returning, so no VkDeviceMemory leaks.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When VK_KHR_dedicated_allocation is in use, tell the driver which single
    // resource this memory is for. At most one of buffer/image may be set.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole range if VMA_ALLOCATION_CREATE_MAPPED_BIT was set.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Roll back the allocation so the device memory does not leak.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
11260 
// Queries memory requirements for a buffer. With VK_KHR_dedicated_allocation
// enabled, uses vkGetBufferMemoryRequirements2KHR and also reports whether the
// driver requires/prefers a dedicated allocation; otherwise falls back to the
// core entry point and reports false for both.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        // Chained into memReq2.pNext so the driver fills the dedicated flags.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
11292 
// Image counterpart of GetBufferMemoryRequirements: queries requirements via
// vkGetImageMemoryRequirements2KHR with VkMemoryDedicatedRequirementsKHR when
// the extension is enabled, otherwise via the core entry point (dedicated
// flags then reported as false).
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
11324 
11325 VkResult VmaAllocator_T::AllocateMemory(
11326  const VkMemoryRequirements& vkMemReq,
11327  bool requiresDedicatedAllocation,
11328  bool prefersDedicatedAllocation,
11329  VkBuffer dedicatedBuffer,
11330  VkImage dedicatedImage,
11331  const VmaAllocationCreateInfo& createInfo,
11332  VmaSuballocationType suballocType,
11333  VmaAllocation* pAllocation)
11334 {
11335  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
11336  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11337  {
11338  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
11339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11340  }
11341  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11343  {
11344  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
11345  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11346  }
11347  if(requiresDedicatedAllocation)
11348  {
11349  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11350  {
11351  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
11352  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11353  }
11354  if(createInfo.pool != VK_NULL_HANDLE)
11355  {
11356  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
11357  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11358  }
11359  }
11360  if((createInfo.pool != VK_NULL_HANDLE) &&
11361  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
11362  {
11363  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
11364  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11365  }
11366 
11367  if(createInfo.pool != VK_NULL_HANDLE)
11368  {
11369  const VkDeviceSize alignmentForPool = VMA_MAX(
11370  vkMemReq.alignment,
11371  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
11372  return createInfo.pool->m_BlockVector.Allocate(
11373  createInfo.pool,
11374  m_CurrentFrameIndex.load(),
11375  vkMemReq.size,
11376  alignmentForPool,
11377  createInfo,
11378  suballocType,
11379  pAllocation);
11380  }
11381  else
11382  {
11383  // Bit mask of memory Vulkan types acceptable for this allocation.
11384  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
11385  uint32_t memTypeIndex = UINT32_MAX;
11386  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11387  if(res == VK_SUCCESS)
11388  {
11389  VkDeviceSize alignmentForMemType = VMA_MAX(
11390  vkMemReq.alignment,
11391  GetMemoryTypeMinAlignment(memTypeIndex));
11392 
11393  res = AllocateMemoryOfType(
11394  vkMemReq.size,
11395  alignmentForMemType,
11396  requiresDedicatedAllocation || prefersDedicatedAllocation,
11397  dedicatedBuffer,
11398  dedicatedImage,
11399  createInfo,
11400  memTypeIndex,
11401  suballocType,
11402  pAllocation);
11403  // Succeeded on first try.
11404  if(res == VK_SUCCESS)
11405  {
11406  return res;
11407  }
11408  // Allocation from this memory type failed. Try other compatible memory types.
11409  else
11410  {
11411  for(;;)
11412  {
11413  // Remove old memTypeIndex from list of possibilities.
11414  memoryTypeBits &= ~(1u << memTypeIndex);
11415  // Find alternative memTypeIndex.
11416  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
11417  if(res == VK_SUCCESS)
11418  {
11419  alignmentForMemType = VMA_MAX(
11420  vkMemReq.alignment,
11421  GetMemoryTypeMinAlignment(memTypeIndex));
11422 
11423  res = AllocateMemoryOfType(
11424  vkMemReq.size,
11425  alignmentForMemType,
11426  requiresDedicatedAllocation || prefersDedicatedAllocation,
11427  dedicatedBuffer,
11428  dedicatedImage,
11429  createInfo,
11430  memTypeIndex,
11431  suballocType,
11432  pAllocation);
11433  // Allocation from this alternative memory type succeeded.
11434  if(res == VK_SUCCESS)
11435  {
11436  return res;
11437  }
11438  // else: Allocation from this memory type failed. Try next one - next loop iteration.
11439  }
11440  // No other matching memory type index could be found.
11441  else
11442  {
11443  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
11444  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11445  }
11446  }
11447  }
11448  }
11449  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
11450  else
11451  return res;
11452  }
11453 }
11454 
11455 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
11456 {
11457  VMA_ASSERT(allocation);
11458 
11459  if(allocation->CanBecomeLost() == false ||
11460  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
11461  {
11462  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11463  {
11464  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
11465  }
11466 
11467  switch(allocation->GetType())
11468  {
11469  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
11470  {
11471  VmaBlockVector* pBlockVector = VMA_NULL;
11472  VmaPool hPool = allocation->GetPool();
11473  if(hPool != VK_NULL_HANDLE)
11474  {
11475  pBlockVector = &hPool->m_BlockVector;
11476  }
11477  else
11478  {
11479  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
11480  pBlockVector = m_pBlockVectors[memTypeIndex];
11481  }
11482  pBlockVector->Free(allocation);
11483  }
11484  break;
11485  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
11486  FreeDedicatedMemory(allocation);
11487  break;
11488  default:
11489  VMA_ASSERT(0);
11490  }
11491  }
11492 
11493  allocation->SetUserData(this, VMA_NULL);
11494  vma_delete(this, allocation);
11495 }
11496 
11497 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
11498 {
11499  // Initialize.
11500  InitStatInfo(pStats->total);
11501  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
11502  InitStatInfo(pStats->memoryType[i]);
11503  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11504  InitStatInfo(pStats->memoryHeap[i]);
11505 
11506  // Process default pools.
11507  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11508  {
11509  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11510  VMA_ASSERT(pBlockVector);
11511  pBlockVector->AddStats(pStats);
11512  }
11513 
11514  // Process custom pools.
11515  {
11516  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11517  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11518  {
11519  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
11520  }
11521  }
11522 
11523  // Process dedicated allocations.
11524  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11525  {
11526  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11527  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11528  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
11529  VMA_ASSERT(pDedicatedAllocVector);
11530  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
11531  {
11532  VmaStatInfo allocationStatInfo;
11533  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
11534  VmaAddStatInfo(pStats->total, allocationStatInfo);
11535  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11536  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11537  }
11538  }
11539 
11540  // Postprocess.
11541  VmaPostprocessCalcStatInfo(pStats->total);
11542  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
11543  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
11544  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
11545  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
11546 }
11547 
// PCI vendor ID of AMD (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
11549 
11550 VkResult VmaAllocator_T::Defragment(
11551  VmaAllocation* pAllocations,
11552  size_t allocationCount,
11553  VkBool32* pAllocationsChanged,
11554  const VmaDefragmentationInfo* pDefragmentationInfo,
11555  VmaDefragmentationStats* pDefragmentationStats)
11556 {
11557  if(pAllocationsChanged != VMA_NULL)
11558  {
11559  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
11560  }
11561  if(pDefragmentationStats != VMA_NULL)
11562  {
11563  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
11564  }
11565 
11566  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
11567 
11568  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
11569 
11570  const size_t poolCount = m_Pools.size();
11571 
11572  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
11573  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11574  {
11575  VmaAllocation hAlloc = pAllocations[allocIndex];
11576  VMA_ASSERT(hAlloc);
11577  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
11578  // DedicatedAlloc cannot be defragmented.
11579  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11580  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
11581  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
11582  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
11583  // Lost allocation cannot be defragmented.
11584  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
11585  {
11586  VmaBlockVector* pAllocBlockVector = VMA_NULL;
11587 
11588  const VmaPool hAllocPool = hAlloc->GetPool();
11589  // This allocation belongs to custom pool.
11590  if(hAllocPool != VK_NULL_HANDLE)
11591  {
11592  // Pools with linear algorithm are not defragmented.
11593  if(!hAllocPool->m_BlockVector.UsesLinearAlgorithm())
11594  {
11595  pAllocBlockVector = &hAllocPool->m_BlockVector;
11596  }
11597  }
11598  // This allocation belongs to general pool.
11599  else
11600  {
11601  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
11602  }
11603 
11604  if(pAllocBlockVector != VMA_NULL)
11605  {
11606  VmaDefragmentator* const pDefragmentator =
11607  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
11608  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
11609  &pAllocationsChanged[allocIndex] : VMA_NULL;
11610  pDefragmentator->AddAllocation(hAlloc, pChanged);
11611  }
11612  }
11613  }
11614 
11615  VkResult result = VK_SUCCESS;
11616 
11617  // ======== Main processing.
11618 
11619  VkDeviceSize maxBytesToMove = SIZE_MAX;
11620  uint32_t maxAllocationsToMove = UINT32_MAX;
11621  if(pDefragmentationInfo != VMA_NULL)
11622  {
11623  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
11624  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
11625  }
11626 
11627  // Process standard memory.
11628  for(uint32_t memTypeIndex = 0;
11629  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
11630  ++memTypeIndex)
11631  {
11632  // Only HOST_VISIBLE memory types can be defragmented.
11633  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11634  {
11635  result = m_pBlockVectors[memTypeIndex]->Defragment(
11636  pDefragmentationStats,
11637  maxBytesToMove,
11638  maxAllocationsToMove);
11639  }
11640  }
11641 
11642  // Process custom pools.
11643  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
11644  {
11645  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
11646  pDefragmentationStats,
11647  maxBytesToMove,
11648  maxAllocationsToMove);
11649  }
11650 
11651  // ======== Destroy defragmentators.
11652 
11653  // Process custom pools.
11654  for(size_t poolIndex = poolCount; poolIndex--; )
11655  {
11656  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
11657  }
11658 
11659  // Process standard memory.
11660  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
11661  {
11662  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11663  {
11664  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
11665  }
11666  }
11667 
11668  return result;
11669 }
11670 
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    // Fills pAllocationInfo with the allocation's current parameters and, for
    // lost-capable allocations, atomically "touches" it by advancing its
    // last-use frame index to the current frame.
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free CAS loop against the allocation's last-use frame index.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost: report placeholder values.
                // Size and user data are still meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Touched in the current frame: report real parameters.
                // pMappedData is null here; AllocateMemory rejects
                // MAPPED_BIT combined with CAN_BECOME_LOST_BIT.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; if another
                // thread changed it concurrently, loop and re-evaluate.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Same touch loop, performed here only when statistics strings are
        // enabled — presumably so last-use shows up in dumps; TODO confirm.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
11742 
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // Marks the allocation as used in the current frame. Returns false if the
    // allocation has already been lost, true otherwise.
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        // Lock-free CAS loop against the allocation's last-use frame index.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost: cannot be touched.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully marked as used this frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use; on contention, loop and re-check.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Same touch loop, compiled in only when statistics strings are
        // enabled — presumably for usage tracking in dumps; TODO confirm.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // A non-lost-capable allocation can always be used.
        return true;
    }
}
11794 
11795 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
11796 {
11797  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
11798 
11799  const bool isLinearAlgorithm = (pCreateInfo->flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0;
11800 
11801  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
11802 
11803  if(newCreateInfo.maxBlockCount == 0)
11804  {
11805  newCreateInfo.maxBlockCount = isLinearAlgorithm ? 1 : SIZE_MAX;
11806  }
11807  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount ||
11808  isLinearAlgorithm && newCreateInfo.maxBlockCount > 1)
11809  {
11810  return VK_ERROR_INITIALIZATION_FAILED;
11811  }
11812  if(newCreateInfo.blockSize == 0)
11813  {
11814  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
11815  }
11816 
11817  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
11818 
11819  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
11820  if(res != VK_SUCCESS)
11821  {
11822  vma_delete(this, *pPool);
11823  *pPool = VMA_NULL;
11824  return res;
11825  }
11826 
11827  // Add to m_Pools.
11828  {
11829  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11830  (*pPool)->SetId(m_NextPoolId++);
11831  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
11832  }
11833 
11834  return VK_SUCCESS;
11835 }
11836 
11837 void VmaAllocator_T::DestroyPool(VmaPool pool)
11838 {
11839  // Remove from m_Pools.
11840  {
11841  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11842  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
11843  VMA_ASSERT(success && "Pool not found in Allocator.");
11844  }
11845 
11846  vma_delete(this, pool);
11847 }
11848 
11849 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
11850 {
11851  pool->m_BlockVector.GetPoolStats(pPoolStats);
11852 }
11853 
11854 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
11855 {
11856  m_CurrentFrameIndex.store(frameIndex);
11857 }
11858 
11859 void VmaAllocator_T::MakePoolAllocationsLost(
11860  VmaPool hPool,
11861  size_t* pLostAllocationCount)
11862 {
11863  hPool->m_BlockVector.MakePoolAllocationsLost(
11864  m_CurrentFrameIndex.load(),
11865  pLostAllocationCount);
11866 }
11867 
11868 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
11869 {
11870  return hPool->m_BlockVector.CheckCorruption();
11871 }
11872 
11873 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
11874 {
11875  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
11876 
11877  // Process default pools.
11878  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11879  {
11880  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
11881  {
11882  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11883  VMA_ASSERT(pBlockVector);
11884  VkResult localRes = pBlockVector->CheckCorruption();
11885  switch(localRes)
11886  {
11887  case VK_ERROR_FEATURE_NOT_PRESENT:
11888  break;
11889  case VK_SUCCESS:
11890  finalRes = VK_SUCCESS;
11891  break;
11892  default:
11893  return localRes;
11894  }
11895  }
11896  }
11897 
11898  // Process custom pools.
11899  {
11900  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11901  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11902  {
11903  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
11904  {
11905  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
11906  switch(localRes)
11907  {
11908  case VK_ERROR_FEATURE_NOT_PRESENT:
11909  break;
11910  case VK_SUCCESS:
11911  finalRes = VK_SUCCESS;
11912  break;
11913  default:
11914  return localRes;
11915  }
11916  }
11917  }
11918  }
11919 
11920  return finalRes;
11921 }
11922 
11923 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
11924 {
11925  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
11926  (*pAllocation)->InitLost();
11927 }
11928 
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    // Calls vkAllocateMemory while enforcing the optional per-heap size limit,
    // and notifies the user's allocate callback on success.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Heap has a budget: check and update it under the mutex so the
        // remaining budget stays consistent with the allocation.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the remaining heap budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: fail without calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // No budget configured for this heap: allocate directly.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // User callback is informed only about successful allocations.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
11962 
11963 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
11964 {
11965  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
11966  {
11967  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
11968  }
11969 
11970  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
11971 
11972  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
11973  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
11974  {
11975  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
11976  m_HeapSizeLimit[heapIndex] += size;
11977  }
11978 }
11979 
11980 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
11981 {
11982  if(hAllocation->CanBecomeLost())
11983  {
11984  return VK_ERROR_MEMORY_MAP_FAILED;
11985  }
11986 
11987  switch(hAllocation->GetType())
11988  {
11989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
11990  {
11991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
11992  char *pBytes = VMA_NULL;
11993  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
11994  if(res == VK_SUCCESS)
11995  {
11996  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
11997  hAllocation->BlockAllocMap();
11998  }
11999  return res;
12000  }
12001  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12002  return hAllocation->DedicatedAllocMap(this, ppData);
12003  default:
12004  VMA_ASSERT(0);
12005  return VK_ERROR_MEMORY_MAP_FAILED;
12006  }
12007 }
12008 
12009 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12010 {
12011  switch(hAllocation->GetType())
12012  {
12013  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12014  {
12015  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12016  hAllocation->BlockAllocUnmap();
12017  pBlock->Unmap(this, 1);
12018  }
12019  break;
12020  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12021  hAllocation->DedicatedAllocUnmap(this);
12022  break;
12023  default:
12024  VMA_ASSERT(0);
12025  }
12026 }
12027 
12028 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12029 {
12030  VkResult res = VK_SUCCESS;
12031  switch(hAllocation->GetType())
12032  {
12033  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12034  res = GetVulkanFunctions().vkBindBufferMemory(
12035  m_hDevice,
12036  hBuffer,
12037  hAllocation->GetMemory(),
12038  0); //memoryOffset
12039  break;
12040  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12041  {
12042  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12043  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12044  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12045  break;
12046  }
12047  default:
12048  VMA_ASSERT(0);
12049  }
12050  return res;
12051 }
12052 
12053 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12054 {
12055  VkResult res = VK_SUCCESS;
12056  switch(hAllocation->GetType())
12057  {
12058  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12059  res = GetVulkanFunctions().vkBindImageMemory(
12060  m_hDevice,
12061  hImage,
12062  hAllocation->GetMemory(),
12063  0); //memoryOffset
12064  break;
12065  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12066  {
12067  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12068  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
12069  res = pBlock->BindImageMemory(this, hAllocation, hImage);
12070  break;
12071  }
12072  default:
12073  VMA_ASSERT(0);
12074  }
12075  return res;
12076 }
12077 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates [offset, offset+size) of the allocation on
    // non-coherent memory, expanding the range to nonCoherentAtomSize
    // granularity as the Vulkan spec requires for vkFlushMappedMemoryRanges /
    // vkInvalidateMappedMemoryRanges. No-op for coherent types or size == 0.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align offset down to an atom boundary, then clamp the
            // aligned-up size so the range never exceeds the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Rebase the range from allocation-relative to block-relative
            // and clamp it so it never extends past the end of the block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
12153 
12154 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
12155 {
12156  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
12157 
12158  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
12159  {
12160  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
12161  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
12162  VMA_ASSERT(pDedicatedAllocations);
12163  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
12164  VMA_ASSERT(success);
12165  }
12166 
12167  VkDeviceMemory hMemory = allocation->GetMemory();
12168 
12169  if(allocation->GetMappedData() != VMA_NULL)
12170  {
12171  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
12172  }
12173 
12174  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
12175 
12176  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
12177 }
12178 
12179 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
12180 {
12181  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
12182  !hAllocation->CanBecomeLost() &&
12183  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12184  {
12185  void* pData = VMA_NULL;
12186  VkResult res = Map(hAllocation, &pData);
12187  if(res == VK_SUCCESS)
12188  {
12189  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
12190  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
12191  Unmap(hAllocation);
12192  }
12193  else
12194  {
12195  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
12196  }
12197  }
12198 }
12199 
12200 #if VMA_STATS_STRING_ENABLED
12201 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Dumps dedicated allocations, default pools, and custom pools as JSON
    // sections; each section is opened lazily, only if it has content.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object on first non-empty list.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <memTypeIndex>".
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default (per-memory-type) pools, same lazy-open pattern.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Key: the pool's numeric ID.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
12287 
12288 #endif // #if VMA_STATS_STRING_ENABLED
12289 
12291 // Public interface
12292 
12293 VkResult vmaCreateAllocator(
12294  const VmaAllocatorCreateInfo* pCreateInfo,
12295  VmaAllocator* pAllocator)
12296 {
12297  VMA_ASSERT(pCreateInfo && pAllocator);
12298  VMA_DEBUG_LOG("vmaCreateAllocator");
12299  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
12300  return (*pAllocator)->Init(pCreateInfo);
12301 }
12302 
// Destroys the allocator object. Passing VK_NULL_HANDLE is a no-op.
12303 void vmaDestroyAllocator(
12304  VmaAllocator allocator)
12305 {
12306  if(allocator != VK_NULL_HANDLE)
12307  {
12308  VMA_DEBUG_LOG("vmaDestroyAllocator");
// Copy the callbacks to a local variable first: they are stored inside the
// allocator object, which vma_delete is about to destroy.
12309  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
12310  vma_delete(&allocationCallbacks, allocator);
12311  }
12312 }
12313 
// NOTE(review): the linkified signature lines of the next four functions were
// lost in this extraction. From the parameter lists and the doc index they
// are, in order: vmaGetPhysicalDeviceProperties, vmaGetMemoryProperties,
// vmaGetMemoryTypeProperties and vmaSetCurrentFrameIndex -- confirm against
// the original header.
//
// Returns a pointer to physical-device properties cached in the allocator.
// No locking: the data is written once at allocator creation.
12315  VmaAllocator allocator,
12316  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
12317 {
12318  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
12319  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
12320 }
12321 
// Returns a pointer to the cached VkPhysicalDeviceMemoryProperties.
12323  VmaAllocator allocator,
12324  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
12325 {
12326  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
12327  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
12328 }
12329 
// Copies the property flags of a single memory type into *pFlags.
12331  VmaAllocator allocator,
12332  uint32_t memoryTypeIndex,
12333  VkMemoryPropertyFlags* pFlags)
12334 {
12335  VMA_ASSERT(allocator && pFlags);
// Out-of-range index is a caller bug: only checked in debug builds.
12336  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
12337  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
12338 }
12339 
// Sets the current frame index, used by the lost-allocation mechanism.
// VMA_FRAME_INDEX_LOST is reserved and must not be passed.
12341  VmaAllocator allocator,
12342  uint32_t frameIndex)
12343 {
12344  VMA_ASSERT(allocator);
12345  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
12346 
12347  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12348 
12349  allocator->SetCurrentFrameIndex(frameIndex);
12350 }
12351 
12352 void vmaCalculateStats(
12353  VmaAllocator allocator,
12354  VmaStats* pStats)
12355 {
12356  VMA_ASSERT(allocator && pStats);
12357  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12358  allocator->CalculateStats(pStats);
12359 }
12360 
12361 #if VMA_STATS_STRING_ENABLED
12362 
// Builds a JSON stats report into a newly allocated, NUL-terminated string
// and stores it in *ppStatsString. The caller must release it with
// vmaFreeStatsString(). When detailedMap is VK_TRUE, a per-block detailed
// map is appended via allocator->PrintDetailedMap().
12363 void vmaBuildStatsString(
12364  VmaAllocator allocator,
12365  char** ppStatsString,
12366  VkBool32 detailedMap)
12367 {
12368  VMA_ASSERT(allocator && ppStatsString);
12369  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12370 
12371  VmaStringBuilder sb(allocator);
// Inner scope ensures the JSON writer is finished (destroyed) before the
// string builder's contents are copied out below.
12372  {
12373  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
12374  json.BeginObject();
12375 
12376  VmaStats stats;
12377  allocator->CalculateStats(&stats);
12378 
12379  json.WriteString("Total");
12380  VmaPrintStatInfo(json, stats.total);
12381 
// One JSON object per memory heap, each listing its size, flags, optional
// stats, and the memory types that belong to it.
12382  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
12383  {
12384  json.BeginString("Heap ");
12385  json.ContinueString(heapIndex);
12386  json.EndString();
12387  json.BeginObject();
12388 
12389  json.WriteString("Size");
12390  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
12391 
12392  json.WriteString("Flags");
12393  json.BeginArray(true);
12394  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
12395  {
12396  json.WriteString("DEVICE_LOCAL");
12397  }
12398  json.EndArray();
12399 
// Stats are emitted only for heaps that actually hold blocks.
12400  if(stats.memoryHeap[heapIndex].blockCount > 0)
12401  {
12402  json.WriteString("Stats");
12403  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
12404  }
12405 
12406  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
12407  {
12408  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
12409  {
12410  json.BeginString("Type ");
12411  json.ContinueString(typeIndex);
12412  json.EndString();
12413 
12414  json.BeginObject();
12415 
12416  json.WriteString("Flags");
12417  json.BeginArray(true);
12418  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
12419  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
12420  {
12421  json.WriteString("DEVICE_LOCAL");
12422  }
12423  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12424  {
12425  json.WriteString("HOST_VISIBLE");
12426  }
12427  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
12428  {
12429  json.WriteString("HOST_COHERENT");
12430  }
12431  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
12432  {
12433  json.WriteString("HOST_CACHED");
12434  }
12435  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
12436  {
12437  json.WriteString("LAZILY_ALLOCATED");
12438  }
12439  json.EndArray();
12440 
12441  if(stats.memoryType[typeIndex].blockCount > 0)
12442  {
12443  json.WriteString("Stats");
12444  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
12445  }
12446 
12447  json.EndObject();
12448  }
12449  }
12450 
12451  json.EndObject();
12452  }
12453  if(detailedMap == VK_TRUE)
12454  {
12455  allocator->PrintDetailedMap(json);
12456  }
12457 
12458  json.EndObject();
12459  }
12460 
// Copy the builder's buffer into a caller-owned, NUL-terminated array.
12461  const size_t len = sb.GetLength();
12462  char* const pChars = vma_new_array(allocator, char, len + 1);
12463  if(len > 0)
12464  {
12465  memcpy(pChars, sb.GetData(), len);
12466  }
12467  pChars[len] = '\0';
12468  *ppStatsString = pChars;
12469 }
12470 
12471 void vmaFreeStatsString(
12472  VmaAllocator allocator,
12473  char* pStatsString)
12474 {
12475  if(pStatsString != VMA_NULL)
12476  {
12477  VMA_ASSERT(allocator);
12478  size_t len = strlen(pStatsString);
12479  vma_delete_array(allocator, pStatsString, len + 1);
12480  }
12481 }
12482 
12483 #endif // #if VMA_STATS_STRING_ENABLED
12484 
12485 /*
12486 This function is not protected by any mutex because it just reads immutable data.
12487 */
// Chooses the best memory type allowed by memoryTypeBits that satisfies
// requiredFlags, preferring the one missing the fewest preferredFlags bits.
// Returns VK_ERROR_FEATURE_NOT_PRESENT if no acceptable type exists.
12488 VkResult vmaFindMemoryTypeIndex(
12489  VmaAllocator allocator,
12490  uint32_t memoryTypeBits,
12491  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12492  uint32_t* pMemoryTypeIndex)
12493 {
12494  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12495  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12496  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12497 
// The caller may further restrict candidate types via the create info.
12498  if(pAllocationCreateInfo->memoryTypeBits != 0)
12499  {
12500  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
12501  }
12502 
12503  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
12504  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
12505 
// A persistently mapped allocation must end up in host-visible memory.
12506  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12507  if(mapped)
12508  {
12509  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12510  }
12511 
12512  // Convert usage to requiredFlags and preferredFlags.
12513  switch(pAllocationCreateInfo->usage)
12514  {
// NOTE(review): the `case VMA_MEMORY_USAGE_*:` labels were dropped by this
// extraction (Doxygen links). From the bodies these are, in order: UNKNOWN,
// GPU_ONLY, CPU_ONLY, CPU_TO_GPU, GPU_TO_CPU -- confirm against the
// original header.
12516  break;
12518  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12519  {
12520  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12521  }
12522  break;
12524  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12525  break;
12527  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12528  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12529  {
12530  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12531  }
12532  break;
12534  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12535  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
12536  break;
12537  default:
12538  break;
12539  }
12540 
12541  *pMemoryTypeIndex = UINT32_MAX;
12542  uint32_t minCost = UINT32_MAX;
12543  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
12544  memTypeIndex < allocator->GetMemoryTypeCount();
12545  ++memTypeIndex, memTypeBit <<= 1)
12546  {
12547  // This memory type is acceptable according to memoryTypeBits bitmask.
12548  if((memTypeBit & memoryTypeBits) != 0)
12549  {
12550  const VkMemoryPropertyFlags currFlags =
12551  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
12552  // This memory type contains requiredFlags.
12553  if((requiredFlags & ~currFlags) == 0)
12554  {
12555  // Calculate cost as number of bits from preferredFlags not present in this memory type.
12556  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
12557  // Remember memory type with lowest cost.
12558  if(currCost < minCost)
12559  {
12560  *pMemoryTypeIndex = memTypeIndex;
// Perfect match: all preferred flags present, stop searching.
12561  if(currCost == 0)
12562  {
12563  return VK_SUCCESS;
12564  }
12565  minCost = currCost;
12566  }
12567  }
12568  }
12569  }
12570  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
12571 }
12572 
// NOTE(review): the signature line was lost in this extraction; from the
// parameters this is vmaFindMemoryTypeIndexForBufferInfo -- confirm against
// the original header.
// Creates a temporary buffer only to query its memory requirements, then
// delegates to vmaFindMemoryTypeIndex() and destroys the buffer again.
12574  VmaAllocator allocator,
12575  const VkBufferCreateInfo* pBufferCreateInfo,
12576  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12577  uint32_t* pMemoryTypeIndex)
12578 {
12579  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12580  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
12581  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12582  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12583 
12584  const VkDevice hDev = allocator->m_hDevice;
12585  VkBuffer hBuffer = VK_NULL_HANDLE;
12586  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
12587  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
12588  if(res == VK_SUCCESS)
12589  {
12590  VkMemoryRequirements memReq = {};
12591  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
12592  hDev, hBuffer, &memReq);
12593 
12594  res = vmaFindMemoryTypeIndex(
12595  allocator,
12596  memReq.memoryTypeBits,
12597  pAllocationCreateInfo,
12598  pMemoryTypeIndex);
12599 
// The probe buffer is always destroyed, regardless of the lookup result.
12600  allocator->GetVulkanFunctions().vkDestroyBuffer(
12601  hDev, hBuffer, allocator->GetAllocationCallbacks());
12602  }
12603  return res;
12604 }
12605 
// NOTE(review): the signature line was lost in this extraction; from the
// parameters this is vmaFindMemoryTypeIndexForImageInfo -- confirm against
// the original header.
// Image counterpart of the buffer variant above: create a temporary image,
// query its requirements, find the type, destroy the image.
12607  VmaAllocator allocator,
12608  const VkImageCreateInfo* pImageCreateInfo,
12609  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12610  uint32_t* pMemoryTypeIndex)
12611 {
12612  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12613  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
12614  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12615  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12616 
12617  const VkDevice hDev = allocator->m_hDevice;
12618  VkImage hImage = VK_NULL_HANDLE;
12619  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
12620  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
12621  if(res == VK_SUCCESS)
12622  {
12623  VkMemoryRequirements memReq = {};
12624  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
12625  hDev, hImage, &memReq);
12626 
12627  res = vmaFindMemoryTypeIndex(
12628  allocator,
12629  memReq.memoryTypeBits,
12630  pAllocationCreateInfo,
12631  pMemoryTypeIndex);
12632 
// The probe image is always destroyed, regardless of the lookup result.
12633  allocator->GetVulkanFunctions().vkDestroyImage(
12634  hDev, hImage, allocator->GetAllocationCallbacks());
12635  }
12636  return res;
12637 }
12638 
// Creates a custom memory pool and stores its handle in *pPool.
12639 VkResult vmaCreatePool(
12640  VmaAllocator allocator,
12641  const VmaPoolCreateInfo* pCreateInfo,
12642  VmaPool* pPool)
12643 {
12644  VMA_ASSERT(allocator && pCreateInfo && pPool);
12645 
12646  VMA_DEBUG_LOG("vmaCreatePool");
12647 
12648  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12649 
12650  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
12651 
12652 #if VMA_RECORDING_ENABLED
// NOTE(review): *pPool is recorded even when CreatePool failed; presumably
// the recorder tolerates a null handle -- verify against the recorder code.
12653  if(allocator->GetRecorder() != VMA_NULL)
12654  {
12655  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
12656  }
12657 #endif
12658 
12659  return res;
12660 }
12661 
// Destroys a custom memory pool. VK_NULL_HANDLE is a no-op.
12662 void vmaDestroyPool(
12663  VmaAllocator allocator,
12664  VmaPool pool)
12665 {
12666  VMA_ASSERT(allocator);
12667 
12668  if(pool == VK_NULL_HANDLE)
12669  {
12670  return;
12671  }
12672 
12673  VMA_DEBUG_LOG("vmaDestroyPool");
12674 
12675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12676 
12677 #if VMA_RECORDING_ENABLED
// Record the destruction before it happens, while the handle is still valid.
12678  if(allocator->GetRecorder() != VMA_NULL)
12679  {
12680  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
12681  }
12682 #endif
12683 
12684  allocator->DestroyPool(pool);
12685 }
12686 
12687 void vmaGetPoolStats(
12688  VmaAllocator allocator,
12689  VmaPool pool,
12690  VmaPoolStats* pPoolStats)
12691 {
12692  VMA_ASSERT(allocator && pool && pPoolStats);
12693 
12694  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12695 
12696  allocator->GetPoolStats(pool, pPoolStats);
12697 }
12698 
// NOTE(review): the signature line was lost in this extraction; from the
// parameters and the doc index this is vmaMakePoolAllocationsLost --
// confirm against the original header.
// Marks eligible allocations in the pool as lost; the optional
// pLostAllocationCount receives how many were affected.
12700  VmaAllocator allocator,
12701  VmaPool pool,
12702  size_t* pLostAllocationCount)
12703 {
12704  VMA_ASSERT(allocator && pool);
12705 
12706  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12707 
12708 #if VMA_RECORDING_ENABLED
12709  if(allocator->GetRecorder() != VMA_NULL)
12710  {
12711  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
12712  }
12713 #endif
12714 
12715  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
12716 }
12717 
12718 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
12719 {
12720  VMA_ASSERT(allocator && pool);
12721 
12722  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12723 
12724  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
12725 
12726  return allocator->CheckPoolCorruption(pool);
12727 }
12728 
// General-purpose allocation from explicit VkMemoryRequirements. The
// optional pAllocationInfo is filled only when the allocation succeeded.
12729 VkResult vmaAllocateMemory(
12730  VmaAllocator allocator,
12731  const VkMemoryRequirements* pVkMemoryRequirements,
12732  const VmaAllocationCreateInfo* pCreateInfo,
12733  VmaAllocation* pAllocation,
12734  VmaAllocationInfo* pAllocationInfo)
12735 {
12736  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
12737 
12738  VMA_DEBUG_LOG("vmaAllocateMemory");
12739 
12740  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12741 
// No buffer/image is known here, so dedicated-allocation hints are disabled
// and the suballocation type is UNKNOWN.
12742  VkResult result = allocator->AllocateMemory(
12743  *pVkMemoryRequirements,
12744  false, // requiresDedicatedAllocation
12745  false, // prefersDedicatedAllocation
12746  VK_NULL_HANDLE, // dedicatedBuffer
12747  VK_NULL_HANDLE, // dedicatedImage
12748  *pCreateInfo,
12749  VMA_SUBALLOCATION_TYPE_UNKNOWN,
12750  pAllocation);
12751 
12752 #if VMA_RECORDING_ENABLED
12753  if(allocator->GetRecorder() != VMA_NULL)
12754  {
12755  allocator->GetRecorder()->RecordAllocateMemory(
12756  allocator->GetCurrentFrameIndex(),
12757  *pVkMemoryRequirements,
12758  *pCreateInfo,
12759  *pAllocation);
12760  }
12761 #endif
12762 
12763  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
12764  {
12765  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12766  }
12767 
12768  return result;
12769 }
12770 
// NOTE(review): the signature line was lost in this extraction; from the
// debug-log string this is vmaAllocateMemoryForBuffer -- confirm against
// the original header.
// Allocates memory suitable for the given existing buffer (querying its
// requirements and dedicated-allocation hints) without binding it.
12772  VmaAllocator allocator,
12773  VkBuffer buffer,
12774  const VmaAllocationCreateInfo* pCreateInfo,
12775  VmaAllocation* pAllocation,
12776  VmaAllocationInfo* pAllocationInfo)
12777 {
12778  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12779 
12780  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
12781 
12782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12783 
12784  VkMemoryRequirements vkMemReq = {};
12785  bool requiresDedicatedAllocation = false;
12786  bool prefersDedicatedAllocation = false;
12787  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
12788  requiresDedicatedAllocation,
12789  prefersDedicatedAllocation);
12790 
12791  VkResult result = allocator->AllocateMemory(
12792  vkMemReq,
12793  requiresDedicatedAllocation,
12794  prefersDedicatedAllocation,
12795  buffer, // dedicatedBuffer
12796  VK_NULL_HANDLE, // dedicatedImage
12797  *pCreateInfo,
12798  VMA_SUBALLOCATION_TYPE_BUFFER,
12799  pAllocation);
12800 
12801 #if VMA_RECORDING_ENABLED
12802  if(allocator->GetRecorder() != VMA_NULL)
12803  {
12804  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
12805  allocator->GetCurrentFrameIndex(),
12806  vkMemReq,
12807  requiresDedicatedAllocation,
12808  prefersDedicatedAllocation,
12809  *pCreateInfo,
12810  *pAllocation);
12811  }
12812 #endif
12813 
12814  if(pAllocationInfo && result == VK_SUCCESS)
12815  {
12816  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12817  }
12818 
12819  return result;
12820 }
12821 
// Allocates memory suitable for the given existing image (querying its
// requirements and dedicated-allocation hints) without binding it.
12822 VkResult vmaAllocateMemoryForImage(
12823  VmaAllocator allocator,
12824  VkImage image,
12825  const VmaAllocationCreateInfo* pCreateInfo,
12826  VmaAllocation* pAllocation,
12827  VmaAllocationInfo* pAllocationInfo)
12828 {
12829  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12830 
12831  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
12832 
12833  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12834 
12835  VkMemoryRequirements vkMemReq = {};
12836  bool requiresDedicatedAllocation = false;
12837  bool prefersDedicatedAllocation = false;
12838  allocator->GetImageMemoryRequirements(image, vkMemReq,
12839  requiresDedicatedAllocation, prefersDedicatedAllocation);
12840 
// Tiling is unknown at this point, hence IMAGE_UNKNOWN suballocation type.
12841  VkResult result = allocator->AllocateMemory(
12842  vkMemReq,
12843  requiresDedicatedAllocation,
12844  prefersDedicatedAllocation,
12845  VK_NULL_HANDLE, // dedicatedBuffer
12846  image, // dedicatedImage
12847  *pCreateInfo,
12848  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
12849  pAllocation);
12850 
12851 #if VMA_RECORDING_ENABLED
12852  if(allocator->GetRecorder() != VMA_NULL)
12853  {
12854  allocator->GetRecorder()->RecordAllocateMemoryForImage(
12855  allocator->GetCurrentFrameIndex(),
12856  vkMemReq,
12857  requiresDedicatedAllocation,
12858  prefersDedicatedAllocation,
12859  *pCreateInfo,
12860  *pAllocation);
12861  }
12862 #endif
12863 
12864  if(pAllocationInfo && result == VK_SUCCESS)
12865  {
12866  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12867  }
12868 
12869  return result;
12870 }
12871 
// Frees memory previously allocated by one of the vmaAllocate*/vmaCreate*
// functions. VK_NULL_HANDLE is a no-op.
12872 void vmaFreeMemory(
12873  VmaAllocator allocator,
12874  VmaAllocation allocation)
12875 {
12876  VMA_ASSERT(allocator);
12877 
12878  if(allocation == VK_NULL_HANDLE)
12879  {
12880  return;
12881  }
12882 
12883  VMA_DEBUG_LOG("vmaFreeMemory");
12884 
12885  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12886 
12887 #if VMA_RECORDING_ENABLED
// Record before freeing, while the handle is still valid.
12888  if(allocator->GetRecorder() != VMA_NULL)
12889  {
12890  allocator->GetRecorder()->RecordFreeMemory(
12891  allocator->GetCurrentFrameIndex(),
12892  allocation);
12893  }
12894 #endif
12895 
12896  allocator->FreeMemory(allocation);
12897 }
12898 
// NOTE(review): the signature line was lost in this extraction; from the
// parameters this is vmaGetAllocationInfo -- confirm against the original
// header.
// Fills *pAllocationInfo with current information about the allocation.
12900  VmaAllocator allocator,
12901  VmaAllocation allocation,
12902  VmaAllocationInfo* pAllocationInfo)
12903 {
12904  VMA_ASSERT(allocator && allocation && pAllocationInfo);
12905 
12906  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12907 
12908 #if VMA_RECORDING_ENABLED
12909  if(allocator->GetRecorder() != VMA_NULL)
12910  {
12911  allocator->GetRecorder()->RecordGetAllocationInfo(
12912  allocator->GetCurrentFrameIndex(),
12913  allocation);
12914  }
12915 #endif
12916 
12917  allocator->GetAllocationInfo(allocation, pAllocationInfo);
12918 }
12919 
// Touches the allocation and returns whether it is still valid
// (i.e. not lost), as reported by the allocator.
12920 VkBool32 vmaTouchAllocation(
12921  VmaAllocator allocator,
12922  VmaAllocation allocation)
12923 {
12924  VMA_ASSERT(allocator && allocation);
12925 
12926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12927 
12928 #if VMA_RECORDING_ENABLED
12929  if(allocator->GetRecorder() != VMA_NULL)
12930  {
12931  allocator->GetRecorder()->RecordTouchAllocation(
12932  allocator->GetCurrentFrameIndex(),
12933  allocation);
12934  }
12935 #endif
12936 
12937  return allocator->TouchAllocation(allocation);
12938 }
12939 
// NOTE(review): the signature line was lost in this extraction; from the
// parameters this is vmaSetAllocationUserData -- confirm against the
// original header.
// Attaches an opaque user pointer to the allocation.
12941  VmaAllocator allocator,
12942  VmaAllocation allocation,
12943  void* pUserData)
12944 {
12945  VMA_ASSERT(allocator && allocation);
12946 
12947  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12948 
12949  allocation->SetUserData(allocator, pUserData);
12950 
12951 #if VMA_RECORDING_ENABLED
12952  if(allocator->GetRecorder() != VMA_NULL)
12953  {
12954  allocator->GetRecorder()->RecordSetAllocationUserData(
12955  allocator->GetCurrentFrameIndex(),
12956  allocation,
12957  pUserData);
12958  }
12959 #endif
12960 }
12961 
// NOTE(review): the signature line was lost in this extraction; from the
// body this is vmaCreateLostAllocation -- confirm against the original
// header.
// Creates an allocation object that is already in the "lost" state.
12963  VmaAllocator allocator,
12964  VmaAllocation* pAllocation)
12965 {
12966  VMA_ASSERT(allocator && pAllocation);
12967 
// NOTE(review): trailing semicolon after the macro is inconsistent with the
// other functions in this file (harmless, but worth normalizing upstream).
12968  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
12969 
12970  allocator->CreateLostAllocation(pAllocation);
12971 
12972 #if VMA_RECORDING_ENABLED
12973  if(allocator->GetRecorder() != VMA_NULL)
12974  {
12975  allocator->GetRecorder()->RecordCreateLostAllocation(
12976  allocator->GetCurrentFrameIndex(),
12977  *pAllocation);
12978  }
12979 #endif
12980 }
12981 
// Maps the allocation's memory and stores the CPU pointer in *ppData.
12982 VkResult vmaMapMemory(
12983  VmaAllocator allocator,
12984  VmaAllocation allocation,
12985  void** ppData)
12986 {
12987  VMA_ASSERT(allocator && allocation && ppData);
12988 
12989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12990 
12991  VkResult res = allocator->Map(allocation, ppData);
12992 
12993 #if VMA_RECORDING_ENABLED
// NOTE(review): the mapping is recorded even when Map failed -- presumably
// intentional; verify against the recorder implementation.
12994  if(allocator->GetRecorder() != VMA_NULL)
12995  {
12996  allocator->GetRecorder()->RecordMapMemory(
12997  allocator->GetCurrentFrameIndex(),
12998  allocation);
12999  }
13000 #endif
13001 
13002  return res;
13003 }
13004 
// Unmaps memory previously mapped with vmaMapMemory().
13005 void vmaUnmapMemory(
13006  VmaAllocator allocator,
13007  VmaAllocation allocation)
13008 {
13009  VMA_ASSERT(allocator && allocation);
13010 
13011  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13012 
13013 #if VMA_RECORDING_ENABLED
13014  if(allocator->GetRecorder() != VMA_NULL)
13015  {
13016  allocator->GetRecorder()->RecordUnmapMemory(
13017  allocator->GetCurrentFrameIndex(),
13018  allocation);
13019  }
13020 #endif
13021 
13022  allocator->Unmap(allocation);
13023 }
13024 
// Flushes the given byte range of the allocation (host writes -> device),
// delegating to FlushOrInvalidateAllocation with VMA_CACHE_FLUSH.
13025 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13026 {
13027  VMA_ASSERT(allocator && allocation);
13028 
13029  VMA_DEBUG_LOG("vmaFlushAllocation");
13030 
13031  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13032 
13033  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13034 
13035 #if VMA_RECORDING_ENABLED
13036  if(allocator->GetRecorder() != VMA_NULL)
13037  {
13038  allocator->GetRecorder()->RecordFlushAllocation(
13039  allocator->GetCurrentFrameIndex(),
13040  allocation, offset, size);
13041  }
13042 #endif
13043 }
13044 
// Invalidates the given byte range of the allocation (device writes ->
// host), delegating to FlushOrInvalidateAllocation with VMA_CACHE_INVALIDATE.
13045 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13046 {
13047  VMA_ASSERT(allocator && allocation);
13048 
13049  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13050 
13051  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13052 
13053  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13054 
13055 #if VMA_RECORDING_ENABLED
13056  if(allocator->GetRecorder() != VMA_NULL)
13057  {
13058  allocator->GetRecorder()->RecordInvalidateAllocation(
13059  allocator->GetCurrentFrameIndex(),
13060  allocation, offset, size);
13061  }
13062 #endif
13063 }
13064 
13065 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
13066 {
13067  VMA_ASSERT(allocator);
13068 
13069  VMA_DEBUG_LOG("vmaCheckCorruption");
13070 
13071  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13072 
13073  return allocator->CheckCorruption(memoryTypeBits);
13074 }
13075 
13076 VkResult vmaDefragment(
13077  VmaAllocator allocator,
13078  VmaAllocation* pAllocations,
13079  size_t allocationCount,
13080  VkBool32* pAllocationsChanged,
13081  const VmaDefragmentationInfo *pDefragmentationInfo,
13082  VmaDefragmentationStats* pDefragmentationStats)
13083 {
13084  VMA_ASSERT(allocator && pAllocations);
13085 
13086  VMA_DEBUG_LOG("vmaDefragment");
13087 
13088  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13089 
13090  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
13091 }
13092 
13093 VkResult vmaBindBufferMemory(
13094  VmaAllocator allocator,
13095  VmaAllocation allocation,
13096  VkBuffer buffer)
13097 {
13098  VMA_ASSERT(allocator && allocation && buffer);
13099 
13100  VMA_DEBUG_LOG("vmaBindBufferMemory");
13101 
13102  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13103 
13104  return allocator->BindBufferMemory(allocation, buffer);
13105 }
13106 
13107 VkResult vmaBindImageMemory(
13108  VmaAllocator allocator,
13109  VmaAllocation allocation,
13110  VkImage image)
13111 {
13112  VMA_ASSERT(allocator && allocation && image);
13113 
13114  VMA_DEBUG_LOG("vmaBindImageMemory");
13115 
13116  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13117 
13118  return allocator->BindImageMemory(allocation, image);
13119 }
13120 
// Convenience function: creates a VkBuffer, allocates matching memory and
// binds the two. On any failure all previously acquired resources are
// rolled back and both output handles are left as VK_NULL_HANDLE.
13121 VkResult vmaCreateBuffer(
13122  VmaAllocator allocator,
13123  const VkBufferCreateInfo* pBufferCreateInfo,
13124  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13125  VkBuffer* pBuffer,
13126  VmaAllocation* pAllocation,
13127  VmaAllocationInfo* pAllocationInfo)
13128 {
13129  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
13130 
13131  VMA_DEBUG_LOG("vmaCreateBuffer");
13132 
13133  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13134 
13135  *pBuffer = VK_NULL_HANDLE;
13136  *pAllocation = VK_NULL_HANDLE;
13137 
13138  // 1. Create VkBuffer.
13139  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
13140  allocator->m_hDevice,
13141  pBufferCreateInfo,
13142  allocator->GetAllocationCallbacks(),
13143  pBuffer);
13144  if(res >= 0)
13145  {
13146  // 2. vkGetBufferMemoryRequirements.
13147  VkMemoryRequirements vkMemReq = {};
13148  bool requiresDedicatedAllocation = false;
13149  bool prefersDedicatedAllocation = false;
13150  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
13151  requiresDedicatedAllocation, prefersDedicatedAllocation);
13152 
13153  // Make sure alignment requirements for specific buffer usages reported
13154  // in Physical Device Properties are included in alignment reported by memory requirements.
13155  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
13156  {
13157  VMA_ASSERT(vkMemReq.alignment %
13158  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
13159  }
13160  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
13161  {
13162  VMA_ASSERT(vkMemReq.alignment %
13163  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
13164  }
13165  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
13166  {
13167  VMA_ASSERT(vkMemReq.alignment %
13168  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
13169  }
13170 
13171  // 3. Allocate memory using allocator.
13172  res = allocator->AllocateMemory(
13173  vkMemReq,
13174  requiresDedicatedAllocation,
13175  prefersDedicatedAllocation,
13176  *pBuffer, // dedicatedBuffer
13177  VK_NULL_HANDLE, // dedicatedImage
13178  *pAllocationCreateInfo,
13179  VMA_SUBALLOCATION_TYPE_BUFFER,
13180  pAllocation);
13181 
13182 #if VMA_RECORDING_ENABLED
13183  if(allocator->GetRecorder() != VMA_NULL)
13184  {
13185  allocator->GetRecorder()->RecordCreateBuffer(
13186  allocator->GetCurrentFrameIndex(),
13187  *pBufferCreateInfo,
13188  *pAllocationCreateInfo,
13189  *pAllocation);
13190  }
13191 #endif
13192 
13193  if(res >= 0)
13194  {
// NOTE(review): the step comment below duplicates "3."; binding is really
// step 4 of the sequence.
13195  // 3. Bind buffer with memory.
13196  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
13197  if(res >= 0)
13198  {
13199  // All steps succeeded.
13200  #if VMA_STATS_STRING_ENABLED
13201  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
13202  #endif
13203  if(pAllocationInfo != VMA_NULL)
13204  {
13205  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13206  }
13207 
13208  return VK_SUCCESS;
13209  }
// Bind failed: roll back the allocation and the buffer.
13210  allocator->FreeMemory(*pAllocation);
13211  *pAllocation = VK_NULL_HANDLE;
13212  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13213  *pBuffer = VK_NULL_HANDLE;
13214  return res;
13215  }
// Allocation failed: roll back the buffer.
13216  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
13217  *pBuffer = VK_NULL_HANDLE;
13218  return res;
13219  }
13220  return res;
13221 }
13222 
// Destroys a buffer created with vmaCreateBuffer and frees its allocation.
// Either handle may be VK_NULL_HANDLE independently.
13223 void vmaDestroyBuffer(
13224  VmaAllocator allocator,
13225  VkBuffer buffer,
13226  VmaAllocation allocation)
13227 {
13228  VMA_ASSERT(allocator);
13229 
13230  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13231  {
13232  return;
13233  }
13234 
13235  VMA_DEBUG_LOG("vmaDestroyBuffer");
13236 
13237  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13238 
13239 #if VMA_RECORDING_ENABLED
13240  if(allocator->GetRecorder() != VMA_NULL)
13241  {
13242  allocator->GetRecorder()->RecordDestroyBuffer(
13243  allocator->GetCurrentFrameIndex(),
13244  allocation);
13245  }
13246 #endif
13247 
// Destroy the Vulkan object before releasing the memory it was bound to.
13248  if(buffer != VK_NULL_HANDLE)
13249  {
13250  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
13251  }
13252 
13253  if(allocation != VK_NULL_HANDLE)
13254  {
13255  allocator->FreeMemory(allocation);
13256  }
13257 }
13258 
// Convenience function: creates a VkImage, allocates matching memory and
// binds the two. On any failure all previously acquired resources are
// rolled back and both output handles are left as VK_NULL_HANDLE.
13259 VkResult vmaCreateImage(
13260  VmaAllocator allocator,
13261  const VkImageCreateInfo* pImageCreateInfo,
13262  const VmaAllocationCreateInfo* pAllocationCreateInfo,
13263  VkImage* pImage,
13264  VmaAllocation* pAllocation,
13265  VmaAllocationInfo* pAllocationInfo)
13266 {
13267  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
13268 
13269  VMA_DEBUG_LOG("vmaCreateImage");
13270 
13271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13272 
13273  *pImage = VK_NULL_HANDLE;
13274  *pAllocation = VK_NULL_HANDLE;
13275 
13276  // 1. Create VkImage.
13277  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
13278  allocator->m_hDevice,
13279  pImageCreateInfo,
13280  allocator->GetAllocationCallbacks(),
13281  pImage);
13282  if(res >= 0)
13283  {
// Linear and optimal images are kept apart by suballocation type (they have
// different granularity interactions).
13284  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
13285  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
13286  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
13287 
13288  // 2. Allocate memory using allocator.
13289  VkMemoryRequirements vkMemReq = {};
13290  bool requiresDedicatedAllocation = false;
13291  bool prefersDedicatedAllocation = false;
13292  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
13293  requiresDedicatedAllocation, prefersDedicatedAllocation);
13294 
13295  res = allocator->AllocateMemory(
13296  vkMemReq,
13297  requiresDedicatedAllocation,
13298  prefersDedicatedAllocation,
13299  VK_NULL_HANDLE, // dedicatedBuffer
13300  *pImage, // dedicatedImage
13301  *pAllocationCreateInfo,
13302  suballocType,
13303  pAllocation);
13304 
13305 #if VMA_RECORDING_ENABLED
13306  if(allocator->GetRecorder() != VMA_NULL)
13307  {
13308  allocator->GetRecorder()->RecordCreateImage(
13309  allocator->GetCurrentFrameIndex(),
13310  *pImageCreateInfo,
13311  *pAllocationCreateInfo,
13312  *pAllocation);
13313  }
13314 #endif
13315 
13316  if(res >= 0)
13317  {
13318  // 3. Bind image with memory.
13319  res = allocator->BindImageMemory(*pAllocation, *pImage);
13320  if(res >= 0)
13321  {
13322  // All steps succeeded.
13323  #if VMA_STATS_STRING_ENABLED
13324  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
13325  #endif
13326  if(pAllocationInfo != VMA_NULL)
13327  {
13328  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
13329  }
13330 
13331  return VK_SUCCESS;
13332  }
// Bind failed: roll back the allocation and the image.
13333  allocator->FreeMemory(*pAllocation);
13334  *pAllocation = VK_NULL_HANDLE;
13335  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13336  *pImage = VK_NULL_HANDLE;
13337  return res;
13338  }
// Allocation failed: roll back the image.
13339  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
13340  *pImage = VK_NULL_HANDLE;
13341  return res;
13342  }
13343  return res;
13344 }
13345 
// Destroys an image created with vmaCreateImage and frees its allocation.
// Either handle may be VK_NULL_HANDLE independently.
13346 void vmaDestroyImage(
13347  VmaAllocator allocator,
13348  VkImage image,
13349  VmaAllocation allocation)
13350 {
13351  VMA_ASSERT(allocator);
13352 
13353  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13354  {
13355  return;
13356  }
13357 
13358  VMA_DEBUG_LOG("vmaDestroyImage");
13359 
13360  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13361 
13362 #if VMA_RECORDING_ENABLED
13363  if(allocator->GetRecorder() != VMA_NULL)
13364  {
13365  allocator->GetRecorder()->RecordDestroyImage(
13366  allocator->GetCurrentFrameIndex(),
13367  allocation);
13368  }
13369 #endif
13370 
// Destroy the Vulkan object before releasing the memory it was bound to.
13371  if(image != VK_NULL_HANDLE)
13372  {
13373  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
13374  }
13375  if(allocation != VK_NULL_HANDLE)
13376  {
13377  allocator->FreeMemory(allocation);
13378  }
13379 }
13380 
13381 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1430
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1743
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1499
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1461
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1442
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1700
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1434
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2110
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1496
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2355
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1924
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1473
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1994
Definition: vk_mem_alloc.h:1780
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1423
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1823
Definition: vk_mem_alloc.h:1727
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1508
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1561
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1493
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1731
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1633
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1439
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1632
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2359
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1525
VmaStatInfo total
Definition: vk_mem_alloc.h:1642
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2367
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1807
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2350
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1440
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1365
Represents the main object of this library, initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1502
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1947
Definition: vk_mem_alloc.h:1941
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1568
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2120
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1435
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1459
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1844
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1963
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2000
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1421
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1950
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1678
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2345
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2363
Definition: vk_mem_alloc.h:1717
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1831
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1438
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1638
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1371
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1392
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1463
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1397
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2365
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1818
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region.
Definition: vk_mem_alloc.h:2010
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1431
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1621
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1958
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1384
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1787
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1634
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1388
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1953
Definition: vk_mem_alloc.h:1726
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1437
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1813
Definition: vk_mem_alloc.h:1804
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1624
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1433
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1972
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1511
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2003
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1802
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1837
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1549
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1640
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1767
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1633
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1444
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1481
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1386
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1443
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1986
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1436
Definition: vk_mem_alloc.h:1798
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1489
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2134
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1505
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1633
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1630
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:1991
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2115
Definition: vk_mem_alloc.h:1800
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2361
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1429
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1628
Definition: vk_mem_alloc.h:1683
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1943
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1478
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1626
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1441
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1445
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1754
Definition: vk_mem_alloc.h:1710
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2129
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1419
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1432
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:1939
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2096
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1906
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1634
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1453
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1641
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:1997
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1634
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2101