Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1359 #include <vulkan/vulkan.h>
1360 
1361 #if !defined(VMA_DEDICATED_ALLOCATION)
1362  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1363  #define VMA_DEDICATED_ALLOCATION 1
1364  #else
1365  #define VMA_DEDICATED_ALLOCATION 0
1366  #endif
1367 #endif
1368 
1378 VK_DEFINE_HANDLE(VmaAllocator)
1379 
1380 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1382  VmaAllocator allocator,
1383  uint32_t memoryType,
1384  VkDeviceMemory memory,
1385  VkDeviceSize size);
1387 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1388  VmaAllocator allocator,
1389  uint32_t memoryType,
1390  VkDeviceMemory memory,
1391  VkDeviceSize size);
1392 
1406 
1436 
1439 typedef VkFlags VmaAllocatorCreateFlags;
1440 
1445 typedef struct VmaVulkanFunctions {
1446  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1447  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1448  PFN_vkAllocateMemory vkAllocateMemory;
1449  PFN_vkFreeMemory vkFreeMemory;
1450  PFN_vkMapMemory vkMapMemory;
1451  PFN_vkUnmapMemory vkUnmapMemory;
1452  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1453  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1454  PFN_vkBindBufferMemory vkBindBufferMemory;
1455  PFN_vkBindImageMemory vkBindImageMemory;
1456  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1457  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1458  PFN_vkCreateBuffer vkCreateBuffer;
1459  PFN_vkDestroyBuffer vkDestroyBuffer;
1460  PFN_vkCreateImage vkCreateImage;
1461  PFN_vkDestroyImage vkDestroyImage;
1462 #if VMA_DEDICATED_ALLOCATION
1463  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1464  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1465 #endif
1467 
1469 typedef enum VmaRecordFlagBits {
1476 
1479 typedef VkFlags VmaRecordFlags;
1480 
1481 /*
1482 Define this macro to 0/1 to disable/enable support for recording functionality,
1483 available through VmaAllocatorCreateInfo::pRecordSettings.
1484 */
1485 #ifndef VMA_RECORDING_ENABLED
1486  #ifdef _WIN32
1487  #define VMA_RECORDING_ENABLED 1
1488  #else
1489  #define VMA_RECORDING_ENABLED 0
1490  #endif
1491 #endif
1492 
1494 typedef struct VmaRecordSettings
1495 {
1497  VmaRecordFlags flags;
1505  const char* pFilePath;
1507 
1510 {
1512  VmaAllocatorCreateFlags flags;
1514 
1515  VkPhysicalDevice physicalDevice;
1517 
1518  VkDevice device;
1520 
1523 
1524  const VkAllocationCallbacks* pAllocationCallbacks;
1526 
1565  const VkDeviceSize* pHeapSizeLimit;
1586 
1588 VkResult vmaCreateAllocator(
1589  const VmaAllocatorCreateInfo* pCreateInfo,
1590  VmaAllocator* pAllocator);
1591 
1593 void vmaDestroyAllocator(
1594  VmaAllocator allocator);
1595 
1601  VmaAllocator allocator,
1602  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1603 
1609  VmaAllocator allocator,
1610  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1611 
1619  VmaAllocator allocator,
1620  uint32_t memoryTypeIndex,
1621  VkMemoryPropertyFlags* pFlags);
1622 
1632  VmaAllocator allocator,
1633  uint32_t frameIndex);
1634 
1637 typedef struct VmaStatInfo
1638 {
1640  uint32_t blockCount;
1646  VkDeviceSize usedBytes;
1648  VkDeviceSize unusedBytes;
1649  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1650  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1651 } VmaStatInfo;
1652 
1654 typedef struct VmaStats
1655 {
1656  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1657  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1659 } VmaStats;
1660 
1662 void vmaCalculateStats(
1663  VmaAllocator allocator,
1664  VmaStats* pStats);
1665 
1666 #define VMA_STATS_STRING_ENABLED 1
1667 
1668 #if VMA_STATS_STRING_ENABLED
1669 
1671 
1673 void vmaBuildStatsString(
1674  VmaAllocator allocator,
1675  char** ppStatsString,
1676  VkBool32 detailedMap);
1677 
1678 void vmaFreeStatsString(
1679  VmaAllocator allocator,
1680  char* pStatsString);
1681 
1682 #endif // #if VMA_STATS_STRING_ENABLED
1683 
1692 VK_DEFINE_HANDLE(VmaPool)
1693 
1694 typedef enum VmaMemoryUsage
1695 {
1744 } VmaMemoryUsage;
1745 
1760 
1815 
1819 
1821 {
1823  VmaAllocationCreateFlags flags;
1834  VkMemoryPropertyFlags requiredFlags;
1839  VkMemoryPropertyFlags preferredFlags;
1847  uint32_t memoryTypeBits;
1860  void* pUserData;
1862 
1879 VkResult vmaFindMemoryTypeIndex(
1880  VmaAllocator allocator,
1881  uint32_t memoryTypeBits,
1882  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1883  uint32_t* pMemoryTypeIndex);
1884 
1898  VmaAllocator allocator,
1899  const VkBufferCreateInfo* pBufferCreateInfo,
1900  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1901  uint32_t* pMemoryTypeIndex);
1902 
1916  VmaAllocator allocator,
1917  const VkImageCreateInfo* pImageCreateInfo,
1918  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1919  uint32_t* pMemoryTypeIndex);
1920 
1941 
1956 
1959 typedef VkFlags VmaPoolCreateFlags;
1960 
1963 typedef struct VmaPoolCreateInfo {
1969  VmaPoolCreateFlags flags;
1978  VkDeviceSize blockSize;
2007 
2010 typedef struct VmaPoolStats {
2013  VkDeviceSize size;
2016  VkDeviceSize unusedSize;
2029  VkDeviceSize unusedRangeSizeMax;
2032  size_t blockCount;
2033 } VmaPoolStats;
2034 
2041 VkResult vmaCreatePool(
2042  VmaAllocator allocator,
2043  const VmaPoolCreateInfo* pCreateInfo,
2044  VmaPool* pPool);
2045 
2048 void vmaDestroyPool(
2049  VmaAllocator allocator,
2050  VmaPool pool);
2051 
2058 void vmaGetPoolStats(
2059  VmaAllocator allocator,
2060  VmaPool pool,
2061  VmaPoolStats* pPoolStats);
2062 
2070  VmaAllocator allocator,
2071  VmaPool pool,
2072  size_t* pLostAllocationCount);
2073 
2088 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2089 
2114 VK_DEFINE_HANDLE(VmaAllocation)
2115 
2116 
2118 typedef struct VmaAllocationInfo {
2123  uint32_t memoryType;
2132  VkDeviceMemory deviceMemory;
2137  VkDeviceSize offset;
2142  VkDeviceSize size;
2156  void* pUserData;
2158 
2169 VkResult vmaAllocateMemory(
2170  VmaAllocator allocator,
2171  const VkMemoryRequirements* pVkMemoryRequirements,
2172  const VmaAllocationCreateInfo* pCreateInfo,
2173  VmaAllocation* pAllocation,
2174  VmaAllocationInfo* pAllocationInfo);
2175 
2183  VmaAllocator allocator,
2184  VkBuffer buffer,
2185  const VmaAllocationCreateInfo* pCreateInfo,
2186  VmaAllocation* pAllocation,
2187  VmaAllocationInfo* pAllocationInfo);
2188 
2190 VkResult vmaAllocateMemoryForImage(
2191  VmaAllocator allocator,
2192  VkImage image,
2193  const VmaAllocationCreateInfo* pCreateInfo,
2194  VmaAllocation* pAllocation,
2195  VmaAllocationInfo* pAllocationInfo);
2196 
2198 void vmaFreeMemory(
2199  VmaAllocator allocator,
2200  VmaAllocation allocation);
2201 
2219  VmaAllocator allocator,
2220  VmaAllocation allocation,
2221  VmaAllocationInfo* pAllocationInfo);
2222 
2237 VkBool32 vmaTouchAllocation(
2238  VmaAllocator allocator,
2239  VmaAllocation allocation);
2240 
2255  VmaAllocator allocator,
2256  VmaAllocation allocation,
2257  void* pUserData);
2258 
2270  VmaAllocator allocator,
2271  VmaAllocation* pAllocation);
2272 
2307 VkResult vmaMapMemory(
2308  VmaAllocator allocator,
2309  VmaAllocation allocation,
2310  void** ppData);
2311 
2316 void vmaUnmapMemory(
2317  VmaAllocator allocator,
2318  VmaAllocation allocation);
2319 
2332 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2333 
2346 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2347 
2364 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2365 
2367 typedef struct VmaDefragmentationInfo {
2372  VkDeviceSize maxBytesToMove;
2379 
2381 typedef struct VmaDefragmentationStats {
2383  VkDeviceSize bytesMoved;
2385  VkDeviceSize bytesFreed;
2391 
2478 VkResult vmaDefragment(
2479  VmaAllocator allocator,
2480  VmaAllocation* pAllocations,
2481  size_t allocationCount,
2482  VkBool32* pAllocationsChanged,
2483  const VmaDefragmentationInfo *pDefragmentationInfo,
2484  VmaDefragmentationStats* pDefragmentationStats);
2485 
2498 VkResult vmaBindBufferMemory(
2499  VmaAllocator allocator,
2500  VmaAllocation allocation,
2501  VkBuffer buffer);
2502 
2515 VkResult vmaBindImageMemory(
2516  VmaAllocator allocator,
2517  VmaAllocation allocation,
2518  VkImage image);
2519 
2546 VkResult vmaCreateBuffer(
2547  VmaAllocator allocator,
2548  const VkBufferCreateInfo* pBufferCreateInfo,
2549  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2550  VkBuffer* pBuffer,
2551  VmaAllocation* pAllocation,
2552  VmaAllocationInfo* pAllocationInfo);
2553 
2565 void vmaDestroyBuffer(
2566  VmaAllocator allocator,
2567  VkBuffer buffer,
2568  VmaAllocation allocation);
2569 
2571 VkResult vmaCreateImage(
2572  VmaAllocator allocator,
2573  const VkImageCreateInfo* pImageCreateInfo,
2574  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2575  VkImage* pImage,
2576  VmaAllocation* pAllocation,
2577  VmaAllocationInfo* pAllocationInfo);
2578 
2590 void vmaDestroyImage(
2591  VmaAllocator allocator,
2592  VkImage image,
2593  VmaAllocation allocation);
2594 
2595 #ifdef __cplusplus
2596 }
2597 #endif
2598 
2599 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2600 
2601 // For Visual Studio IntelliSense.
2602 #if defined(__cplusplus) && defined(__INTELLISENSE__)
2603 #define VMA_IMPLEMENTATION
2604 #endif
2605 
2606 #ifdef VMA_IMPLEMENTATION
2607 #undef VMA_IMPLEMENTATION
2608 
2609 #include <cstdint>
2610 #include <cstdlib>
2611 #include <cstring>
2612 
2613 /*******************************************************************************
2614 CONFIGURATION SECTION
2615 
2616 Define some of these macros before each #include of this header or change them
here if you need other than the default behavior, depending on your environment.
2618 */
2619 
2620 /*
2621 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2622 internally, like:
2623 
2624  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2625 
Define to 0 if you are going to provide your own pointers to Vulkan functions via
2627 VmaAllocatorCreateInfo::pVulkanFunctions.
2628 */
2629 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2630 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2631 #endif
2632 
2633 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2634 //#define VMA_USE_STL_CONTAINERS 1
2635 
2636 /* Set this macro to 1 to make the library including and using STL containers:
2637 std::pair, std::vector, std::list, std::unordered_map.
2638 
2639 Set it to 0 or undefined to make the library using its own implementation of
2640 the containers.
2641 */
2642 #if VMA_USE_STL_CONTAINERS
2643  #define VMA_USE_STL_VECTOR 1
2644  #define VMA_USE_STL_UNORDERED_MAP 1
2645  #define VMA_USE_STL_LIST 1
2646 #endif
2647 
2648 #if VMA_USE_STL_VECTOR
2649  #include <vector>
2650 #endif
2651 
2652 #if VMA_USE_STL_UNORDERED_MAP
2653  #include <unordered_map>
2654 #endif
2655 
2656 #if VMA_USE_STL_LIST
2657  #include <list>
2658 #endif
2659 
2660 /*
2661 Following headers are used in this CONFIGURATION section only, so feel free to
2662 remove them if not needed.
2663 */
2664 #include <cassert> // for assert
2665 #include <algorithm> // for min, max
2666 #include <mutex> // for std::mutex
2667 #include <atomic> // for std::atomic
2668 
2669 #ifndef VMA_NULL
2670  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2671  #define VMA_NULL nullptr
2672 #endif
2673 
2674 #if defined(__APPLE__) || defined(__ANDROID__)
2675 #include <cstdlib>
2676 void *aligned_alloc(size_t alignment, size_t size)
2677 {
2678  // alignment must be >= sizeof(void*)
2679  if(alignment < sizeof(void*))
2680  {
2681  alignment = sizeof(void*);
2682  }
2683 
2684  void *pointer;
2685  if(posix_memalign(&pointer, alignment, size) == 0)
2686  return pointer;
2687  return VMA_NULL;
2688 }
2689 #endif
2690 
2691 // If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting the following line may help:
2693 
2694 //#include <malloc.h>
2695 
2696 // Normal assert to check for programmer's errors, especially in Debug configuration.
2697 #ifndef VMA_ASSERT
2698  #ifdef _DEBUG
2699  #define VMA_ASSERT(expr) assert(expr)
2700  #else
2701  #define VMA_ASSERT(expr)
2702  #endif
2703 #endif
2704 
2705 // Assert that will be called very often, like inside data structures e.g. operator[].
2706 // Making it non-empty can make program slow.
2707 #ifndef VMA_HEAVY_ASSERT
2708  #ifdef _DEBUG
2709  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2710  #else
2711  #define VMA_HEAVY_ASSERT(expr)
2712  #endif
2713 #endif
2714 
2715 #ifndef VMA_ALIGN_OF
2716  #define VMA_ALIGN_OF(type) (__alignof(type))
2717 #endif
2718 
2719 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2720  #if defined(_WIN32)
2721  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2722  #else
2723  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2724  #endif
2725 #endif
2726 
2727 #ifndef VMA_SYSTEM_FREE
2728  #if defined(_WIN32)
2729  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2730  #else
2731  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2732  #endif
2733 #endif
2734 
2735 #ifndef VMA_MIN
2736  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2737 #endif
2738 
2739 #ifndef VMA_MAX
2740  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2741 #endif
2742 
2743 #ifndef VMA_SWAP
2744  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2745 #endif
2746 
2747 #ifndef VMA_SORT
2748  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2749 #endif
2750 
2751 #ifndef VMA_DEBUG_LOG
2752  #define VMA_DEBUG_LOG(format, ...)
2753  /*
2754  #define VMA_DEBUG_LOG(format, ...) do { \
2755  printf(format, __VA_ARGS__); \
2756  printf("\n"); \
2757  } while(false)
2758  */
2759 #endif
2760 
2761 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2762 #if VMA_STATS_STRING_ENABLED
2763  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2764  {
2765  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2766  }
2767  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2768  {
2769  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2770  }
2771  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2772  {
2773  snprintf(outStr, strLen, "%p", ptr);
2774  }
2775 #endif
2776 
2777 #ifndef VMA_MUTEX
2778  class VmaMutex
2779  {
2780  public:
2781  VmaMutex() { }
2782  ~VmaMutex() { }
2783  void Lock() { m_Mutex.lock(); }
2784  void Unlock() { m_Mutex.unlock(); }
2785  private:
2786  std::mutex m_Mutex;
2787  };
2788  #define VMA_MUTEX VmaMutex
2789 #endif
2790 
2791 /*
2792 If providing your own implementation, you need to implement a subset of std::atomic:
2793 
2794 - Constructor(uint32_t desired)
2795 - uint32_t load() const
2796 - void store(uint32_t desired)
2797 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2798 */
2799 #ifndef VMA_ATOMIC_UINT32
2800  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2801 #endif
2802 
2803 #ifndef VMA_BEST_FIT
2804 
2816  #define VMA_BEST_FIT (1)
2817 #endif
2818 
2819 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2820 
2824  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2825 #endif
2826 
2827 #ifndef VMA_DEBUG_ALIGNMENT
2828 
2832  #define VMA_DEBUG_ALIGNMENT (1)
2833 #endif
2834 
2835 #ifndef VMA_DEBUG_MARGIN
2836 
2840  #define VMA_DEBUG_MARGIN (0)
2841 #endif
2842 
2843 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2844 
2848  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2849 #endif
2850 
2851 #ifndef VMA_DEBUG_DETECT_CORRUPTION
2852 
2857  #define VMA_DEBUG_DETECT_CORRUPTION (0)
2858 #endif
2859 
2860 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2861 
2865  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2866 #endif
2867 
2868 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2869 
2873  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2874 #endif
2875 
2876 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2877  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2879 #endif
2880 
2881 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2882  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2884 #endif
2885 
2886 #ifndef VMA_CLASS_NO_COPY
2887  #define VMA_CLASS_NO_COPY(className) \
2888  private: \
2889  className(const className&) = delete; \
2890  className& operator=(const className&) = delete;
2891 #endif
2892 
2893 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2894 
2895 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
2896 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
2897 
2898 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
2899 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
2900 
2901 /*******************************************************************************
2902 END OF CONFIGURATION
2903 */
2904 
2905 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2906  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2907 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Brian Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2918 
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Add the largest possible remainder (align - 1), then truncate down to a
    // multiple of align. Works for any align > 0, not only powers of two.
    const T bumped = val + align - 1;
    return bumped - bumped % align;
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    // Subtracting the remainder truncates to the multiple at or below val.
    return val - val % align;
}
2933 
// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    // Adding half of the divisor before the (truncating) division rounds the
    // quotient to the nearest integer for non-negative x and positive y.
    const T half = y / (T)2;
    return (x + half) / y;
}
2940 
2941 static inline bool VmaStrIsEmpty(const char* pStr)
2942 {
2943  return pStr == VMA_NULL || *pStr == '\0';
2944 }
2945 
2946 #ifndef VMA_SORT
2947 
// Lomuto-style partition step for VmaQuickSort (used only when the user has
// not defined VMA_SORT). Partitions [beg, end) around the last element (the
// pivot): elements for which cmp(elem, pivot) holds end up before the returned
// iterator, and the pivot is swapped into that position.
// cmp(a, b) must return true when a is ordered before b.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    // Pivot is the last element of the range.
    Iterator centerValue = end; --centerValue;
    // Next slot for an element that compares less than the pivot.
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
2970 
2971 template<typename Iterator, typename Compare>
2972 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2973 {
2974  if(beg < end)
2975  {
2976  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2977  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2978  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2979  }
2980 }
2981 
2982 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2983 
2984 #endif // #ifndef VMA_SORT
2985 
2986 /*
2987 Returns true if two memory blocks occupy overlapping pages.
2988 ResourceA must be in less memory offset than ResourceB.
2989 
2990 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2991 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2992 */
2993 static inline bool VmaBlocksOnSamePage(
2994  VkDeviceSize resourceAOffset,
2995  VkDeviceSize resourceASize,
2996  VkDeviceSize resourceBOffset,
2997  VkDeviceSize pageSize)
2998 {
2999  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3000  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3001  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3002  VkDeviceSize resourceBStart = resourceBOffset;
3003  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3004  return resourceAEndPage == resourceBStartPage;
3005 }
3006 
// Kind of content stored in a suballocation. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    // Unused range.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Allocated, but the resource kind is not known - treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with tiling not known - treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    // Image with linear tiling (behaves like a buffer for granularity purposes).
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    // Image with optimal tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces the enum to 32 bits.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3017 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair by enum value so the switch below only has to handle
    // suballocType1 <= suballocType2.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume a conflict to stay on the safe side.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: conflicts with anything that might be optimal-tiled
        // (and with other unknowns).
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal next to optimal is fine; only mixed linear/optimal conflicts.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
3058 
3059 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3060 {
3061  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3062  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3063  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3064  {
3065  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3066  }
3067 }
3068 
3069 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3070 {
3071  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3072  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3073  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3074  {
3075  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3076  {
3077  return false;
3078  }
3079  }
3080  return true;
3081 }
3082 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, no locking is performed at all - lets callers
    // make synchronization optional without branching at every use site.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // The locked mutex, or VMA_NULL when useMutex was false.
    VMA_MUTEX* m_pMutex;
};
3108 
3109 #if VMA_DEBUG_GLOBAL_MUTEX
3110  static VMA_MUTEX gDebugGlobalMutex;
3111  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3112 #else
3113  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3114 #endif
3115 
3116 // Minimum size of a free suballocation to register it in the free suballocation collection.
3117 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3118 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    // Classic lower-bound search. Invariant: every element before (beg + lo)
    // is less than key; no element at or after (beg + hi) is.
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1;
        }
        else
        {
            hi = mid;
        }
    }
    return beg + lo;
}
3146 
3148 // Memory allocation
3149 
3150 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3151 {
3152  if((pAllocationCallbacks != VMA_NULL) &&
3153  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3154  {
3155  return (*pAllocationCallbacks->pfnAllocation)(
3156  pAllocationCallbacks->pUserData,
3157  size,
3158  alignment,
3159  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3160  }
3161  else
3162  {
3163  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3164  }
3165 }
3166 
3167 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3168 {
3169  if((pAllocationCallbacks != VMA_NULL) &&
3170  (pAllocationCallbacks->pfnFree != VMA_NULL))
3171  {
3172  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3173  }
3174  else
3175  {
3176  VMA_SYSTEM_FREE(ptr);
3177  }
3178 }
3179 
// Allocates uninitialized, properly aligned storage for a single object of
// type T. Does not call the constructor - see vma_new.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3185 
// Allocates uninitialized storage for (count) objects of type T. Constructors
// are not called - see vma_new_array.
// NOTE(review): sizeof(T) * count is not checked for overflow; callers are
// expected to pass trusted counts.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3191 
3192 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
3193 
3194 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3195 
// Destroys *ptr and releases its memory through VmaFree(). Counterpart of
// vma_new. ptr must not be null - the destructor is invoked unconditionally.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
3202 
// Destroys (count) objects starting at ptr and releases the memory.
// Counterpart of vma_new_array. A null ptr is a no-op.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        // Destroy in reverse order, mirroring built-in C++ array semantics.
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
3215 
// STL-compatible allocator.
// Minimal std::allocator replacement that routes all (de)allocation through
// user-provided VkAllocationCallbacks, so STL-style containers in this file
// honor the same callbacks as the rest of the library.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal when they use the same callbacks - memory
    // obtained from one may then be released through the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
3243 
3244 #if VMA_USE_STL_VECTOR
3245 
3246 #define VmaVector std::vector
3247 
// Inserts (item) into (vec) so that it ends up at position (index).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    const auto insertPos = vec.begin() + index;
    vec.insert(insertPos, item);
}
3253 
// Removes the element at position (index) from (vec).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    const auto removePos = vec.begin() + index;
    vec.erase(removePos);
}
3259 
3260 #else // #if VMA_USE_STL_VECTOR
3261 
3262 /* Class with interface compatible with subset of std::vector.
3263 T must be POD because constructors and destructors are not called and memcpy is
3264 used for these objects. */
3265 template<typename T, typename AllocatorT>
3266 class VmaVector
3267 {
3268 public:
3269  typedef T value_type;
3270 
3271  VmaVector(const AllocatorT& allocator) :
3272  m_Allocator(allocator),
3273  m_pArray(VMA_NULL),
3274  m_Count(0),
3275  m_Capacity(0)
3276  {
3277  }
3278 
3279  VmaVector(size_t count, const AllocatorT& allocator) :
3280  m_Allocator(allocator),
3281  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3282  m_Count(count),
3283  m_Capacity(count)
3284  {
3285  }
3286 
    // Copy constructor: allocates a buffer sized exactly to src's element
    // count and memcpy's the contents (T must be POD - no constructors run).
    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }
3298 
3299  ~VmaVector()
3300  {
3301  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3302  }
3303 
    // Copy assignment: resizes to rhs's element count and memcpy's the
    // contents (T must be POD - constructors/destructors are not invoked).
    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }
3316 
3317  bool empty() const { return m_Count == 0; }
3318  size_t size() const { return m_Count; }
3319  T* data() { return m_pArray; }
3320  const T* data() const { return m_pArray; }
3321 
3322  T& operator[](size_t index)
3323  {
3324  VMA_HEAVY_ASSERT(index < m_Count);
3325  return m_pArray[index];
3326  }
3327  const T& operator[](size_t index) const
3328  {
3329  VMA_HEAVY_ASSERT(index < m_Count);
3330  return m_pArray[index];
3331  }
3332 
3333  T& front()
3334  {
3335  VMA_HEAVY_ASSERT(m_Count > 0);
3336  return m_pArray[0];
3337  }
3338  const T& front() const
3339  {
3340  VMA_HEAVY_ASSERT(m_Count > 0);
3341  return m_pArray[0];
3342  }
3343  T& back()
3344  {
3345  VMA_HEAVY_ASSERT(m_Count > 0);
3346  return m_pArray[m_Count - 1];
3347  }
3348  const T& back() const
3349  {
3350  VMA_HEAVY_ASSERT(m_Count > 0);
3351  return m_pArray[m_Count - 1];
3352  }
3353 
3354  void reserve(size_t newCapacity, bool freeMemory = false)
3355  {
3356  newCapacity = VMA_MAX(newCapacity, m_Count);
3357 
3358  if((newCapacity < m_Capacity) && !freeMemory)
3359  {
3360  newCapacity = m_Capacity;
3361  }
3362 
3363  if(newCapacity != m_Capacity)
3364  {
3365  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3366  if(m_Count != 0)
3367  {
3368  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3369  }
3370  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3371  m_Capacity = newCapacity;
3372  m_pArray = newArray;
3373  }
3374  }
3375 
3376  void resize(size_t newCount, bool freeMemory = false)
3377  {
3378  size_t newCapacity = m_Capacity;
3379  if(newCount > m_Capacity)
3380  {
3381  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3382  }
3383  else if(freeMemory)
3384  {
3385  newCapacity = newCount;
3386  }
3387 
3388  if(newCapacity != m_Capacity)
3389  {
3390  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3391  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3392  if(elementsToCopy != 0)
3393  {
3394  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3395  }
3396  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3397  m_Capacity = newCapacity;
3398  m_pArray = newArray;
3399  }
3400 
3401  m_Count = newCount;
3402  }
3403 
3404  void clear(bool freeMemory = false)
3405  {
3406  resize(0, freeMemory);
3407  }
3408 
3409  void insert(size_t index, const T& src)
3410  {
3411  VMA_HEAVY_ASSERT(index <= m_Count);
3412  const size_t oldCount = size();
3413  resize(oldCount + 1);
3414  if(index < oldCount)
3415  {
3416  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
3417  }
3418  m_pArray[index] = src;
3419  }
3420 
3421  void remove(size_t index)
3422  {
3423  VMA_HEAVY_ASSERT(index < m_Count);
3424  const size_t oldCount = size();
3425  if(index < oldCount - 1)
3426  {
3427  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
3428  }
3429  resize(oldCount - 1);
3430  }
3431 
3432  void push_back(const T& src)
3433  {
3434  const size_t newIndex = size();
3435  resize(newIndex + 1);
3436  m_pArray[newIndex] = src;
3437  }
3438 
3439  void pop_back()
3440  {
3441  VMA_HEAVY_ASSERT(m_Count > 0);
3442  resize(size() - 1);
3443  }
3444 
3445  void push_front(const T& src)
3446  {
3447  insert(0, src);
3448  }
3449 
3450  void pop_front()
3451  {
3452  VMA_HEAVY_ASSERT(m_Count > 0);
3453  remove(0);
3454  }
3455 
3456  typedef T* iterator;
3457 
3458  iterator begin() { return m_pArray; }
3459  iterator end() { return m_pArray + m_Count; }
3460 
3461 private:
3462  AllocatorT m_Allocator;
3463  T* m_pArray;
3464  size_t m_Count;
3465  size_t m_Capacity;
3466 };
3467 
// Free-function wrapper over VmaVector::insert, providing a uniform call
// syntax (presumably so code also compiles against std::vector when
// VMA_USE_STL_VECTOR is enabled — the #endif below closes that branch).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
3473 
// Free-function wrapper over VmaVector::remove; counterpart of VmaVectorInsert.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
3479 
3480 #endif // #if VMA_USE_STL_VECTOR
3481 
// Inserts value into a vector kept sorted according to CmpLess and returns
// the index at which it was placed (first position not less than value).
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBegin = vector.data();
    typename VectorT::value_type* const pEnd = pBegin + vector.size();
    const size_t insertIndex = (size_t)(
        VmaBinaryFindFirstNotLess(pBegin, pEnd, value, CmpLess()) - pBegin);
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3493 
// Removes the first element equivalent to value from a sorted vector.
// Returns true if an element was found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if(it == vector.end())
    {
        return false;
    }
    // Binary search found the first element >= value; it is equivalent to
    // value iff neither compares less than the other.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    const size_t indexToRemove = it - vector.begin();
    VmaVectorRemove(vector, indexToRemove);
    return true;
}
3511 
3512 template<typename CmpLess, typename IterT, typename KeyT>
3513 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
3514 {
3515  CmpLess comparator;
3516  typename IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3517  beg, end, value, comparator);
3518  if(it == end ||
3519  !comparator(*it, value) && !comparator(value, *it))
3520  {
3521  return it;
3522  }
3523  return end;
3524 }
3525 
3527 // class VmaPoolAllocator
3528 
3529 /*
3530 Allocator for objects of type T using a list of arrays (pools) to speed up
3531 allocation. Number of elements that can be allocated is not bounded because
3532 allocator can create multiple blocks.
3533 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers returned by Alloc() become invalid.
    void Clear();
    // Returns pointer to an uninitialized T slot (no constructor is run).
    T* Alloc();
    // Returns a slot previously obtained from Alloc() back to its block's
    // free list (no destructor is run).
    void Free(T* ptr);

private:
    // A slot stores either a live T or, while free, the index of the next
    // free slot within the same block (intrusive free-list link).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of m_ItemsPerBlock items plus the head index of
    // its free list. FirstFreeIndex == UINT32_MAX means the block is full.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Allocates a new full block and appends it to m_ItemBlocks.
    ItemBlock& CreateNewBlock();
};
3564 
// pAllocationCallbacks may be null (Vulkan default allocation is used then —
// per VkAllocationCallbacks convention; the pointer is only stored here).
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
3573 
// Destructor releases all item blocks via Clear().
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
3579 
3580 template<typename T>
3581 void VmaPoolAllocator<T>::Clear()
3582 {
3583  for(size_t i = m_ItemBlocks.size(); i--; )
3584  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3585  m_ItemBlocks.clear();
3586 }
3587 
3588 template<typename T>
3589 T* VmaPoolAllocator<T>::Alloc()
3590 {
3591  for(size_t i = m_ItemBlocks.size(); i--; )
3592  {
3593  ItemBlock& block = m_ItemBlocks[i];
3594  // This block has some free items: Use first one.
3595  if(block.FirstFreeIndex != UINT32_MAX)
3596  {
3597  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3598  block.FirstFreeIndex = pItem->NextFreeIndex;
3599  return &pItem->Value;
3600  }
3601  }
3602 
3603  // No block has free item: Create new one and use it.
3604  ItemBlock& newBlock = CreateNewBlock();
3605  Item* const pItem = &newBlock.pItems[0];
3606  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3607  return &pItem->Value;
3608 }
3609 
// Returns ptr's slot to the free list of the block that owns it.
// Linear in the number of blocks: each block's address range is tested.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // (memcpy instead of a cast — avoids strict-aliasing/alignment
        // complaints when reinterpreting T* as Item*.)
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the head of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
3633 
3634 template<typename T>
3635 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3636 {
3637  ItemBlock newBlock = {
3638  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3639 
3640  m_ItemBlocks.push_back(newBlock);
3641 
3642  // Setup singly-linked list of all free items in this block.
3643  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3644  newBlock.pItems[i].NextFreeIndex = i + 1;
3645  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3646  return m_ItemBlocks.back();
3647 }
3648 
3650 // class VmaRawList, VmaList
3651 
3652 #if VMA_USE_STL_LIST
3653 
3654 #define VmaList std::list
3655 
3656 #else // #if VMA_USE_STL_LIST
3657 
// Node of VmaRawList: doubly-linked, holding the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front item.
    VmaListItem* pNext; // Null for the back item.
    T Value;
};
3665 
3666 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items back to the internal pool allocator.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Overloads without a value append/prepend an item whose Value is
    // left uninitialized; the caller fills it in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Pool allocator providing ItemType nodes (128 items per pool block —
    // see constructor).
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
3710 
// Constructs an empty list; node storage comes from a pool allocator
// configured with 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3720 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // (m_ItemAllocator's own destructor releases the whole pool at once.)
}
3727 
3728 template<typename T>
3729 void VmaRawList<T>::Clear()
3730 {
3731  if(IsEmpty() == false)
3732  {
3733  ItemType* pItem = m_pBack;
3734  while(pItem != VMA_NULL)
3735  {
3736  ItemType* const pPrevItem = pItem->pPrev;
3737  m_ItemAllocator.Free(pItem);
3738  pItem = pPrevItem;
3739  }
3740  m_pFront = VMA_NULL;
3741  m_pBack = VMA_NULL;
3742  m_Count = 0;
3743  }
3744 }
3745 
// Appends a node with uninitialized Value and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        // First item becomes both front and back. Note: front/back pointers
        // may be stale here after a Pop of the last item — the IsEmpty()
        // (m_Count == 0) check, not a null check, is what makes this safe.
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link behind current back item.
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
3767 
// Prepends a node with uninitialized Value and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        // First item becomes both front and back (see note in PushBack about
        // relying on IsEmpty() rather than null front/back pointers).
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        // Link in front of current front item.
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
3789 
// Appends a node and copies value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
3797 
// Prepends a node and copies value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
3805 
// Removes the back item and returns its node to the pool allocator.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    // NOTE(review): when the last remaining item is popped, m_pFront is left
    // pointing at the freed node; correctness relies on callers checking
    // IsEmpty() (m_Count == 0) before using Front(), as PushBack/PushFront do.
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
3820 
// Removes the front item and returns its node to the pool allocator.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    // NOTE(review): mirror of PopBack — popping the last item leaves m_pBack
    // stale; IsEmpty() checks elsewhere compensate.
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
3835 
// Unlinks pItem from the list and returns its node to the pool allocator.
// pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix predecessor's next link, or the front pointer if pItem is front.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix successor's prev link, or the back pointer if pItem is back.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
3865 
// Inserts a new node (Value uninitialized) before pItem and returns it.
// pItem == null means insert at the end (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front item; new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
3891 
// Inserts a new node (Value uninitialized) after pItem and returns it.
// pItem == null means insert at the beginning (PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back item; new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
3917 
// Inserts value before pItem (null pItem = append) and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
3925 
// Inserts value after pItem (null pItem = prepend) and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
3933 
/*
List with interface compatible with a subset of std::list, implemented on
top of VmaRawList. AllocatorT is expected to expose m_pCallbacks
(VkAllocationCallbacks*), as VmaStlAllocator does.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        // Default-constructed iterator is not associated with any list.
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing end() (null m_pItem) yields the last element.
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        // Post-increment/decrement return the iterator's previous position.
        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from the mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing cend() yields the last element.
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before `it`; returns iterator to the inserted element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4118 
4119 #endif // #if VMA_USE_STL_LIST
4120 
4122 // class VmaMap
4123 
4124 // Unused in this version.
4125 #if 0
4126 
4127 #if VMA_USE_STL_UNORDERED_MAP
4128 
4129 #define VmaPair std::pair
4130 
4131 #define VMA_MAP_TYPE(KeyT, ValueT) \
4132  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4133 
4134 #else // #if VMA_USE_STL_UNORDERED_MAP
4135 
// Minimal std::pair substitute (dead code: inside `#if 0` — see above).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4145 
4146 /* Class compatible with subset of interface of std::unordered_map.
4147 KeyT, ValueT must be POD because they will be stored in VmaVector.
4148 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key; lookup is binary search over this vector.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4168 
4169 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4170 
// Orders VmaPairs by `first` only; second overload allows comparing a pair
// against a bare key in binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4183 
// Inserts pair at its sorted position (by key). Duplicate keys are not
// rejected here.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4194 
// Binary-searches for key; returns iterator to the pair or end() if absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4212 
// Removes the pair at `it`, shifting following pairs left.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4218 
4219 #endif // #if VMA_USE_STL_UNORDERED_MAP
4220 
4221 #endif // #if 0
4222 
4224 
4225 class VmaDeviceMemoryBlock;
4226 
4227 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4228 
/*
Represents a single memory allocation: either a suballocation within a
VmaDeviceMemoryBlock or a dedicated VkDeviceMemory object. Constructed in
ALLOCATION_TYPE_NONE state, then initialized exactly once via
InitBlockAllocation, InitDedicatedAllocation, or InitLost.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: set when created with persistent mapping;
    // low 7 bits count vmaMapMemory()/vmaUnmapMemory() nesting.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData is an owned string copy.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Not yet initialized.
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a device memory block.
        ALLOCATION_TYPE_DEDICATED, // Owns its own VkDeviceMemory.
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes as a suballocation of `block`. Must be called exactly once,
    // while still in ALLOCATION_TYPE_NONE state.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an already-lost block allocation (null block, zero
    // offset). Requires m_LastUseFrameIndex already set to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated
    // allocation. Valid only for ALLOCATION_TYPE_DEDICATED.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Anonymous union: which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
4445 
4446 /*
4447 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
4448 allocated memory block or free.
4449 */
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation; // Null when this range is free.
    VmaSuballocationType type;
};
4457 
4458 // Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
4473 
// List of suballocations describing one VkDeviceMemory block's contents.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
4478 
4479 /*
4480 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
4481 
4482 If canMakeOtherLost was false:
4483 - item points to a FREE suballocation.
4484 - itemsToMakeLostCount is 0.
4485 
4486 If canMakeOtherLost was true:
4487 - item points to first of sequence of suballocations, which are either FREE,
4488  or point to VmaAllocations that can become lost.
4489 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4490  the requested allocation to succeed.
4491 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;       // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize;  // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize;  // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;  // First suballocation in the sequence covered by this request (see comment above).
    size_t itemsToMakeLostCount;          // Number of VmaAllocations that must be made lost for the request to succeed.

    // Heuristic cost of fulfilling this request: bytes of allocations sacrificed
    // plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
4505 
4506 /*
4507 Data structure used for bookkeeping of allocations and unused ranges of memory
4508 in a single VkDeviceMemory block.
4509 */
// Abstract interface for the bookkeeping of one VkDeviceMemory block.
// Concrete strategies: VmaBlockMetadata_Generic, VmaBlockMetadata_Linear.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata() : m_Size(0) { }
    virtual ~VmaBlockMetadata() { }
    // Always call after construction. size = total size of the block, in bytes.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    // Total block size, as passed to Init().
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    // Fills outInfo with statistics describing this block's allocations.
    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Adds this block's statistics to inoutStats. Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    // When canMakeOtherLost is true, the request may require making some existing
    // allocations lost first (see VmaAllocationRequest).
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations referenced by pAllocationRequest; returns false
    // if the request can no longer be satisfied.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // NOTE(review): presumably makes lost every eligible allocation and returns
    // how many were made lost - confirm against the implementations.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // pBlockData points to the block's (mapped) memory contents.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
#if VMA_STATS_STRING_ENABLED
    // Helpers shared by derived classes to implement PrintDetailedMap().
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total block size, set in Init().
};
4587 
// Default metadata implementation: keeps all suballocations (taken and free) in
// a list, plus an auxiliary vector of free suballocations sorted by size.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every suballocation is either taken or free, so allocations = total - free.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;       // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Cached total of free bytes, returned by GetSumFreeSize().
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4677 
4678 /*
4679 Allocations and their references in internal data structure look like this:
4680 
4681 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
4682 
4683  0 +-------+
4684  | |
4685  | |
4686  | |
4687  +-------+
4688  | Alloc | 1st[m_1stNullItemsBeginCount]
4689  +-------+
4690  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4691  +-------+
4692  | ... |
4693  +-------+
4694  | Alloc | 1st[1st.size() - 1]
4695  +-------+
4696  | |
4697  | |
4698  | |
4699 GetSize() +-------+
4700 
4701 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
4702 
4703  0 +-------+
4704  | Alloc | 2nd[0]
4705  +-------+
4706  | Alloc | 2nd[1]
4707  +-------+
4708  | ... |
4709  +-------+
4710  | Alloc | 2nd[2nd.size() - 1]
4711  +-------+
4712  | |
4713  | |
4714  | |
4715  +-------+
4716  | Alloc | 1st[m_1stNullItemsBeginCount]
4717  +-------+
4718  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4719  +-------+
4720  | ... |
4721  +-------+
4722  | Alloc | 1st[1st.size() - 1]
4723  +-------+
4724  | |
4725 GetSize() +-------+
4726 
4727 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
4728 
4729  0 +-------+
4730  | |
4731  | |
4732  | |
4733  +-------+
4734  | Alloc | 1st[m_1stNullItemsBeginCount]
4735  +-------+
4736  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
4737  +-------+
4738  | ... |
4739  +-------+
4740  | Alloc | 1st[1st.size() - 1]
4741  +-------+
4742  | |
4743  | |
4744  | |
4745  +-------+
4746  | Alloc | 2nd[2nd.size() - 1]
4747  +-------+
4748  | ... |
4749  +-------+
4750  | Alloc | 2nd[1]
4751  +-------+
4752  | Alloc | 2nd[0]
4753 GetSize() +-------+
4754 
4755 */
// Metadata implementation for linear allocation: stack, double stack and
// ring-buffer usage patterns. See the diagrams in the comment above.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;  // Cached total of free bytes.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;   // 0 or 1 - selects which of the two vectors currently acts as "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
4853 
4854 /*
4855 Represents a single block of device memory (`VkDeviceMemory`) with all the
4856 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4857 
4858 Thread-safety: This class must be externally synchronized.
4859 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block (generic or linear strategy).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    // Destructor only asserts correct shutdown: memory must be unmapped and
    // already released via Destroy().
    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        bool linearAlgorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    // Base pointer of the mapped memory; null when the block is not mapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. NOTE(review): 'count' presumably adjusts m_MapCount by
    // that many references - confirm in implementation.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;   // Outstanding map references; must be 0 at destruction.
    void* m_pMappedData;
};
4922 
// Strict total order on raw pointers, for sorted containers keyed by pointer value.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        // Comparing unrelated object pointers with '<' is unspecified behavior in
        // C++ ([expr.rel]). Comparing their integer representations gives a
        // well-defined total order (same result on all common flat-memory platforms).
        return (uintptr_t)lhs < (uintptr_t)rhs;
    }
};
4930 
4931 class VmaDefragmentator;
4932 
4933 /*
4934 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4935 Vulkan memory type.
4936 
4937 Synchronized internally with a mutex.
4938 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        bool linearAlgorithm);
    ~VmaBlockVector();

    // NOTE(review): presumably pre-creates m_MinBlockCount blocks - confirm in implementation.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    bool UsesLinearAlgorithm() const { return m_LinearAlgorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the defragmentator for this vector, creating it on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // maxBytesToMove / maxAllocationsToMove are in-out budgets shared across
    // multiple block vectors during a whole-allocator defragmentation.
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const bool m_LinearAlgorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
5050 
// Implementation behind the VmaPool handle: one VmaBlockVector plus an id
// that is assigned exactly once after creation.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // May be called only while m_Id is still 0 (asserts otherwise).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
5073 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation, within the byte/allocation budgets passed to Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;  // The block vector being defragmented.
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;     // Running total exposed via GetBytesMoved().
    uint32_t m_AllocationsMoved;   // Running total exposed via GetAllocationsMoved().

    // One allocation registered for defragmentation, plus an optional caller flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;  // NOTE(review): presumably set when the allocation is moved - confirm.

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo entries by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state gathered during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // The block has non-movable allocations when not all of its allocations
        // were registered for defragmentation (counts differ).
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo for "Descending"; the name is
        // kept because it may be called from code outside this view.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries by underlying block pointer value.
    // NOTE(review): raw '<' on unrelated pointers is unspecified in C++,
    // though well-behaved on common platforms.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs a single pass of moves, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic filter deciding whether moving between the given positions is worthwhile.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving; pChanged may be null.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
5203 
#if VMA_RECORDING_ENABLED

// Writes a trace of allocator calls and their parameters to a file (m_File),
// optionally serialized with a mutex for multithreaded use.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes device/memory configuration so the recorded calls can be interpreted.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public allocator entry point; each takes the
    // current frame index plus the parameters of the call being recorded.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Timestamp and calling-thread id attached to each recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Renders pUserData for the trace. NOTE(review): presumably either the
    // user-data string itself or the pointer formatted into m_PtrStr,
    // depending on allocFlags - confirm in implementation.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];   // 16 characters + null terminator.
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;            // Output trace file.
    VMA_MUTEX m_FileMutex;   // Guards m_File when m_UseMutex is set.
    int64_t m_Freq;          // NOTE(review): likely a performance-counter frequency used to compute 'time' - confirm.
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};

#endif // #if VMA_RECORDING_ENABLED
5303 
5304 // Main allocator object.
5305 struct VmaAllocator_T
5306 {
5307  VMA_CLASS_NO_COPY(VmaAllocator_T)
5308 public:
5309  bool m_UseMutex;
5310  bool m_UseKhrDedicatedAllocation;
5311  VkDevice m_hDevice;
5312  bool m_AllocationCallbacksSpecified;
5313  VkAllocationCallbacks m_AllocationCallbacks;
5314  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
5315 
5316  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
5317  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
5318  VMA_MUTEX m_HeapSizeLimitMutex;
5319 
5320  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
5321  VkPhysicalDeviceMemoryProperties m_MemProps;
5322 
5323  // Default pools.
5324  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
5325 
5326  // Each vector is sorted by memory (handle value).
5327  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
5328  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
5329  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
5330 
5331  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
5332  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
5333  ~VmaAllocator_T();
5334 
5335  const VkAllocationCallbacks* GetAllocationCallbacks() const
5336  {
5337  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
5338  }
5339  const VmaVulkanFunctions& GetVulkanFunctions() const
5340  {
5341  return m_VulkanFunctions;
5342  }
5343 
5344  VkDeviceSize GetBufferImageGranularity() const
5345  {
5346  return VMA_MAX(
5347  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
5348  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
5349  }
5350 
5351  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
5352  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
5353 
5354  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
5355  {
5356  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
5357  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
5358  }
5359  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
5360  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
5361  {
5362  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
5363  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
5364  }
5365  // Minimum alignment for all allocations in specific memory type.
5366  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
5367  {
5368  return IsMemoryTypeNonCoherent(memTypeIndex) ?
5369  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
5370  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
5371  }
5372 
5373  bool IsIntegratedGpu() const
5374  {
5375  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
5376  }
5377 
5378 #if VMA_RECORDING_ENABLED
5379  VmaRecorder* GetRecorder() const { return m_pRecorder; }
5380 #endif
5381 
5382  void GetBufferMemoryRequirements(
5383  VkBuffer hBuffer,
5384  VkMemoryRequirements& memReq,
5385  bool& requiresDedicatedAllocation,
5386  bool& prefersDedicatedAllocation) const;
5387  void GetImageMemoryRequirements(
5388  VkImage hImage,
5389  VkMemoryRequirements& memReq,
5390  bool& requiresDedicatedAllocation,
5391  bool& prefersDedicatedAllocation) const;
5392 
5393  // Main allocation function.
5394  VkResult AllocateMemory(
5395  const VkMemoryRequirements& vkMemReq,
5396  bool requiresDedicatedAllocation,
5397  bool prefersDedicatedAllocation,
5398  VkBuffer dedicatedBuffer,
5399  VkImage dedicatedImage,
5400  const VmaAllocationCreateInfo& createInfo,
5401  VmaSuballocationType suballocType,
5402  VmaAllocation* pAllocation);
5403 
5404  // Main deallocation function.
5405  void FreeMemory(const VmaAllocation allocation);
5406 
5407  void CalculateStats(VmaStats* pStats);
5408 
5409 #if VMA_STATS_STRING_ENABLED
5410  void PrintDetailedMap(class VmaJsonWriter& json);
5411 #endif
5412 
5413  VkResult Defragment(
5414  VmaAllocation* pAllocations,
5415  size_t allocationCount,
5416  VkBool32* pAllocationsChanged,
5417  const VmaDefragmentationInfo* pDefragmentationInfo,
5418  VmaDefragmentationStats* pDefragmentationStats);
5419 
5420  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
5421  bool TouchAllocation(VmaAllocation hAllocation);
5422 
5423  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
5424  void DestroyPool(VmaPool pool);
5425  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
5426 
5427  void SetCurrentFrameIndex(uint32_t frameIndex);
5428  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
5429 
5430  void MakePoolAllocationsLost(
5431  VmaPool hPool,
5432  size_t* pLostAllocationCount);
5433  VkResult CheckPoolCorruption(VmaPool hPool);
5434  VkResult CheckCorruption(uint32_t memoryTypeBits);
5435 
5436  void CreateLostAllocation(VmaAllocation* pAllocation);
5437 
5438  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
5439  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
5440 
5441  VkResult Map(VmaAllocation hAllocation, void** ppData);
5442  void Unmap(VmaAllocation hAllocation);
5443 
5444  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
5445  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
5446 
5447  void FlushOrInvalidateAllocation(
5448  VmaAllocation hAllocation,
5449  VkDeviceSize offset, VkDeviceSize size,
5450  VMA_CACHE_OPERATION op);
5451 
5452  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
5453 
5454 private:
5455  VkDeviceSize m_PreferredLargeHeapBlockSize;
5456 
5457  VkPhysicalDevice m_PhysicalDevice;
5458  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
5459 
5460  VMA_MUTEX m_PoolsMutex;
5461  // Protected by m_PoolsMutex. Sorted by pointer value.
5462  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
5463  uint32_t m_NextPoolId;
5464 
5465  VmaVulkanFunctions m_VulkanFunctions;
5466 
5467 #if VMA_RECORDING_ENABLED
5468  VmaRecorder* m_pRecorder;
5469 #endif
5470 
5471  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
5472 
5473  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
5474 
5475  VkResult AllocateMemoryOfType(
5476  VkDeviceSize size,
5477  VkDeviceSize alignment,
5478  bool dedicatedAllocation,
5479  VkBuffer dedicatedBuffer,
5480  VkImage dedicatedImage,
5481  const VmaAllocationCreateInfo& createInfo,
5482  uint32_t memTypeIndex,
5483  VmaSuballocationType suballocType,
5484  VmaAllocation* pAllocation);
5485 
5486  // Allocates and registers new VkDeviceMemory specifically for single allocation.
5487  VkResult AllocateDedicatedMemory(
5488  VkDeviceSize size,
5489  VmaSuballocationType suballocType,
5490  uint32_t memTypeIndex,
5491  bool map,
5492  bool isUserDataString,
5493  void* pUserData,
5494  VkBuffer dedicatedBuffer,
5495  VkImage dedicatedImage,
5496  VmaAllocation* pAllocation);
5497 
5498  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
5499  void FreeDedicatedMemory(VmaAllocation allocation);
5500 };
5501 
5503 // Memory allocation #2 after VmaAllocator_T definition
5504 
5505 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
5506 {
5507  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
5508 }
5509 
5510 static void VmaFree(VmaAllocator hAllocator, void* ptr)
5511 {
5512  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
5513 }
5514 
// Allocates uninitialized storage for a single object of type T using the
// allocator's callbacks. Does not run any constructor.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}

// Allocates uninitialized storage for an array of 'count' objects of type T.
// NOTE(review): sizeof(T) * count is not checked for overflow — presumably
// callers pass small, internally computed counts; confirm at call sites.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
5526 
5527 template<typename T>
5528 static void vma_delete(VmaAllocator hAllocator, T* ptr)
5529 {
5530  if(ptr != VMA_NULL)
5531  {
5532  ptr->~T();
5533  VmaFree(hAllocator, ptr);
5534  }
5535 }
5536 
5537 template<typename T>
5538 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
5539 {
5540  if(ptr != VMA_NULL)
5541  {
5542  for(size_t i = count; i--; )
5543  ptr[i].~T();
5544  VmaFree(hAllocator, ptr);
5545  }
5546 }
5547 
5549 // VmaStringBuilder
5550 
5551 #if VMA_STATS_STRING_ENABLED
5552 
// Minimal growable character buffer used to build statistics strings.
// The stored data is not null-terminated by Add(); consumers should use
// GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    // Character storage, allocated through the allocator's callbacks.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
5570 
5571 void VmaStringBuilder::Add(const char* pStr)
5572 {
5573  const size_t strLen = strlen(pStr);
5574  if(strLen > 0)
5575  {
5576  const size_t oldCount = m_Data.size();
5577  m_Data.resize(oldCount + strLen);
5578  memcpy(m_Data.data() + oldCount, pStr, strLen);
5579  }
5580 }
5581 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11]; // 10 digits for UINT32_MAX + terminating null.
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21]; // 20 digits for UINT64_MAX + terminating null.
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends the textual representation of a pointer value.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
5602 
5603 #endif // #if VMA_STATS_STRING_ENABLED
5604 
5606 // VmaJsonWriter
5607 
5608 #if VMA_STATS_STRING_ENABLED
5609 
// Streaming JSON writer used to produce statistics dumps.
// Maintains a stack of open collections so separators, key/value colons,
// and indentation are emitted automatically. Output goes to a
// VmaStringBuilder owned by the caller.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Collections: every Begin* must be matched by the corresponding End*.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Strings can be written in one call (WriteString) or assembled
    // piecewise between BeginString() and EndString().
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, repeated once per open collection.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values already written into this collection. In objects,
        // even counts mean the next item is a key, odd counts a value.
        uint32_t valueCount;
        // When true, no newlines/indent are emitted inside this collection.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open objects/arrays, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
5658 
5659 const char* const VmaJsonWriter::INDENT = " ";
5660 
// The writer starts at depth 0, outside of any string.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

// All collections must be closed and any in-progress string ended before
// the writer is destroyed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5673 
5674 void VmaJsonWriter::BeginObject(bool singleLine)
5675 {
5676  VMA_ASSERT(!m_InsideString);
5677 
5678  BeginValue(false);
5679  m_SB.Add('{');
5680 
5681  StackItem item;
5682  item.type = COLLECTION_TYPE_OBJECT;
5683  item.valueCount = 0;
5684  item.singleLineMode = singleLine;
5685  m_Stack.push_back(item);
5686 }
5687 
5688 void VmaJsonWriter::EndObject()
5689 {
5690  VMA_ASSERT(!m_InsideString);
5691 
5692  WriteIndent(true);
5693  m_SB.Add('}');
5694 
5695  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5696  m_Stack.pop_back();
5697 }
5698 
5699 void VmaJsonWriter::BeginArray(bool singleLine)
5700 {
5701  VMA_ASSERT(!m_InsideString);
5702 
5703  BeginValue(false);
5704  m_SB.Add('[');
5705 
5706  StackItem item;
5707  item.type = COLLECTION_TYPE_ARRAY;
5708  item.valueCount = 0;
5709  item.singleLineMode = singleLine;
5710  m_Stack.push_back(item);
5711 }
5712 
5713 void VmaJsonWriter::EndArray()
5714 {
5715  VMA_ASSERT(!m_InsideString);
5716 
5717  WriteIndent(true);
5718  m_SB.Add(']');
5719 
5720  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5721  m_Stack.pop_back();
5722 }
5723 
// Writes a complete string value (object key or plain value) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
5729 
5730 void VmaJsonWriter::BeginString(const char* pStr)
5731 {
5732  VMA_ASSERT(!m_InsideString);
5733 
5734  BeginValue(true);
5735  m_SB.Add('"');
5736  m_InsideString = true;
5737  if(pStr != VMA_NULL && pStr[0] != '\0')
5738  {
5739  ContinueString(pStr);
5740  }
5741 }
5742 
5743 void VmaJsonWriter::ContinueString(const char* pStr)
5744 {
5745  VMA_ASSERT(m_InsideString);
5746 
5747  const size_t strLen = strlen(pStr);
5748  for(size_t i = 0; i < strLen; ++i)
5749  {
5750  char ch = pStr[i];
5751  if(ch == '\\')
5752  {
5753  m_SB.Add("\\\\");
5754  }
5755  else if(ch == '"')
5756  {
5757  m_SB.Add("\\\"");
5758  }
5759  else if(ch >= 32)
5760  {
5761  m_SB.Add(ch);
5762  }
5763  else switch(ch)
5764  {
5765  case '\b':
5766  m_SB.Add("\\b");
5767  break;
5768  case '\f':
5769  m_SB.Add("\\f");
5770  break;
5771  case '\n':
5772  m_SB.Add("\\n");
5773  break;
5774  case '\r':
5775  m_SB.Add("\\r");
5776  break;
5777  case '\t':
5778  m_SB.Add("\\t");
5779  break;
5780  default:
5781  VMA_ASSERT(0 && "Character not currently supported.");
5782  break;
5783  }
5784  }
5785 }
5786 
// Appends a 32-bit number to the string currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a 64-bit number to the string currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer's textual representation to the string being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
5804 
5805 void VmaJsonWriter::EndString(const char* pStr)
5806 {
5807  VMA_ASSERT(m_InsideString);
5808  if(pStr != VMA_NULL && pStr[0] != '\0')
5809  {
5810  ContinueString(pStr);
5811  }
5812  m_SB.Add('"');
5813  m_InsideString = false;
5814 }
5815 
// Writes a 32-bit number as a JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a 64-bit number as a JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
5829 
5830 void VmaJsonWriter::WriteBool(bool b)
5831 {
5832  VMA_ASSERT(!m_InsideString);
5833  BeginValue(false);
5834  m_SB.Add(b ? "true" : "false");
5835 }
5836 
// Writes the JSON literal "null".
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
5843 
// Called before any value is emitted. Writes the separator appropriate for
// the current position in the innermost collection: ": " between an object
// key and its value, ", " plus indentation between consecutive entries, or
// just indentation for the first entry. Also asserts that object keys
// (entries at even valueCount) are strings.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // In an object, even-numbered entries are keys and must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // This entry is a value following its key.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Separator between consecutive entries.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First entry of the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
5872 
5873 void VmaJsonWriter::WriteIndent(bool oneLess)
5874 {
5875  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
5876  {
5877  m_SB.AddNewLine();
5878 
5879  size_t count = m_Stack.size();
5880  if(count > 0 && oneLess)
5881  {
5882  --count;
5883  }
5884  for(size_t i = 0; i < count; ++i)
5885  {
5886  m_SB.Add(INDENT);
5887  }
5888  }
5889 }
5890 
5891 #endif // #if VMA_STATS_STRING_ENABLED
5892 
5894 
// Sets the allocation's user data. When the allocation was created in
// user-data-string mode, the incoming string is deep-copied and the
// previously owned copy is freed; otherwise the raw pointer is stored as-is.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing back the currently owned string would become a
        // use-after-free once it is released below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            // Deep copy, including the terminating null character.
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
5917 
// Re-points a block allocation at a new block/offset (used by
// defragmentation). If the block actually changes, the allocation's mapping
// reference count is transferred: the old block is unmapped that many times
// and the new block mapped the same number of times.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Persistent mapping contributes one extra reference beyond the
        // explicit map count stored in the low bits.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
5939 
5940 VkDeviceSize VmaAllocation_T::GetOffset() const
5941 {
5942  switch(m_Type)
5943  {
5944  case ALLOCATION_TYPE_BLOCK:
5945  return m_BlockAllocation.m_Offset;
5946  case ALLOCATION_TYPE_DEDICATED:
5947  return 0;
5948  default:
5949  VMA_ASSERT(0);
5950  return 0;
5951  }
5952 }
5953 
5954 VkDeviceMemory VmaAllocation_T::GetMemory() const
5955 {
5956  switch(m_Type)
5957  {
5958  case ALLOCATION_TYPE_BLOCK:
5959  return m_BlockAllocation.m_Block->GetDeviceMemory();
5960  case ALLOCATION_TYPE_DEDICATED:
5961  return m_DedicatedAllocation.m_hMemory;
5962  default:
5963  VMA_ASSERT(0);
5964  return VK_NULL_HANDLE;
5965  }
5966 }
5967 
5968 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5969 {
5970  switch(m_Type)
5971  {
5972  case ALLOCATION_TYPE_BLOCK:
5973  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5974  case ALLOCATION_TYPE_DEDICATED:
5975  return m_DedicatedAllocation.m_MemoryTypeIndex;
5976  default:
5977  VMA_ASSERT(0);
5978  return UINT32_MAX;
5979  }
5980 }
5981 
// Returns the host-visible pointer to this allocation's data, or VMA_NULL
// when the allocation is not currently mapped. For block allocations the
// block's mapped base pointer is offset by the allocation's offset.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // The cached pointer and the map count must agree.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
6006 
6007 bool VmaAllocation_T::CanBecomeLost() const
6008 {
6009  switch(m_Type)
6010  {
6011  case ALLOCATION_TYPE_BLOCK:
6012  return m_BlockAllocation.m_CanBecomeLost;
6013  case ALLOCATION_TYPE_DEDICATED:
6014  return false;
6015  default:
6016  VMA_ASSERT(0);
6017  return false;
6018  }
6019 }
6020 
// Returns the custom pool this allocation belongs to. Valid only for block
// allocations — dedicated allocations have no pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
6026 
// Attempts to mark this allocation as lost. Returns true on success, false
// when the allocation was used too recently (within frameInUseCount frames)
// or is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free compare-exchange loop on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — callers should not ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still considered in use by recent frames.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // On CAS failure, CompareExchangeLastUseFrameIndex reloads
            // localLastUseFrameIndex and the loop re-evaluates.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
6058 
6059 #if VMA_STATS_STRING_ENABLED
6060 
6061 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Order and element count must stay in sync with that enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
6070 
// Writes this allocation's parameters as key/value pairs into an already
// open JSON object.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string — print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer — print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are recorded only for allocations created for a buffer
    // or image; zero means no usage was recorded.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
6106 
6107 #endif
6108 
6109 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
6110 {
6111  VMA_ASSERT(IsUserDataString());
6112  if(m_pUserData != VMA_NULL)
6113  {
6114  char* const oldStr = (char*)m_pUserData;
6115  const size_t oldStrLen = strlen(oldStr);
6116  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
6117  m_pUserData = VMA_NULL;
6118  }
6119 }
6120 
// Increments the explicit mapping reference count of a block allocation.
// The count lives in the low bits of m_MapCount; the persistent-map flag
// is masked out of the limit check so a persistently mapped allocation can
// still be mapped explicitly up to 0x7F times.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}
6134 
// Decrements the explicit mapping reference count of a block allocation.
// Asserts on unbalanced unmap (count already zero, ignoring the
// persistent-map flag).
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
6148 
// Maps a dedicated allocation's memory and returns the pointer in *ppData.
// If already mapped, only the reference count is incremented and the cached
// pointer returned; vkMapMemory is called just for the first mapping.
// Returns VK_ERROR_MEMORY_MAP_FAILED when the map count would exceed 0x7F.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: bump the reference count, reuse the cached pointer.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
6185 
// Unmaps a dedicated allocation. Decrements the reference count and calls
// vkUnmapMemory only when the count reaches zero. Asserts on unbalanced
// unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference released: clear the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
6206 
6207 #if VMA_STATS_STRING_ENABLED
6208 
// Writes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range, since
// with 0 or 1 items those statistics add no information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
6256 
6257 #endif // #if VMA_STATS_STRING_ENABLED
6258 
6259 struct VmaSuballocationItemSizeLess
6260 {
6261  bool operator()(
6262  const VmaSuballocationList::iterator lhs,
6263  const VmaSuballocationList::iterator rhs) const
6264  {
6265  return lhs->size < rhs->size;
6266  }
6267  bool operator()(
6268  const VmaSuballocationList::iterator lhs,
6269  VkDeviceSize rhsSize) const
6270  {
6271  return lhs->size < rhsSize;
6272  }
6273 };
6274 
6275 
6277 // class VmaBlockMetadata
6278 
6279 #if VMA_STATS_STRING_ENABLED
6280 
// Opens the JSON object describing one memory block: writes the summary
// fields, then begins the "Suballocations" array. Must be paired with
// PrintDetailedMap_End() after all suballocations are written.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
6303 
// Writes one occupied suballocation as a single-line JSON object:
// the offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}

// Writes one free range as a single-line JSON object with offset,
// the FREE type name, and size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

// Closes the "Suballocations" array and the block's JSON object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
6341 
6342 #endif // #if VMA_STATS_STRING_ENABLED
6343 
6345 // class VmaBlockMetadata_Generic
6346 
// Constructs empty metadata; Init() must be called before use.
// Both containers allocate through the allocator's callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
6358 
// Initializes metadata of an empty block: a single free suballocation
// spanning the whole size, also registered in the by-size lookup list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    m_Suballocations.push_back(suballoc);
    // Register an iterator to the just-inserted (last) element.
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
6376 
// Consistency check over all metadata invariants. Returns true when the
// suballocation list, the by-size free list, and the cached totals all
// agree; false on the first violation found. Used by VMA_HEAVY_ASSERT.
bool VmaBlockMetadata_Generic::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // A suballocation is free exactly when it has no allocation handle.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            if(subAlloc.size < VMA_DEBUG_MARGIN)
            {
                return false;
            }
        }
        else
        {
            // The owning allocation must agree on offset and size.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }

            // Margin required between allocations - previous allocation must be free.
            if(VMA_DEBUG_MARGIN > 0 && !prevFree)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != GetSize()) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
6494 
6495 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
6496 {
6497  if(!m_FreeSuballocationsBySize.empty())
6498  {
6499  return m_FreeSuballocationsBySize.back()->size;
6500  }
6501  else
6502  {
6503  return 0;
6504  }
6505 }
6506 
6507 bool VmaBlockMetadata_Generic::IsEmpty() const
6508 {
6509  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
6510 }
6511 
// Fills outInfo with statistics for this single block: counts, byte totals,
// and min/max sizes of allocations and free ranges. Min fields stay at
// UINT64_MAX and max fields at 0 when the corresponding category is empty;
// averages are left for the caller to derive.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
6545 
// Accumulates this block's statistics into inoutStats (pool-level totals).
// Counters are added; unusedRangeSizeMax is combined as a maximum.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
6556 
6557 #if VMA_STATS_STRING_ENABLED
6558 
// Dumps the full suballocation map of this block as JSON: the summary
// header, then one entry per suballocation (allocation or unused range)
// in address order.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
6583 
6584 #endif // #if VMA_STATS_STRING_ENABLED
6585 
6586 /*
6587 How many suitable free suballocations to analyze before choosing best one.
6588 - Set to 1 to use First-Fit algorithm - first suitable free suballocation will
6589  be chosen.
6590 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
6591  suballocations will be analyzed and best one will be chosen.
6592 - Any other value is also acceptable.
6593 */
6594 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
6595 
/*
Tries to find a place inside this block for an allocation of allocSize bytes
aligned to allocAlignment, honoring bufferImageGranularity and VMA_DEBUG_MARGIN.

First searches the size-sorted free list m_FreeSuballocationsBySize:
- VMA_BEST_FIT: binary search for the first free suballocation big enough,
  then scan upward.
- otherwise: scan starting from the biggest free suballocations.
If that fails and canMakeOtherLost is true, falls back to a brute-force scan
over all suballocations, allowing existing lost-enabled allocations to be
sacrificed; among the candidates the one with the lowest CalcCost() wins.

Fills *pAllocationRequest and returns true on success; false otherwise.
upperAddress is not supported by this metadata type (asserted false).
*/
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE acts as "no candidate found yet" sentinel / worst possible cost.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            // Only free suballocations or allocations that may become lost can be a starting point.
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the cheapest candidate found so far.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // sumItemSize was overwritten iff at least one candidate was accepted.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
6726 
/*
Executes the "make lost" part of a previously created allocation request:
marks itemsToMakeLostCount allocations, starting at pAllocationRequest->item,
as lost and frees their suballocations. On success pAllocationRequest->item
ends up pointing at a FREE suballocation ready to be allocated from.
Returns false if some allocation could not be made lost (request is then
partially applied and must be abandoned).
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next allocation to sacrifice.
        // (Two consecutive free items cannot exist: they are always merged.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; it returns the
            // (possibly different) iterator of the resulting free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
6758 
// Walks all suballocations and makes lost every allocation that can become
// lost for the given frame parameters, freeing its range.
// Returns the number of allocations made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with the previous free item and
            // returns the iterator of the merged result, keeping `it` valid.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
6776 
6777 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
6778 {
6779  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
6780  it != m_Suballocations.end();
6781  ++it)
6782  {
6783  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6784  {
6785  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
6786  {
6787  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
6788  return VK_ERROR_VALIDATION_FAILED_EXT;
6789  }
6790  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
6791  {
6792  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
6793  return VK_ERROR_VALIDATION_FAILED_EXT;
6794  }
6795  }
6796  }
6797 
6798  return VK_SUCCESS;
6799 }
6800 
/*
Commits a previously validated allocation request: converts the free
suballocation request.item into a used one of allocSize bytes at
request.offset, inserting new FREE suballocations for any leftover padding
before and/or after it, and updates m_FreeCount / m_SumFreeSize.
upperAddress is not supported by this metadata type (asserted false).
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    // NOTE: must happen before mutating suballoc below, because unregistration
    // looks the item up by its current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free range is gone, each inserted padding adds one free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
6866 
6867 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
6868 {
6869  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6870  suballocItem != m_Suballocations.end();
6871  ++suballocItem)
6872  {
6873  VmaSuballocation& suballoc = *suballocItem;
6874  if(suballoc.hAllocation == allocation)
6875  {
6876  FreeSuballocation(suballocItem);
6877  VMA_HEAVY_ASSERT(Validate());
6878  return;
6879  }
6880  }
6881  VMA_ASSERT(0 && "Not found!");
6882 }
6883 
6884 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
6885 {
6886  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
6887  suballocItem != m_Suballocations.end();
6888  ++suballocItem)
6889  {
6890  VmaSuballocation& suballoc = *suballocItem;
6891  if(suballoc.offset == offset)
6892  {
6893  FreeSuballocation(suballocItem);
6894  return;
6895  }
6896  }
6897  VMA_ASSERT(0 && "Not found!");
6898 }
6899 
6900 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
6901 {
6902  VkDeviceSize lastSize = 0;
6903  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
6904  {
6905  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
6906 
6907  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
6908  {
6909  VMA_ASSERT(0);
6910  return false;
6911  }
6912  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6913  {
6914  VMA_ASSERT(0);
6915  return false;
6916  }
6917  if(it->size < lastSize)
6918  {
6919  VMA_ASSERT(0);
6920  return false;
6921  }
6922 
6923  lastSize = it->size;
6924  }
6925  return true;
6926 }
6927 
/*
Checks whether an allocation of allocSize bytes, aligned to allocAlignment,
of the given allocType, can be placed starting at suballocItem, honoring
bufferImageGranularity and VMA_DEBUG_MARGIN.

Two modes:
- canMakeOtherLost == false: suballocItem must be FREE and by itself big enough.
- canMakeOtherLost == true: the region may span several consecutive
  suballocations; used ones that can become lost are counted into
  *itemsToMakeLostCount.

On success fills *pOffset (final aligned offset), *pSumFreeSize (free bytes in
the examined region) and *pSumItemSize (bytes of allocations to be made lost),
then returns true. Returns false if the allocation cannot start here.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Used suballocation: acceptable starting point only if it can be made lost now.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the single FREE suballocation must suffice.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
7201 
7202 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7203 {
7204  VMA_ASSERT(item != m_Suballocations.end());
7205  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7206 
7207  VmaSuballocationList::iterator nextItem = item;
7208  ++nextItem;
7209  VMA_ASSERT(nextItem != m_Suballocations.end());
7210  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7211 
7212  item->size += nextItem->size;
7213  --m_FreeCount;
7214  m_Suballocations.erase(nextItem);
7215 }
7216 
/*
Turns the suballocation at suballocItem into a FREE one, updates totals, and
merges it with adjacent free neighbors, keeping m_FreeSuballocationsBySize in
sync (neighbors are unregistered before merging; the merged result is
re-registered). Returns the iterator of the resulting free suballocation,
which may differ from suballocItem when merged with the previous item.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item disappears; remove it from the size-sorted list first.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem; its size changes, so it must be
        // unregistered before the merge and re-registered afterwards.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7268 
7269 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7270 {
7271  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7272  VMA_ASSERT(item->size > 0);
7273 
7274  // You may want to enable this validation at the beginning or at the end of
7275  // this function, depending on what do you want to check.
7276  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7277 
7278  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7279  {
7280  if(m_FreeSuballocationsBySize.empty())
7281  {
7282  m_FreeSuballocationsBySize.push_back(item);
7283  }
7284  else
7285  {
7286  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7287  }
7288  }
7289 
7290  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7291 }
7292 
7293 
/*
Removes `item` (a FREE suballocation) from m_FreeSuballocationsBySize.
Binary-searches for the first entry whose size is not less than item->size,
then scans forward through entries of equal size to find the exact iterator.
Items below VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were never registered
and are skipped. Asserts if a registered item cannot be found.
*/
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Lower bound by size within the sorted vector.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Linear scan through the run of equal-sized entries for the exact iterator.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
7326 
7328 // class VmaBlockMetadata_Linear
7329 
// Constructs empty linear metadata. Both suballocation vectors allocate CPU
// memory through the allocator's callbacks; vector 0 starts as the "1st"
// vector, the 2nd vector mode starts empty, and all null-item counters are 0.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
7341 
// Nothing to do explicitly: the suballocation vectors release their memory
// through their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
7345 
// Initializes metadata for a block of `size` bytes; the whole block starts free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
7351 
/*
Full consistency check of the linear metadata. Verifies:
- the 2nd vector is empty exactly when the mode is SECOND_VECTOR_EMPTY;
- no trailing null items (those should have been pop_back'ed) and the first
  non-null item of the 1st vector sits right after m_1stNullItemsBeginCount;
- null-item counters match the actual vectors' contents;
- suballocation offsets are strictly increasing with VMA_DEBUG_MARGIN spacing,
  in scan order (2nd vector first for ring buffer; 2nd vector in reverse after
  the 1st for double stack);
- each used suballocation agrees with its VmaAllocation's offset/size;
- m_SumFreeSize equals block size minus the sum of used sizes.
Returns false on the first violation.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(suballocations2nd.empty() != (m_2ndVectorMode == SECOND_VECTOR_EMPTY))
    {
        return false;
    }
    if(suballocations1st.empty() && !suballocations2nd.empty() &&
        m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        return false;
    }
    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        if(suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
        // Null item at the end should be just pop_back().
        if(suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        if(suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            return false;
        }
    }

    if(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount > suballocations1st.size())
    {
        return false;
    }
    if(m_2ndNullItemsCount > suballocations2nd.size())
    {
        return false;
    }

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // Ring buffer: 2nd vector occupies addresses before the 1st, so scan it first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // FREE type must coincide with a null allocation handle.
            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    // Leading null items of the 1st vector must be fully free.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE ||
            suballoc.hAllocation != VK_NULL_HANDLE)
        {
            return false;
        }
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }
        if(suballoc.offset < offset)
        {
            return false;
        }
        // NOTE(review): this condition can never be true — `i` starts at
        // m_1stNullItemsBeginCount, so `i < m_1stNullItemsBeginCount` is
        // always false here. Left as-is; appears to be dead code.
        if(i < m_1stNullItemsBeginCount && !currFree)
        {
            return false;
        }

        if(!currFree)
        {
            if(suballoc.hAllocation->GetOffset() != suballoc.offset)
            {
                return false;
            }
            if(suballoc.hAllocation->GetSize() != suballoc.size)
            {
                return false;
            }
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    if(nullItem1stCount != m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount)
    {
        return false;
    }

    // Double stack: 2nd vector grows downward from the top, so scan it in reverse.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            if(currFree != (suballoc.hAllocation == VK_NULL_HANDLE))
            {
                return false;
            }
            if(suballoc.offset < offset)
            {
                return false;
            }

            if(!currFree)
            {
                if(suballoc.hAllocation->GetOffset() != suballoc.offset)
                {
                    return false;
                }
                if(suballoc.hAllocation->GetSize() != suballoc.size)
                {
                    return false;
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        if(nullItem2ndCount != m_2ndNullItemsCount)
        {
            return false;
        }
    }

    if(offset > GetSize())
    {
        return false;
    }
    if(m_SumFreeSize != GetSize() - sumUsedSize)
    {
        return false;
    }

    return true;
}
7554 
7555 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7556 {
7557  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
7558  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7559 }
7560 
/*
Returns the size of the largest contiguous region currently available for a
new allocation, depending on the 2nd-vector mode. Gaps left by freed
allocations inside the vectors are deliberately ignored (see comment below).
*/
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
7624 
7625 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
7626 {
7627  const VkDeviceSize size = GetSize();
7628  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7629  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7630  const size_t suballoc1stCount = suballocations1st.size();
7631  const size_t suballoc2ndCount = suballocations2nd.size();
7632 
7633  outInfo.blockCount = 1;
7634  outInfo.allocationCount = (uint32_t)GetAllocationCount();
7635  outInfo.unusedRangeCount = 0;
7636  outInfo.usedBytes = 0;
7637  outInfo.allocationSizeMin = UINT64_MAX;
7638  outInfo.allocationSizeMax = 0;
7639  outInfo.unusedRangeSizeMin = UINT64_MAX;
7640  outInfo.unusedRangeSizeMax = 0;
7641 
7642  VkDeviceSize lastOffset = 0;
7643 
7644  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7645  {
7646  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7647  size_t nextAlloc2ndIndex = 0;
7648  while(lastOffset < freeSpace2ndTo1stEnd)
7649  {
7650  // Find next non-null allocation or move nextAllocIndex to the end.
7651  while(nextAlloc2ndIndex < suballoc2ndCount &&
7652  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7653  {
7654  ++nextAlloc2ndIndex;
7655  }
7656 
7657  // Found non-null allocation.
7658  if(nextAlloc2ndIndex < suballoc2ndCount)
7659  {
7660  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7661 
7662  // 1. Process free space before this allocation.
7663  if(lastOffset < suballoc.offset)
7664  {
7665  // There is free space from lastOffset to suballoc.offset.
7666  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7667  ++outInfo.unusedRangeCount;
7668  outInfo.unusedBytes += unusedRangeSize;
7669  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7670  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7671  }
7672 
7673  // 2. Process this allocation.
7674  // There is allocation with suballoc.offset, suballoc.size.
7675  outInfo.usedBytes += suballoc.size;
7676  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7677  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7678 
7679  // 3. Prepare for next iteration.
7680  lastOffset = suballoc.offset + suballoc.size;
7681  ++nextAlloc2ndIndex;
7682  }
7683  // We are at the end.
7684  else
7685  {
7686  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7687  if(lastOffset < freeSpace2ndTo1stEnd)
7688  {
7689  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7690  ++outInfo.unusedRangeCount;
7691  outInfo.unusedBytes += unusedRangeSize;
7692  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7693  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7694  }
7695 
7696  // End of loop.
7697  lastOffset = freeSpace2ndTo1stEnd;
7698  }
7699  }
7700  }
7701 
7702  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7703  const VkDeviceSize freeSpace1stTo2ndEnd =
7704  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7705  while(lastOffset < freeSpace1stTo2ndEnd)
7706  {
7707  // Find next non-null allocation or move nextAllocIndex to the end.
7708  while(nextAlloc1stIndex < suballoc1stCount &&
7709  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7710  {
7711  ++nextAlloc1stIndex;
7712  }
7713 
7714  // Found non-null allocation.
7715  if(nextAlloc1stIndex < suballoc1stCount)
7716  {
7717  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7718 
7719  // 1. Process free space before this allocation.
7720  if(lastOffset < suballoc.offset)
7721  {
7722  // There is free space from lastOffset to suballoc.offset.
7723  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7724  ++outInfo.unusedRangeCount;
7725  outInfo.unusedBytes += unusedRangeSize;
7726  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7727  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7728  }
7729 
7730  // 2. Process this allocation.
7731  // There is allocation with suballoc.offset, suballoc.size.
7732  outInfo.usedBytes += suballoc.size;
7733  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7734  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7735 
7736  // 3. Prepare for next iteration.
7737  lastOffset = suballoc.offset + suballoc.size;
7738  ++nextAlloc1stIndex;
7739  }
7740  // We are at the end.
7741  else
7742  {
7743  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7744  if(lastOffset < freeSpace1stTo2ndEnd)
7745  {
7746  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7747  ++outInfo.unusedRangeCount;
7748  outInfo.unusedBytes += unusedRangeSize;
7749  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7750  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7751  }
7752 
7753  // End of loop.
7754  lastOffset = freeSpace1stTo2ndEnd;
7755  }
7756  }
7757 
7758  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7759  {
7760  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7761  while(lastOffset < size)
7762  {
7763  // Find next non-null allocation or move nextAllocIndex to the end.
7764  while(nextAlloc2ndIndex != SIZE_MAX &&
7765  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7766  {
7767  --nextAlloc2ndIndex;
7768  }
7769 
7770  // Found non-null allocation.
7771  if(nextAlloc2ndIndex != SIZE_MAX)
7772  {
7773  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7774 
7775  // 1. Process free space before this allocation.
7776  if(lastOffset < suballoc.offset)
7777  {
7778  // There is free space from lastOffset to suballoc.offset.
7779  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7780  ++outInfo.unusedRangeCount;
7781  outInfo.unusedBytes += unusedRangeSize;
7782  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7783  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7784  }
7785 
7786  // 2. Process this allocation.
7787  // There is allocation with suballoc.offset, suballoc.size.
7788  outInfo.usedBytes += suballoc.size;
7789  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
7790  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
7791 
7792  // 3. Prepare for next iteration.
7793  lastOffset = suballoc.offset + suballoc.size;
7794  --nextAlloc2ndIndex;
7795  }
7796  // We are at the end.
7797  else
7798  {
7799  // There is free space from lastOffset to size.
7800  if(lastOffset < size)
7801  {
7802  const VkDeviceSize unusedRangeSize = size - lastOffset;
7803  ++outInfo.unusedRangeCount;
7804  outInfo.unusedBytes += unusedRangeSize;
7805  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
7806  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
7807  }
7808 
7809  // End of loop.
7810  lastOffset = size;
7811  }
7812  }
7813  }
7814 
7815  outInfo.unusedBytes = size - outInfo.usedBytes;
7816 }
7817 
7818 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
7819 {
7820  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7821  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7822  const VkDeviceSize size = GetSize();
7823  const size_t suballoc1stCount = suballocations1st.size();
7824  const size_t suballoc2ndCount = suballocations2nd.size();
7825 
7826  inoutStats.size += size;
7827 
7828  VkDeviceSize lastOffset = 0;
7829 
7830  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
7831  {
7832  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
7833  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
7834  while(lastOffset < freeSpace2ndTo1stEnd)
7835  {
7836  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7837  while(nextAlloc2ndIndex < suballoc2ndCount &&
7838  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7839  {
7840  ++nextAlloc2ndIndex;
7841  }
7842 
7843  // Found non-null allocation.
7844  if(nextAlloc2ndIndex < suballoc2ndCount)
7845  {
7846  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7847 
7848  // 1. Process free space before this allocation.
7849  if(lastOffset < suballoc.offset)
7850  {
7851  // There is free space from lastOffset to suballoc.offset.
7852  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7853  inoutStats.unusedSize += unusedRangeSize;
7854  ++inoutStats.unusedRangeCount;
7855  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7856  }
7857 
7858  // 2. Process this allocation.
7859  // There is allocation with suballoc.offset, suballoc.size.
7860  ++inoutStats.allocationCount;
7861 
7862  // 3. Prepare for next iteration.
7863  lastOffset = suballoc.offset + suballoc.size;
7864  ++nextAlloc2ndIndex;
7865  }
7866  // We are at the end.
7867  else
7868  {
7869  if(lastOffset < freeSpace2ndTo1stEnd)
7870  {
7871  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
7872  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
7873  inoutStats.unusedSize += unusedRangeSize;
7874  ++inoutStats.unusedRangeCount;
7875  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7876  }
7877 
7878  // End of loop.
7879  lastOffset = freeSpace2ndTo1stEnd;
7880  }
7881  }
7882  }
7883 
7884  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
7885  const VkDeviceSize freeSpace1stTo2ndEnd =
7886  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
7887  while(lastOffset < freeSpace1stTo2ndEnd)
7888  {
7889  // Find next non-null allocation or move nextAllocIndex to the end.
7890  while(nextAlloc1stIndex < suballoc1stCount &&
7891  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
7892  {
7893  ++nextAlloc1stIndex;
7894  }
7895 
7896  // Found non-null allocation.
7897  if(nextAlloc1stIndex < suballoc1stCount)
7898  {
7899  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
7900 
7901  // 1. Process free space before this allocation.
7902  if(lastOffset < suballoc.offset)
7903  {
7904  // There is free space from lastOffset to suballoc.offset.
7905  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7906  inoutStats.unusedSize += unusedRangeSize;
7907  ++inoutStats.unusedRangeCount;
7908  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7909  }
7910 
7911  // 2. Process this allocation.
7912  // There is allocation with suballoc.offset, suballoc.size.
7913  ++inoutStats.allocationCount;
7914 
7915  // 3. Prepare for next iteration.
7916  lastOffset = suballoc.offset + suballoc.size;
7917  ++nextAlloc1stIndex;
7918  }
7919  // We are at the end.
7920  else
7921  {
7922  if(lastOffset < freeSpace1stTo2ndEnd)
7923  {
7924  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
7925  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
7926  inoutStats.unusedSize += unusedRangeSize;
7927  ++inoutStats.unusedRangeCount;
7928  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7929  }
7930 
7931  // End of loop.
7932  lastOffset = freeSpace1stTo2ndEnd;
7933  }
7934  }
7935 
7936  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
7937  {
7938  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
7939  while(lastOffset < size)
7940  {
7941  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
7942  while(nextAlloc2ndIndex != SIZE_MAX &&
7943  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
7944  {
7945  --nextAlloc2ndIndex;
7946  }
7947 
7948  // Found non-null allocation.
7949  if(nextAlloc2ndIndex != SIZE_MAX)
7950  {
7951  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
7952 
7953  // 1. Process free space before this allocation.
7954  if(lastOffset < suballoc.offset)
7955  {
7956  // There is free space from lastOffset to suballoc.offset.
7957  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
7958  inoutStats.unusedSize += unusedRangeSize;
7959  ++inoutStats.unusedRangeCount;
7960  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7961  }
7962 
7963  // 2. Process this allocation.
7964  // There is allocation with suballoc.offset, suballoc.size.
7965  ++inoutStats.allocationCount;
7966 
7967  // 3. Prepare for next iteration.
7968  lastOffset = suballoc.offset + suballoc.size;
7969  --nextAlloc2ndIndex;
7970  }
7971  // We are at the end.
7972  else
7973  {
7974  if(lastOffset < size)
7975  {
7976  // There is free space from lastOffset to size.
7977  const VkDeviceSize unusedRangeSize = size - lastOffset;
7978  inoutStats.unusedSize += unusedRangeSize;
7979  ++inoutStats.unusedRangeCount;
7980  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
7981  }
7982 
7983  // End of loop.
7984  lastOffset = size;
7985  }
7986  }
7987  }
7988 }
7989 
7990 #if VMA_STATS_STRING_ENABLED
7991 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
7992 {
7993  const VkDeviceSize size = GetSize();
7994  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
7995  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
7996  const size_t suballoc1stCount = suballocations1st.size();
7997  const size_t suballoc2ndCount = suballocations2nd.size();
7998 
7999  // FIRST PASS
8000 
8001  size_t unusedRangeCount = 0;
8002  VkDeviceSize usedBytes = 0;
8003 
8004  VkDeviceSize lastOffset = 0;
8005 
8006  size_t alloc2ndCount = 0;
8007  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8008  {
8009  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8010  size_t nextAlloc2ndIndex = 0;
8011  while(lastOffset < freeSpace2ndTo1stEnd)
8012  {
8013  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8014  while(nextAlloc2ndIndex < suballoc2ndCount &&
8015  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8016  {
8017  ++nextAlloc2ndIndex;
8018  }
8019 
8020  // Found non-null allocation.
8021  if(nextAlloc2ndIndex < suballoc2ndCount)
8022  {
8023  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8024 
8025  // 1. Process free space before this allocation.
8026  if(lastOffset < suballoc.offset)
8027  {
8028  // There is free space from lastOffset to suballoc.offset.
8029  ++unusedRangeCount;
8030  }
8031 
8032  // 2. Process this allocation.
8033  // There is allocation with suballoc.offset, suballoc.size.
8034  ++alloc2ndCount;
8035  usedBytes += suballoc.size;
8036 
8037  // 3. Prepare for next iteration.
8038  lastOffset = suballoc.offset + suballoc.size;
8039  ++nextAlloc2ndIndex;
8040  }
8041  // We are at the end.
8042  else
8043  {
8044  if(lastOffset < freeSpace2ndTo1stEnd)
8045  {
8046  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8047  ++unusedRangeCount;
8048  }
8049 
8050  // End of loop.
8051  lastOffset = freeSpace2ndTo1stEnd;
8052  }
8053  }
8054  }
8055 
8056  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8057  size_t alloc1stCount = 0;
8058  const VkDeviceSize freeSpace1stTo2ndEnd =
8059  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8060  while(lastOffset < freeSpace1stTo2ndEnd)
8061  {
8062  // Find next non-null allocation or move nextAllocIndex to the end.
8063  while(nextAlloc1stIndex < suballoc1stCount &&
8064  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8065  {
8066  ++nextAlloc1stIndex;
8067  }
8068 
8069  // Found non-null allocation.
8070  if(nextAlloc1stIndex < suballoc1stCount)
8071  {
8072  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8073 
8074  // 1. Process free space before this allocation.
8075  if(lastOffset < suballoc.offset)
8076  {
8077  // There is free space from lastOffset to suballoc.offset.
8078  ++unusedRangeCount;
8079  }
8080 
8081  // 2. Process this allocation.
8082  // There is allocation with suballoc.offset, suballoc.size.
8083  ++alloc1stCount;
8084  usedBytes += suballoc.size;
8085 
8086  // 3. Prepare for next iteration.
8087  lastOffset = suballoc.offset + suballoc.size;
8088  ++nextAlloc1stIndex;
8089  }
8090  // We are at the end.
8091  else
8092  {
8093  if(lastOffset < size)
8094  {
8095  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8096  ++unusedRangeCount;
8097  }
8098 
8099  // End of loop.
8100  lastOffset = freeSpace1stTo2ndEnd;
8101  }
8102  }
8103 
8104  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8105  {
8106  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8107  while(lastOffset < size)
8108  {
8109  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8110  while(nextAlloc2ndIndex != SIZE_MAX &&
8111  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8112  {
8113  --nextAlloc2ndIndex;
8114  }
8115 
8116  // Found non-null allocation.
8117  if(nextAlloc2ndIndex != SIZE_MAX)
8118  {
8119  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8120 
8121  // 1. Process free space before this allocation.
8122  if(lastOffset < suballoc.offset)
8123  {
8124  // There is free space from lastOffset to suballoc.offset.
8125  ++unusedRangeCount;
8126  }
8127 
8128  // 2. Process this allocation.
8129  // There is allocation with suballoc.offset, suballoc.size.
8130  ++alloc2ndCount;
8131  usedBytes += suballoc.size;
8132 
8133  // 3. Prepare for next iteration.
8134  lastOffset = suballoc.offset + suballoc.size;
8135  --nextAlloc2ndIndex;
8136  }
8137  // We are at the end.
8138  else
8139  {
8140  if(lastOffset < size)
8141  {
8142  // There is free space from lastOffset to size.
8143  ++unusedRangeCount;
8144  }
8145 
8146  // End of loop.
8147  lastOffset = size;
8148  }
8149  }
8150  }
8151 
8152  const VkDeviceSize unusedBytes = size - usedBytes;
8153  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8154 
8155  // SECOND PASS
8156  lastOffset = 0;
8157 
8158  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8159  {
8160  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8161  size_t nextAlloc2ndIndex = 0;
8162  while(lastOffset < freeSpace2ndTo1stEnd)
8163  {
8164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8165  while(nextAlloc2ndIndex < suballoc2ndCount &&
8166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8167  {
8168  ++nextAlloc2ndIndex;
8169  }
8170 
8171  // Found non-null allocation.
8172  if(nextAlloc2ndIndex < suballoc2ndCount)
8173  {
8174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8175 
8176  // 1. Process free space before this allocation.
8177  if(lastOffset < suballoc.offset)
8178  {
8179  // There is free space from lastOffset to suballoc.offset.
8180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8181  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8182  }
8183 
8184  // 2. Process this allocation.
8185  // There is allocation with suballoc.offset, suballoc.size.
8186  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8187 
8188  // 3. Prepare for next iteration.
8189  lastOffset = suballoc.offset + suballoc.size;
8190  ++nextAlloc2ndIndex;
8191  }
8192  // We are at the end.
8193  else
8194  {
8195  if(lastOffset < freeSpace2ndTo1stEnd)
8196  {
8197  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8198  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8199  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8200  }
8201 
8202  // End of loop.
8203  lastOffset = freeSpace2ndTo1stEnd;
8204  }
8205  }
8206  }
8207 
8208  nextAlloc1stIndex = m_1stNullItemsBeginCount;
8209  while(lastOffset < freeSpace1stTo2ndEnd)
8210  {
8211  // Find next non-null allocation or move nextAllocIndex to the end.
8212  while(nextAlloc1stIndex < suballoc1stCount &&
8213  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8214  {
8215  ++nextAlloc1stIndex;
8216  }
8217 
8218  // Found non-null allocation.
8219  if(nextAlloc1stIndex < suballoc1stCount)
8220  {
8221  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8222 
8223  // 1. Process free space before this allocation.
8224  if(lastOffset < suballoc.offset)
8225  {
8226  // There is free space from lastOffset to suballoc.offset.
8227  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8228  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8229  }
8230 
8231  // 2. Process this allocation.
8232  // There is allocation with suballoc.offset, suballoc.size.
8233  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8234 
8235  // 3. Prepare for next iteration.
8236  lastOffset = suballoc.offset + suballoc.size;
8237  ++nextAlloc1stIndex;
8238  }
8239  // We are at the end.
8240  else
8241  {
8242  if(lastOffset < freeSpace1stTo2ndEnd)
8243  {
8244  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8245  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8246  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8247  }
8248 
8249  // End of loop.
8250  lastOffset = freeSpace1stTo2ndEnd;
8251  }
8252  }
8253 
8254  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8255  {
8256  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8257  while(lastOffset < size)
8258  {
8259  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8260  while(nextAlloc2ndIndex != SIZE_MAX &&
8261  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8262  {
8263  --nextAlloc2ndIndex;
8264  }
8265 
8266  // Found non-null allocation.
8267  if(nextAlloc2ndIndex != SIZE_MAX)
8268  {
8269  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8270 
8271  // 1. Process free space before this allocation.
8272  if(lastOffset < suballoc.offset)
8273  {
8274  // There is free space from lastOffset to suballoc.offset.
8275  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8276  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8277  }
8278 
8279  // 2. Process this allocation.
8280  // There is allocation with suballoc.offset, suballoc.size.
8281  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
8282 
8283  // 3. Prepare for next iteration.
8284  lastOffset = suballoc.offset + suballoc.size;
8285  --nextAlloc2ndIndex;
8286  }
8287  // We are at the end.
8288  else
8289  {
8290  if(lastOffset < size)
8291  {
8292  // There is free space from lastOffset to size.
8293  const VkDeviceSize unusedRangeSize = size - lastOffset;
8294  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8295  }
8296 
8297  // End of loop.
8298  lastOffset = size;
8299  }
8300  }
8301  }
8302 
8303  PrintDetailedMap_End(json);
8304 }
8305 #endif // #if VMA_STATS_STRING_ENABLED
8306 
8307 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8308  uint32_t currentFrameIndex,
8309  uint32_t frameInUseCount,
8310  VkDeviceSize bufferImageGranularity,
8311  VkDeviceSize allocSize,
8312  VkDeviceSize allocAlignment,
8313  bool upperAddress,
8314  VmaSuballocationType allocType,
8315  bool canMakeOtherLost,
8316  VmaAllocationRequest* pAllocationRequest)
8317 {
8318  VMA_ASSERT(allocSize > 0);
8319  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8320  VMA_ASSERT(pAllocationRequest != VMA_NULL);
8321  VMA_HEAVY_ASSERT(Validate());
8322 
8323  const VkDeviceSize size = GetSize();
8324  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8325  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8326 
8327  if(upperAddress)
8328  {
8329  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8330  {
8331  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
8332  return false;
8333  }
8334 
8335  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
8336  if(allocSize > size)
8337  {
8338  return false;
8339  }
8340  VkDeviceSize resultBaseOffset = size - allocSize;
8341  if(!suballocations2nd.empty())
8342  {
8343  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8344  resultBaseOffset = lastSuballoc.offset - allocSize;
8345  if(allocSize > lastSuballoc.offset)
8346  {
8347  return false;
8348  }
8349  }
8350 
8351  // Start from offset equal to end of free space.
8352  VkDeviceSize resultOffset = resultBaseOffset;
8353 
8354  // Apply VMA_DEBUG_MARGIN at the end.
8355  if(VMA_DEBUG_MARGIN > 0)
8356  {
8357  if(resultOffset < VMA_DEBUG_MARGIN)
8358  {
8359  return false;
8360  }
8361  resultOffset -= VMA_DEBUG_MARGIN;
8362  }
8363 
8364  // Apply alignment.
8365  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
8366 
8367  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
8368  // Make bigger alignment if necessary.
8369  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8370  {
8371  bool bufferImageGranularityConflict = false;
8372  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8373  {
8374  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8375  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8376  {
8377  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
8378  {
8379  bufferImageGranularityConflict = true;
8380  break;
8381  }
8382  }
8383  else
8384  // Already on previous page.
8385  break;
8386  }
8387  if(bufferImageGranularityConflict)
8388  {
8389  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
8390  }
8391  }
8392 
8393  // There is enough free space.
8394  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
8395  suballocations1st.back().offset + suballocations1st.back().size :
8396  0;
8397  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
8398  {
8399  // Check previous suballocations for BufferImageGranularity conflicts.
8400  // If conflict exists, allocation cannot be made here.
8401  if(bufferImageGranularity > 1)
8402  {
8403  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8404  {
8405  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8406  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8407  {
8408  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
8409  {
8410  return false;
8411  }
8412  }
8413  else
8414  {
8415  // Already on next page.
8416  break;
8417  }
8418  }
8419  }
8420 
8421  // All tests passed: Success.
8422  pAllocationRequest->offset = resultOffset;
8423  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
8424  pAllocationRequest->sumItemSize = 0;
8425  // pAllocationRequest->item unused.
8426  pAllocationRequest->itemsToMakeLostCount = 0;
8427  return true;
8428  }
8429  }
8430  else // !upperAddress
8431  {
8432  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8433  {
8434  // Try to allocate at the end of 1st vector.
8435 
8436  VkDeviceSize resultBaseOffset = 0;
8437  if(!suballocations1st.empty())
8438  {
8439  const VmaSuballocation& lastSuballoc = suballocations1st.back();
8440  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8441  }
8442 
8443  // Start from offset equal to beginning of free space.
8444  VkDeviceSize resultOffset = resultBaseOffset;
8445 
8446  // Apply VMA_DEBUG_MARGIN at the beginning.
8447  if(VMA_DEBUG_MARGIN > 0)
8448  {
8449  resultOffset += VMA_DEBUG_MARGIN;
8450  }
8451 
8452  // Apply alignment.
8453  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8454 
8455  // Check previous suballocations for BufferImageGranularity conflicts.
8456  // Make bigger alignment if necessary.
8457  if(bufferImageGranularity > 1 && !suballocations1st.empty())
8458  {
8459  bool bufferImageGranularityConflict = false;
8460  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
8461  {
8462  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
8463  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8464  {
8465  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8466  {
8467  bufferImageGranularityConflict = true;
8468  break;
8469  }
8470  }
8471  else
8472  // Already on previous page.
8473  break;
8474  }
8475  if(bufferImageGranularityConflict)
8476  {
8477  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8478  }
8479  }
8480 
8481  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
8482  suballocations2nd.back().offset : size;
8483 
8484  // There is enough free space at the end after alignment.
8485  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
8486  {
8487  // Check next suballocations for BufferImageGranularity conflicts.
8488  // If conflict exists, allocation cannot be made here.
8489  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8490  {
8491  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
8492  {
8493  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
8494  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8495  {
8496  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8497  {
8498  return false;
8499  }
8500  }
8501  else
8502  {
8503  // Already on previous page.
8504  break;
8505  }
8506  }
8507  }
8508 
8509  // All tests passed: Success.
8510  pAllocationRequest->offset = resultOffset;
8511  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
8512  pAllocationRequest->sumItemSize = 0;
8513  // pAllocationRequest->item unused.
8514  pAllocationRequest->itemsToMakeLostCount = 0;
8515  return true;
8516  }
8517  }
8518 
8519  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
8520  // beginning of 1st vector as the end of free space.
8521  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8522  {
8523  VMA_ASSERT(!suballocations1st.empty());
8524 
8525  VkDeviceSize resultBaseOffset = 0;
8526  if(!suballocations2nd.empty())
8527  {
8528  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
8529  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
8530  }
8531 
8532  // Start from offset equal to beginning of free space.
8533  VkDeviceSize resultOffset = resultBaseOffset;
8534 
8535  // Apply VMA_DEBUG_MARGIN at the beginning.
8536  if(VMA_DEBUG_MARGIN > 0)
8537  {
8538  resultOffset += VMA_DEBUG_MARGIN;
8539  }
8540 
8541  // Apply alignment.
8542  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
8543 
8544  // Check previous suballocations for BufferImageGranularity conflicts.
8545  // Make bigger alignment if necessary.
8546  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
8547  {
8548  bool bufferImageGranularityConflict = false;
8549  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
8550  {
8551  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
8552  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
8553  {
8554  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
8555  {
8556  bufferImageGranularityConflict = true;
8557  break;
8558  }
8559  }
8560  else
8561  // Already on previous page.
8562  break;
8563  }
8564  if(bufferImageGranularityConflict)
8565  {
8566  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
8567  }
8568  }
8569 
8570  pAllocationRequest->itemsToMakeLostCount = 0;
8571  pAllocationRequest->sumItemSize = 0;
8572  size_t index1st = m_1stNullItemsBeginCount;
8573 
8574  if(canMakeOtherLost)
8575  {
8576  while(index1st < suballocations1st.size() &&
8577  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
8578  {
8579  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
8580  const VmaSuballocation& suballoc = suballocations1st[index1st];
8581  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
8582  {
8583  // No problem.
8584  }
8585  else
8586  {
8587  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
8588  if(suballoc.hAllocation->CanBecomeLost() &&
8589  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8590  {
8591  ++pAllocationRequest->itemsToMakeLostCount;
8592  pAllocationRequest->sumItemSize += suballoc.size;
8593  }
8594  else
8595  {
8596  return false;
8597  }
8598  }
8599  ++index1st;
8600  }
8601 
8602  // Check next suballocations for BufferImageGranularity conflicts.
8603  // If conflict exists, we must mark more allocations lost or fail.
8604  if(bufferImageGranularity > 1)
8605  {
8606  while(index1st < suballocations1st.size())
8607  {
8608  const VmaSuballocation& suballoc = suballocations1st[index1st];
8609  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
8610  {
8611  if(suballoc.hAllocation != VK_NULL_HANDLE)
8612  {
8613  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
8614  if(suballoc.hAllocation->CanBecomeLost() &&
8615  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
8616  {
8617  ++pAllocationRequest->itemsToMakeLostCount;
8618  pAllocationRequest->sumItemSize += suballoc.size;
8619  }
8620  else
8621  {
8622  return false;
8623  }
8624  }
8625  }
8626  else
8627  {
8628  // Already on next page.
8629  break;
8630  }
8631  ++index1st;
8632  }
8633  }
8634  }
8635 
8636  // There is enough free space at the end after alignment.
8637  if(index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size ||
8638  index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset)
8639  {
8640  // Check next suballocations for BufferImageGranularity conflicts.
8641  // If conflict exists, allocation cannot be made here.
8642  if(bufferImageGranularity > 1)
8643  {
8644  for(size_t nextSuballocIndex = index1st;
8645  nextSuballocIndex < suballocations1st.size();
8646  nextSuballocIndex++)
8647  {
8648  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
8649  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
8650  {
8651  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
8652  {
8653  return false;
8654  }
8655  }
8656  else
8657  {
8658  // Already on next page.
8659  break;
8660  }
8661  }
8662  }
8663 
8664  // All tests passed: Success.
8665  pAllocationRequest->offset = resultOffset;
8666  pAllocationRequest->sumFreeSize =
8667  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
8668  - resultBaseOffset
8669  - pAllocationRequest->sumItemSize;
8670  // pAllocationRequest->item unused.
8671  return true;
8672  }
8673  }
8674  }
8675 
8676  return false;
8677 }
8678 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// scanning the 1st vector from its first non-null item. Returns false if any of
// them refuses to become lost (MakeLost fails), leaving already-lost items freed.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Making allocations lost is supported only in ring-buffer usage of the 2nd
    // vector (or when it is unused), never in double-stack mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Free items are skipped; they don't count towards itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Convert the suballocation into a free (null) item and update
                // the free-size and null-item bookkeeping.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    // Compacts null items and possibly swaps the vectors.
    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
8723 
8724 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8725 {
8726  uint32_t lostAllocationCount = 0;
8727 
8728  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8729  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8730  {
8731  VmaSuballocation& suballoc = suballocations1st[i];
8732  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8733  suballoc.hAllocation->CanBecomeLost() &&
8734  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8735  {
8736  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8737  suballoc.hAllocation = VK_NULL_HANDLE;
8738  ++m_1stNullItemsMiddleCount;
8739  m_SumFreeSize += suballoc.size;
8740  ++lostAllocationCount;
8741  }
8742  }
8743 
8744  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8745  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8746  {
8747  VmaSuballocation& suballoc = suballocations2nd[i];
8748  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
8749  suballoc.hAllocation->CanBecomeLost() &&
8750  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
8751  {
8752  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8753  suballoc.hAllocation = VK_NULL_HANDLE;
8754  ++m_2ndNullItemsCount;
8755  ++lostAllocationCount;
8756  }
8757  }
8758 
8759  if(lostAllocationCount)
8760  {
8761  CleanupAfterFree();
8762  }
8763 
8764  return lostAllocationCount;
8765 }
8766 
8767 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8768 {
8769  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8770  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8771  {
8772  const VmaSuballocation& suballoc = suballocations1st[i];
8773  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8774  {
8775  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8776  {
8777  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8778  return VK_ERROR_VALIDATION_FAILED_EXT;
8779  }
8780  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8781  {
8782  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8783  return VK_ERROR_VALIDATION_FAILED_EXT;
8784  }
8785  }
8786  }
8787 
8788  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8789  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8790  {
8791  const VmaSuballocation& suballoc = suballocations2nd[i];
8792  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8793  {
8794  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
8795  {
8796  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
8797  return VK_ERROR_VALIDATION_FAILED_EXT;
8798  }
8799  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8800  {
8801  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8802  return VK_ERROR_VALIDATION_FAILED_EXT;
8803  }
8804  }
8805  }
8806 
8807  return VK_SUCCESS;
8808 }
8809 
// Commits a previously computed allocation request: records the new
// suballocation in the appropriate vector and updates free-size accounting.
// upperAddress selects the upper stack of double-stack mode (2nd vector).
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations go to the 2nd vector, which becomes the
        // upper stack of double-stack mode; mixing with ring-buffer usage is a
        // usage error.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request was computed by CreateAllocationRequest and must
                // match one of the cases above.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
8879 
8880 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
8881 {
8882  FreeAtOffset(allocation->GetOffset());
8883 }
8884 
// Frees the suballocation at the given offset, trying the cheap special cases
// first (first item of 1st vector, last item of either vector) before falling
// back to binary search in the middle of each vector. Asserts if the offset is
// not found.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): indexes at m_1stNullItemsBeginCount — relies on the
        // invariant (maintained by CleanupAfterFree) that a non-empty 1st
        // vector has at least one non-null item past that index; confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Leave the item in place as a null item; CleanupAfterFree compacts.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 2nd vector is sorted by ascending offset in ring-buffer mode and by
        // descending offset in double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
8973 
8974 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8975 {
8976  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8977  const size_t suballocCount = AccessSuballocations1st().size();
8978  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8979 }
8980 
// Housekeeping after any free: trims null items from the edges of both
// vectors, optionally compacts the 1st vector, and when the 1st vector drains
// completely, promotes the 2nd (ring-buffer) vector to become the new 1st.
// Maintains the m_1stNullItemsBeginCount / m_1stNullItemsMiddleCount /
// m_2ndNullItemsCount invariants used throughout this class.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Everything freed: reset to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Middle null items adjacent to the begin run are reclassified as
        // begin null items.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all non-null items of the 1st vector to the front,
            // preserving their relative order, then drop the tail.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The 2nd vector's null items become middle null items of the
                // new 1st vector, then its leading nulls are reclassified.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector AccessSuballocations1st() returns.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
9077 
9078 
9080 // class VmaDeviceMemoryBlock
9081 
// Constructs an uninitialized block; real setup (memory handle, metadata
// object) happens in Init(). hAllocator is unused here — allocation callbacks
// are passed again to Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
9091 
9092 void VmaDeviceMemoryBlock::Init(
9093  VmaAllocator hAllocator,
9094  uint32_t newMemoryTypeIndex,
9095  VkDeviceMemory newMemory,
9096  VkDeviceSize newSize,
9097  uint32_t id,
9098  bool linearAlgorithm)
9099 {
9100  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
9101 
9102  m_MemoryTypeIndex = newMemoryTypeIndex;
9103  m_Id = id;
9104  m_hMemory = newMemory;
9105 
9106  if(linearAlgorithm)
9107  {
9108  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
9109  }
9110  else
9111  {
9112  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
9113  }
9114  m_pMetadata->Init(newSize);
9115 }
9116 
// Releases the block's VkDeviceMemory and destroys its metadata object.
// The block must be empty — all allocations freed — before destruction.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
9130 
9131 bool VmaDeviceMemoryBlock::Validate() const
9132 {
9133  if((m_hMemory == VK_NULL_HANDLE) ||
9134  (m_pMetadata->GetSize() == 0))
9135  {
9136  return false;
9137  }
9138 
9139  return m_pMetadata->Validate();
9140 }
9141 
9142 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
9143 {
9144  void* pData = nullptr;
9145  VkResult res = Map(hAllocator, 1, &pData);
9146  if(res != VK_SUCCESS)
9147  {
9148  return res;
9149  }
9150 
9151  res = m_pMetadata->CheckCorruption(pData);
9152 
9153  Unmap(hAllocator, 1);
9154 
9155  return res;
9156 }
9157 
// Reference-counted map of the whole block: vkMapMemory is called only on the
// 0 -> nonzero transition of m_MapCount; later calls just bump the counter and
// return the cached pointer. count allows taking several references at once.
// ppData may be null if the caller doesn't need the pointer.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes map/unmap on this VkDeviceMemory across threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
9196 
// Drops count references taken by Map(); vkUnmapMemory is called only when the
// reference count reaches zero. Unbalanced unmaps trigger a debug assert and
// are otherwise ignored.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    // Serializes map/unmap on this VkDeviceMemory across threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
9219 
9220 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
9221 {
9222  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
9223  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
9224 
9225  void* pData;
9226  VkResult res = Map(hAllocator, 1, &pData);
9227  if(res != VK_SUCCESS)
9228  {
9229  return res;
9230  }
9231 
9232  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
9233  VmaWriteMagicValue(pData, allocOffset + allocSize);
9234 
9235  Unmap(hAllocator, 1);
9236 
9237  return VK_SUCCESS;
9238 }
9239 
// Checks the magic values in the debug margins around an allocation being
// freed. Note: corruption is reported only via VMA_ASSERT; the function still
// returns VK_SUCCESS afterwards — only a failed Map() yields an error code.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
9265 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// The allocation must live in this block.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
9281 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
// The allocation must live in this block.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
9297 
9298 static void InitStatInfo(VmaStatInfo& outInfo)
9299 {
9300  memset(&outInfo, 0, sizeof(outInfo));
9301  outInfo.allocationSizeMin = UINT64_MAX;
9302  outInfo.unusedRangeSizeMin = UINT64_MAX;
9303 }
9304 
9305 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
9306 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
9307 {
9308  inoutInfo.blockCount += srcInfo.blockCount;
9309  inoutInfo.allocationCount += srcInfo.allocationCount;
9310  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
9311  inoutInfo.usedBytes += srcInfo.usedBytes;
9312  inoutInfo.unusedBytes += srcInfo.unusedBytes;
9313  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
9314  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
9315  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
9316  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
9317 }
9318 
9319 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
9320 {
9321  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
9322  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
9323  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
9324  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
9325 }
9326 
// Constructs a custom pool around its block vector, translating pool create
// flags into block-vector parameters. preferredBlockSize is used only when the
// caller didn't request an explicit blockSize.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        (createInfo.flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0), // linearAlgorithm
    m_Id(0)
{
}
9345 
// Empty destructor: m_BlockVector cleans itself up via its own destructor.
VmaPool_T::~VmaPool_T()
{
}
9349 
9350 #if VMA_STATS_STRING_ENABLED
9351 
9352 #endif // #if VMA_STATS_STRING_ENABLED
9353 
// Constructs an empty block vector: a growable set of VkDeviceMemory blocks of
// one memory type. Blocks themselves are created lazily (CreateMinBlocks /
// Allocate), not here.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    bool linearAlgorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_LinearAlgorithm(linearAlgorithm),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
9381 
// Destroys all remaining blocks (in reverse creation order) and their
// VkDeviceMemory. Defragmentation must have been finished beforehand.
VmaBlockVector::~VmaBlockVector()
{
    VMA_ASSERT(m_pDefragmentator == VMA_NULL);

    for(size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
9392 
9393 VkResult VmaBlockVector::CreateMinBlocks()
9394 {
9395  for(size_t i = 0; i < m_MinBlockCount; ++i)
9396  {
9397  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
9398  if(res != VK_SUCCESS)
9399  {
9400  return res;
9401  }
9402  }
9403  return VK_SUCCESS;
9404 }
9405 
9406 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
9407 {
9408  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9409 
9410  const size_t blockCount = m_Blocks.size();
9411 
9412  pStats->size = 0;
9413  pStats->unusedSize = 0;
9414  pStats->allocationCount = 0;
9415  pStats->unusedRangeCount = 0;
9416  pStats->unusedRangeSizeMax = 0;
9417  pStats->blockCount = blockCount;
9418 
9419  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
9420  {
9421  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
9422  VMA_ASSERT(pBlock);
9423  VMA_HEAVY_ASSERT(pBlock->Validate());
9424  pBlock->m_pMetadata->AddPoolStats(*pStats);
9425  }
9426 }
9427 
9428 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
9429 {
9430  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
9431  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
9432  (VMA_DEBUG_MARGIN > 0) &&
9433  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
9434 }
9435 
9436 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
9437 
9438 VkResult VmaBlockVector::Allocate(
9439  VmaPool hCurrentPool,
9440  uint32_t currentFrameIndex,
9441  VkDeviceSize size,
9442  VkDeviceSize alignment,
9443  const VmaAllocationCreateInfo& createInfo,
9444  VmaSuballocationType suballocType,
9445  VmaAllocation* pAllocation)
9446 {
9447  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
9448  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
9449  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
9450  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
9451  const bool canCreateNewBlock =
9452  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
9453  (m_Blocks.size() < m_MaxBlockCount);
9454 
9455  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
9456  // Which in turn is available only when maxBlockCount = 1.
9457  if(m_LinearAlgorithm && m_MaxBlockCount > 1)
9458  {
9459  canMakeOtherLost = false;
9460  }
9461 
9462  // Upper address can only be used with linear allocator and within single memory block.
9463  if(isUpperAddress &&
9464  (!m_LinearAlgorithm || m_MaxBlockCount > 1))
9465  {
9466  return VK_ERROR_FEATURE_NOT_PRESENT;
9467  }
9468 
9469  // Early reject: requested allocation size is larger that maximum block size for this block vector.
9470  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
9471  {
9472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9473  }
9474 
9475  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9476 
9477  /*
9478  Under certain condition, this whole section can be skipped for optimization, so
9479  we move on directly to trying to allocate with canMakeOtherLost. That's the case
9480  e.g. for custom pools with linear algorithm.
9481  */
9482  if(!canMakeOtherLost || canCreateNewBlock)
9483  {
9484  // 1. Search existing allocations. Try to allocate without making other allocations lost.
9485  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
9487 
9488  if(m_LinearAlgorithm)
9489  {
9490  // Use only last block.
9491  if(!m_Blocks.empty())
9492  {
9493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
9494  VMA_ASSERT(pCurrBlock);
9495  VkResult res = AllocateFromBlock(
9496  pCurrBlock,
9497  hCurrentPool,
9498  currentFrameIndex,
9499  size,
9500  alignment,
9501  allocFlagsCopy,
9502  createInfo.pUserData,
9503  suballocType,
9504  pAllocation);
9505  if(res == VK_SUCCESS)
9506  {
9507  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
9508  return VK_SUCCESS;
9509  }
9510  }
9511  }
9512  else
9513  {
9514  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9515  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9516  {
9517  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9518  VMA_ASSERT(pCurrBlock);
9519  VkResult res = AllocateFromBlock(
9520  pCurrBlock,
9521  hCurrentPool,
9522  currentFrameIndex,
9523  size,
9524  alignment,
9525  allocFlagsCopy,
9526  createInfo.pUserData,
9527  suballocType,
9528  pAllocation);
9529  if(res == VK_SUCCESS)
9530  {
9531  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
9532  return VK_SUCCESS;
9533  }
9534  }
9535  }
9536 
9537  // 2. Try to create new block.
9538  if(canCreateNewBlock)
9539  {
9540  // Calculate optimal size for new block.
9541  VkDeviceSize newBlockSize = m_PreferredBlockSize;
9542  uint32_t newBlockSizeShift = 0;
9543  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
9544 
9545  if(!m_ExplicitBlockSize)
9546  {
9547  // Allocate 1/8, 1/4, 1/2 as first blocks.
9548  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
9549  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
9550  {
9551  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9552  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
9553  {
9554  newBlockSize = smallerNewBlockSize;
9555  ++newBlockSizeShift;
9556  }
9557  else
9558  {
9559  break;
9560  }
9561  }
9562  }
9563 
9564  size_t newBlockIndex = 0;
9565  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
9566  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
9567  if(!m_ExplicitBlockSize)
9568  {
9569  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
9570  {
9571  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
9572  if(smallerNewBlockSize >= size)
9573  {
9574  newBlockSize = smallerNewBlockSize;
9575  ++newBlockSizeShift;
9576  res = CreateBlock(newBlockSize, &newBlockIndex);
9577  }
9578  else
9579  {
9580  break;
9581  }
9582  }
9583  }
9584 
9585  if(res == VK_SUCCESS)
9586  {
9587  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
9588  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
9589 
9590  res = AllocateFromBlock(
9591  pBlock,
9592  hCurrentPool,
9593  currentFrameIndex,
9594  size,
9595  alignment,
9596  allocFlagsCopy,
9597  createInfo.pUserData,
9598  suballocType,
9599  pAllocation);
9600  if(res == VK_SUCCESS)
9601  {
9602  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
9603  return VK_SUCCESS;
9604  }
9605  else
9606  {
9607  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
9608  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9609  }
9610  }
9611  }
9612  }
9613 
9614  // 3. Try to allocate from existing blocks with making other allocations lost.
9615  if(canMakeOtherLost)
9616  {
9617  uint32_t tryIndex = 0;
9618  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
9619  {
9620  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
9621  VmaAllocationRequest bestRequest = {};
9622  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
9623 
9624  // 1. Search existing allocations.
9625  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
9626  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
9627  {
9628  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
9629  VMA_ASSERT(pCurrBlock);
9630  VmaAllocationRequest currRequest = {};
9631  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
9632  currentFrameIndex,
9633  m_FrameInUseCount,
9634  m_BufferImageGranularity,
9635  size,
9636  alignment,
9637  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
9638  suballocType,
9639  canMakeOtherLost,
9640  &currRequest))
9641  {
9642  const VkDeviceSize currRequestCost = currRequest.CalcCost();
9643  if(pBestRequestBlock == VMA_NULL ||
9644  currRequestCost < bestRequestCost)
9645  {
9646  pBestRequestBlock = pCurrBlock;
9647  bestRequest = currRequest;
9648  bestRequestCost = currRequestCost;
9649 
9650  if(bestRequestCost == 0)
9651  {
9652  break;
9653  }
9654  }
9655  }
9656  }
9657 
9658  if(pBestRequestBlock != VMA_NULL)
9659  {
9660  if(mapped)
9661  {
9662  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
9663  if(res != VK_SUCCESS)
9664  {
9665  return res;
9666  }
9667  }
9668 
9669  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
9670  currentFrameIndex,
9671  m_FrameInUseCount,
9672  &bestRequest))
9673  {
9674  // We no longer have an empty Allocation.
9675  if(pBestRequestBlock->m_pMetadata->IsEmpty())
9676  {
9677  m_HasEmptyBlock = false;
9678  }
9679  // Allocate from this pBlock.
9680  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
9681  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
9682  (*pAllocation)->InitBlockAllocation(
9683  hCurrentPool,
9684  pBestRequestBlock,
9685  bestRequest.offset,
9686  alignment,
9687  size,
9688  suballocType,
9689  mapped,
9690  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
9691  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
9692  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
9693  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
9694  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
9695  {
9696  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
9697  }
9698  if(IsCorruptionDetectionEnabled())
9699  {
9700  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
9701  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
9702  }
9703  return VK_SUCCESS;
9704  }
9705  // else: Some allocations must have been touched while we are here. Next try.
9706  }
9707  else
9708  {
9709  // Could not find place in any of the blocks - break outer loop.
9710  break;
9711  }
9712  }
9713  /* Maximum number of tries exceeded - a very unlike event when many other
9714  threads are simultaneously touching allocations making it impossible to make
9715  lost at the same time as we try to allocate. */
9716  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
9717  {
9718  return VK_ERROR_TOO_MANY_OBJECTS;
9719  }
9720  }
9721 
9722  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
9723 }
9724 
9725 void VmaBlockVector::Free(
9726  VmaAllocation hAllocation)
9727 {
9728  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
9729 
9730  // Scope for lock.
9731  {
9732  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
9733 
9734  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
9735 
9736  if(IsCorruptionDetectionEnabled())
9737  {
9738  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
9739  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
9740  }
9741 
9742  if(hAllocation->IsPersistentMap())
9743  {
9744  pBlock->Unmap(m_hAllocator, 1);
9745  }
9746 
9747  pBlock->m_pMetadata->Free(hAllocation);
9748  VMA_HEAVY_ASSERT(pBlock->Validate());
9749 
9750  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
9751 
9752  // pBlock became empty after this deallocation.
9753  if(pBlock->m_pMetadata->IsEmpty())
9754  {
9755  // Already has empty Allocation. We don't want to have two, so delete this one.
9756  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
9757  {
9758  pBlockToDelete = pBlock;
9759  Remove(pBlock);
9760  }
9761  // We now have first empty block.
9762  else
9763  {
9764  m_HasEmptyBlock = true;
9765  }
9766  }
9767  // pBlock didn't become empty, but we have another empty block - find and free that one.
9768  // (This is optional, heuristics.)
9769  else if(m_HasEmptyBlock)
9770  {
9771  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
9772  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
9773  {
9774  pBlockToDelete = pLastBlock;
9775  m_Blocks.pop_back();
9776  m_HasEmptyBlock = false;
9777  }
9778  }
9779 
9780  IncrementallySortBlocks();
9781  }
9782 
9783  // Destruction of a free Allocation. Deferred until this point, outside of mutex
9784  // lock, for performance reason.
9785  if(pBlockToDelete != VMA_NULL)
9786  {
9787  VMA_DEBUG_LOG(" Deleted empty allocation");
9788  pBlockToDelete->Destroy(m_hAllocator);
9789  vma_delete(m_hAllocator, pBlockToDelete);
9790  }
9791 }
9792 
9793 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
9794 {
9795  VkDeviceSize result = 0;
9796  for(size_t i = m_Blocks.size(); i--; )
9797  {
9798  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
9799  if(result >= m_PreferredBlockSize)
9800  {
9801  break;
9802  }
9803  }
9804  return result;
9805 }
9806 
9807 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
9808 {
9809  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
9810  {
9811  if(m_Blocks[blockIndex] == pBlock)
9812  {
9813  VmaVectorRemove(m_Blocks, blockIndex);
9814  return;
9815  }
9816  }
9817  VMA_ASSERT(0);
9818 }
9819 
9820 void VmaBlockVector::IncrementallySortBlocks()
9821 {
9822  if(!m_LinearAlgorithm)
9823  {
9824  // Bubble sort only until first swap.
9825  for(size_t i = 1; i < m_Blocks.size(); ++i)
9826  {
9827  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
9828  {
9829  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
9830  return;
9831  }
9832  }
9833  }
9834 }
9835 
// Tries to place an allocation of (size, alignment) inside the given block,
// never making other allocations lost. On success creates *pAllocation and
// returns VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY (no
// space in this block) or an error from mapping the block.
// NOTE(review): callers appear to hold m_Mutex when invoking this - confirm.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // This path explicitly does not support making other allocations lost.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // A persistently mapped allocation takes one reference on the
        // block's mapping; do this first so failure leaves no side effects.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and record it in the block metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill fresh memory with a debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // With corruption detection enabled, write magic values into the
        // margins around the allocation; CheckCorruption() validates them later.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
9908 
9909 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
9910 {
9911  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
9912  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
9913  allocInfo.allocationSize = blockSize;
9914  VkDeviceMemory mem = VK_NULL_HANDLE;
9915  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
9916  if(res < 0)
9917  {
9918  return res;
9919  }
9920 
9921  // New VkDeviceMemory successfully created.
9922 
9923  // Create new Allocation for it.
9924  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
9925  pBlock->Init(
9926  m_hAllocator,
9927  m_MemoryTypeIndex,
9928  mem,
9929  allocInfo.allocationSize,
9930  m_NextBlockId++,
9931  m_LinearAlgorithm);
9932 
9933  m_Blocks.push_back(pBlock);
9934  if(pNewBlockIndex != VMA_NULL)
9935  {
9936  *pNewBlockIndex = m_Blocks.size() - 1;
9937  }
9938 
9939  return VK_SUCCESS;
9940 }
9941 
9942 #if VMA_STATS_STRING_ENABLED
9943 
// Serializes this block vector's state to JSON. Custom pools dump their full
// configuration (memory type, block size, block count limits, frame-in-use
// count, linear flag); default pools emit only the preferred block size.
// Then each block's detailed metadata map is written under "Blocks",
// keyed by block id. Takes m_Mutex for the duration.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_LinearAlgorithm)
        {
            json.WriteString("LinearAlgorithm");
            json.WriteBool(true);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block: "<block id>": { ...detailed metadata map... }.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
10006 
10007 #endif // #if VMA_STATS_STRING_ENABLED
10008 
10009 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
10010  VmaAllocator hAllocator,
10011  uint32_t currentFrameIndex)
10012 {
10013  if(m_pDefragmentator == VMA_NULL)
10014  {
10015  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
10016  hAllocator,
10017  this,
10018  currentFrameIndex);
10019  }
10020 
10021  return m_pDefragmentator;
10022 }
10023 
// Runs defragmentation on this block vector. No-op (VK_SUCCESS) unless a
// defragmentator was created via EnsureDefragmentator().
// maxBytesToMove / maxAllocationsToMove are in-out budgets shared across
// block vectors: they are decremented here by the amounts actually consumed.
// Statistics are accumulated into *pDefragmentationStats if provided, and
// blocks that became empty are freed down to m_MinBlockCount.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Charge the consumed amounts against the caller's global budget.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks, iterating backwards so removal doesn't disturb the
    // indices still to be visited. m_HasEmptyBlock is recomputed here.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Kept alive only to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
10080 
10081 void VmaBlockVector::DestroyDefragmentator()
10082 {
10083  if(m_pDefragmentator != VMA_NULL)
10084  {
10085  vma_delete(m_hAllocator, m_pDefragmentator);
10086  m_pDefragmentator = VMA_NULL;
10087  }
10088 }
10089 
10090 void VmaBlockVector::MakePoolAllocationsLost(
10091  uint32_t currentFrameIndex,
10092  size_t* pLostAllocationCount)
10093 {
10094  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10095  size_t lostAllocationCount = 0;
10096  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10097  {
10098  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10099  VMA_ASSERT(pBlock);
10100  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
10101  }
10102  if(pLostAllocationCount != VMA_NULL)
10103  {
10104  *pLostAllocationCount = lostAllocationCount;
10105  }
10106 }
10107 
10108 VkResult VmaBlockVector::CheckCorruption()
10109 {
10110  if(!IsCorruptionDetectionEnabled())
10111  {
10112  return VK_ERROR_FEATURE_NOT_PRESENT;
10113  }
10114 
10115  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10116  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10117  {
10118  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10119  VMA_ASSERT(pBlock);
10120  VkResult res = pBlock->CheckCorruption(m_hAllocator);
10121  if(res != VK_SUCCESS)
10122  {
10123  return res;
10124  }
10125  }
10126  return VK_SUCCESS;
10127 }
10128 
10129 void VmaBlockVector::AddStats(VmaStats* pStats)
10130 {
10131  const uint32_t memTypeIndex = m_MemoryTypeIndex;
10132  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
10133 
10134  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
10135 
10136  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
10137  {
10138  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
10139  VMA_ASSERT(pBlock);
10140  VMA_HEAVY_ASSERT(pBlock->Validate());
10141  VmaStatInfo allocationStatInfo;
10142  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
10143  VmaAddStatInfo(pStats->total, allocationStatInfo);
10144  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
10145  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
10146  }
10147 }
10148 
10150 // VmaDefragmentator members definition
10151 
// Binds a defragmentator to one block vector and initializes move counters
// to zero. Internal vectors use the allocator's custom allocation callbacks.
// Linear-algorithm block vectors are rejected by the assert below.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(!pBlockVector->UsesLinearAlgorithm());
}
10166 
10167 VmaDefragmentator::~VmaDefragmentator()
10168 {
10169  for(size_t i = m_Blocks.size(); i--; )
10170  {
10171  vma_delete(m_hAllocator, m_Blocks[i]);
10172  }
10173 }
10174 
10175 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
10176 {
10177  AllocationInfo allocInfo;
10178  allocInfo.m_hAllocation = hAlloc;
10179  allocInfo.m_pChanged = pChanged;
10180  m_Allocations.push_back(allocInfo);
10181 }
10182 
10183 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
10184 {
10185  // It has already been mapped for defragmentation.
10186  if(m_pMappedDataForDefragmentation)
10187  {
10188  *ppMappedData = m_pMappedDataForDefragmentation;
10189  return VK_SUCCESS;
10190  }
10191 
10192  // It is originally mapped.
10193  if(m_pBlock->GetMappedData())
10194  {
10195  *ppMappedData = m_pBlock->GetMappedData();
10196  return VK_SUCCESS;
10197  }
10198 
10199  // Map on first usage.
10200  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
10201  *ppMappedData = m_pMappedDataForDefragmentation;
10202  return res;
10203 }
10204 
10205 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
10206 {
10207  if(m_pMappedDataForDefragmentation != VMA_NULL)
10208  {
10209  m_pBlock->Unmap(hAllocator, 1);
10210  }
10211 }
10212 
// One round of defragmentation: walks candidate allocations from the most
// "source" block (back of m_Blocks) toward the most "destination" block
// (front), moving each to the first acceptable spot that MoveMakesSense().
// Returns VK_SUCCESS when the walk completes, VK_INCOMPLETE when a move
// budget is exhausted, or an error from mapping block memory.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not yet chosen for this block"; the
    // loop below then resets it to the block's last allocation index.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (or reuse existing mappings) so the data
                // can be copied on the CPU.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the debug margins around the moved allocation.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in the metadata: allocate at the
                // destination, free at the source, repoint the handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation, stepping back a block when the
        // current block's list is exhausted.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
10355 
// Top-level defragmentation driver: builds per-block bookkeeping, distributes
// the registered candidate allocations to their owning blocks, sorts blocks
// from most "destination" to most "source", runs up to two DefragmentRound()
// passes within the given budgets, then unmaps anything mapped on the way.
// Called with the block vector's mutex held by VmaBlockVector::Defragment().
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value - enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a known block.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering criteria for the sort below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
10423 
10424 bool VmaDefragmentator::MoveMakesSense(
10425  size_t dstBlockIndex, VkDeviceSize dstOffset,
10426  size_t srcBlockIndex, VkDeviceSize srcOffset)
10427 {
10428  if(dstBlockIndex < srcBlockIndex)
10429  {
10430  return true;
10431  }
10432  if(dstBlockIndex > srcBlockIndex)
10433  {
10434  return false;
10435  }
10436  if(dstOffset < srcOffset)
10437  {
10438  return true;
10439  }
10440  return false;
10441 }
10442 
10444 // VmaRecorder
10445 
10446 #if VMA_RECORDING_ENABLED
10447 
// Constructs an inactive recorder; Init() must be called before use.
// Timer fields start at INT64_MAX as "not yet initialized" sentinels.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
10456 
// Opens the recording file and writes the CSV header. As written this uses
// Win32-only APIs (QueryPerformanceFrequency/Counter, fopen_s), so the
// recording backend is Windows-specific. Returns
// VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture the timer frequency and a start timestamp so each recorded
    // call can be stamped with a time relative to Init().
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line identifying the file, then format version 1,3.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
10478 
// Closes the recording file if Init() opened one.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
10486 
10487 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
10488 {
10489  CallParams callParams;
10490  GetBasicParams(callParams);
10491 
10492  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10493  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
10494  Flush();
10495 }
10496 
10497 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
10498 {
10499  CallParams callParams;
10500  GetBasicParams(callParams);
10501 
10502  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10503  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
10504  Flush();
10505 }
10506 
// Appends a vmaCreatePool entry to the recording file: the pool's create
// parameters followed by the resulting pool handle (written with %p -
// VmaPool is a dispatchable-style handle, pointer-sized here).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
10523 
10524 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
10525 {
10526  CallParams callParams;
10527  GetBasicParams(callParams);
10528 
10529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10530  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
10531  pool);
10532  Flush();
10533 }
10534 
// Appends a vmaAllocateMemory entry: memory requirements, allocation create
// parameters, the resulting allocation handle, and the (possibly quoted)
// user-data string produced by UserDataString.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // UserDataString formats createInfo.pUserData for the CSV line,
    // honoring the USER_DATA_COPY_STRING flag.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10559 
// Writes a "vmaAllocateMemoryForBuffer" entry. Same layout as
// RecordAllocateMemory plus two 0/1 flags for the dedicated-allocation
// requirements reported by the driver for the buffer.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10588 
// Writes a "vmaAllocateMemoryForImage" entry. Same layout as
// RecordAllocateMemoryForBuffer, but tagged for an image resource.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10617 
10618 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
10619  VmaAllocation allocation)
10620 {
10621  CallParams callParams;
10622  GetBasicParams(callParams);
10623 
10624  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10625  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10626  allocation);
10627  Flush();
10628 }
10629 
// Writes a "vmaSetAllocationUserData" entry. The allocation itself knows
// whether its user data is a copied string, so the string-vs-pointer
// formatting decision comes from allocation->IsUserDataString() rather than
// from create flags.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
10646 
10647 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
10648  VmaAllocation allocation)
10649 {
10650  CallParams callParams;
10651  GetBasicParams(callParams);
10652 
10653  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10654  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10655  allocation);
10656  Flush();
10657 }
10658 
10659 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
10660  VmaAllocation allocation)
10661 {
10662  CallParams callParams;
10663  GetBasicParams(callParams);
10664 
10665  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10666  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10667  allocation);
10668  Flush();
10669 }
10670 
10671 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
10672  VmaAllocation allocation)
10673 {
10674  CallParams callParams;
10675  GetBasicParams(callParams);
10676 
10677  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10678  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
10679  allocation);
10680  Flush();
10681 }
10682 
10683 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
10684  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10685 {
10686  CallParams callParams;
10687  GetBasicParams(callParams);
10688 
10689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10690  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10691  allocation,
10692  offset,
10693  size);
10694  Flush();
10695 }
10696 
10697 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
10698  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
10699 {
10700  CallParams callParams;
10701  GetBasicParams(callParams);
10702 
10703  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10704  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
10705  allocation,
10706  offset,
10707  size);
10708  Flush();
10709 }
10710 
// Writes a "vmaCreateBuffer" entry: buffer create info (flags, size, usage,
// sharing mode), allocation create info, resulting allocation handle and
// user-data string. Field order is part of the recording file format.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10736 
// Writes a "vmaCreateImage" entry: the full image create info (enums are
// written as their integer values), allocation create info, resulting
// allocation handle and user-data string. Field order is part of the
// recording file format.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
10771 
10772 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
10773  VmaAllocation allocation)
10774 {
10775  CallParams callParams;
10776  GetBasicParams(callParams);
10777 
10778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10779  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
10780  allocation);
10781  Flush();
10782 }
10783 
10784 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
10785  VmaAllocation allocation)
10786 {
10787  CallParams callParams;
10788  GetBasicParams(callParams);
10789 
10790  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10791  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
10792  allocation);
10793  Flush();
10794 }
10795 
10796 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
10797  VmaAllocation allocation)
10798 {
10799  CallParams callParams;
10800  GetBasicParams(callParams);
10801 
10802  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10803  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
10804  allocation);
10805  Flush();
10806 }
10807 
10808 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
10809  VmaAllocation allocation)
10810 {
10811  CallParams callParams;
10812  GetBasicParams(callParams);
10813 
10814  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10815  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
10816  allocation);
10817  Flush();
10818 }
10819 
10820 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
10821  VmaPool pool)
10822 {
10823  CallParams callParams;
10824  GetBasicParams(callParams);
10825 
10826  VmaMutexLock lock(m_FileMutex, m_UseMutex);
10827  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
10828  pool);
10829  Flush();
10830 }
10831 
10832 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
10833 {
10834  if(pUserData != VMA_NULL)
10835  {
10836  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
10837  {
10838  m_Str = (const char*)pUserData;
10839  }
10840  else
10841  {
10842  sprintf_s(m_PtrStr, "%p", pUserData);
10843  m_Str = m_PtrStr;
10844  }
10845  }
10846  else
10847  {
10848  m_Str = "";
10849  }
10850 }
10851 
// Writes the "Config,Begin" ... "Config,End" header section of the recording
// file: physical device identity/limits, memory heap and type layout, whether
// VK_KHR_dedicated_allocation is enabled, and the values of compile-time VMA
// debug macros. A replay tool can use this to detect environment mismatches.
// Note: caller is responsible for any locking; this writes directly to m_File.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // One line per heap (size, flags) and per type (heap index, property flags).
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
10897 
10898 void VmaRecorder::GetBasicParams(CallParams& outParams)
10899 {
10900  outParams.threadId = GetCurrentThreadId();
10901 
10902  LARGE_INTEGER counter;
10903  QueryPerformanceCounter(&counter);
10904  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
10905 }
10906 
10907 void VmaRecorder::Flush()
10908 {
10909  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
10910  {
10911  fflush(m_File);
10912  }
10913 }
10914 
10915 #endif // #if VMA_RECORDING_ENABLED
10916 
10918 // VmaAllocator_T
10919 
10920 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
10921  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
10922  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
10923  m_hDevice(pCreateInfo->device),
10924  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
10925  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
10926  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
10927  m_PreferredLargeHeapBlockSize(0),
10928  m_PhysicalDevice(pCreateInfo->physicalDevice),
10929  m_CurrentFrameIndex(0),
10930  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
10931  m_NextPoolId(0)
10933  ,m_pRecorder(VMA_NULL)
10934 #endif
10935 {
10936  if(VMA_DEBUG_DETECT_CORRUPTION)
10937  {
10938  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
10939  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
10940  }
10941 
10942  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
10943 
10944 #if !(VMA_DEDICATED_ALLOCATION)
10946  {
10947  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
10948  }
10949 #endif
10950 
10951  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
10952  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
10953  memset(&m_MemProps, 0, sizeof(m_MemProps));
10954 
10955  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
10956  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
10957 
10958  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
10959  {
10960  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
10961  }
10962 
10963  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
10964  {
10965  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
10966  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
10967  }
10968 
10969  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
10970 
10971  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
10972  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
10973 
10974  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
10975  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
10976 
10977  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
10978  {
10979  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
10980  {
10981  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
10982  if(limit != VK_WHOLE_SIZE)
10983  {
10984  m_HeapSizeLimit[heapIndex] = limit;
10985  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
10986  {
10987  m_MemProps.memoryHeaps[heapIndex].size = limit;
10988  }
10989  }
10990  }
10991  }
10992 
10993  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
10994  {
10995  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
10996 
10997  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
10998  this,
10999  memTypeIndex,
11000  preferredBlockSize,
11001  0,
11002  SIZE_MAX,
11003  GetBufferImageGranularity(),
11004  pCreateInfo->frameInUseCount,
11005  false, // isCustomPool
11006  false, // explicitBlockSize
11007  false); // linearAlgorithm
11008  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
11009  // becase minBlockCount is 0.
11010  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
11011 
11012  }
11013 }
11014 
// Second-phase initialization, separate from the constructor so it can
// return a VkResult. Currently only sets up call recording when
// pRecordSettings specifies a non-empty file path; returns
// VK_ERROR_FEATURE_NOT_PRESENT if recording was requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the environment header and the implicit vmaCreateAllocator entry.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
11042 
// Destroys the allocator: records and deletes the recorder first (so the
// vmaDestroyAllocator entry is the last in the file), asserts that all
// custom pools were destroyed by the user, then deletes per-memory-type
// structures in reverse creation order.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
11061 
// Populates m_VulkanFunctions in up to two passes:
//  1. If VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the statically
//     linked Vulkan entry points (the 2KHR functions still come from
//     vkGetDeviceProcAddr, since they are extension functions).
//  2. Overlay any non-null pointers the user supplied in pVulkanFunctions.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-provided pointer only if it is non-null, so user values
// override the static defaults without erasing them.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
11147 
11148 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
11149 {
11150  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11151  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
11152  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
11153  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
11154 }
11155 
11156 VkResult VmaAllocator_T::AllocateMemoryOfType(
11157  VkDeviceSize size,
11158  VkDeviceSize alignment,
11159  bool dedicatedAllocation,
11160  VkBuffer dedicatedBuffer,
11161  VkImage dedicatedImage,
11162  const VmaAllocationCreateInfo& createInfo,
11163  uint32_t memTypeIndex,
11164  VmaSuballocationType suballocType,
11165  VmaAllocation* pAllocation)
11166 {
11167  VMA_ASSERT(pAllocation != VMA_NULL);
11168  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
11169 
11170  VmaAllocationCreateInfo finalCreateInfo = createInfo;
11171 
11172  // If memory type is not HOST_VISIBLE, disable MAPPED.
11173  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
11174  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
11175  {
11176  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
11177  }
11178 
11179  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
11180  VMA_ASSERT(blockVector);
11181 
11182  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
11183  bool preferDedicatedMemory =
11184  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
11185  dedicatedAllocation ||
11186  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
11187  size > preferredBlockSize / 2;
11188 
11189  if(preferDedicatedMemory &&
11190  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
11191  finalCreateInfo.pool == VK_NULL_HANDLE)
11192  {
11194  }
11195 
11196  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
11197  {
11198  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11199  {
11200  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11201  }
11202  else
11203  {
11204  return AllocateDedicatedMemory(
11205  size,
11206  suballocType,
11207  memTypeIndex,
11208  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11209  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11210  finalCreateInfo.pUserData,
11211  dedicatedBuffer,
11212  dedicatedImage,
11213  pAllocation);
11214  }
11215  }
11216  else
11217  {
11218  VkResult res = blockVector->Allocate(
11219  VK_NULL_HANDLE, // hCurrentPool
11220  m_CurrentFrameIndex.load(),
11221  size,
11222  alignment,
11223  finalCreateInfo,
11224  suballocType,
11225  pAllocation);
11226  if(res == VK_SUCCESS)
11227  {
11228  return res;
11229  }
11230 
11231  // 5. Try dedicated memory.
11232  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
11233  {
11234  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11235  }
11236  else
11237  {
11238  res = AllocateDedicatedMemory(
11239  size,
11240  suballocType,
11241  memTypeIndex,
11242  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
11243  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
11244  finalCreateInfo.pUserData,
11245  dedicatedBuffer,
11246  dedicatedImage,
11247  pAllocation);
11248  if(res == VK_SUCCESS)
11249  {
11250  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
11251  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
11252  return VK_SUCCESS;
11253  }
11254  else
11255  {
11256  // Everything failed: Return error code.
11257  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
11258  return res;
11259  }
11260  }
11261  }
11262 }
11263 
// Allocates one VkDeviceMemory object dedicated to a single allocation:
// optionally chains VkMemoryDedicatedAllocateInfoKHR (when the extension is
// enabled and a buffer or image handle was provided), optionally maps the
// whole range persistently, creates the VmaAllocation_T object, optionally
// fills it with a debug pattern, and registers it in the per-memory-type
// dedicated-allocations list.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // dedicatedAllocInfo must outlive the AllocateVulkanMemory call below,
    // since allocInfo.pNext may point at it.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive per the extension spec.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
11346 
// Queries memory requirements for a buffer. With VK_KHR_dedicated_allocation
// enabled, uses vkGetBufferMemoryRequirements2KHR with a chained
// VkMemoryDedicatedRequirementsKHR so the driver can report whether a
// dedicated allocation is required or preferred; otherwise falls back to the
// core function and reports both flags as false.
// Note: the if/else spans the #if block — the else belongs to the KHR path.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
11378 
// Queries memory requirements for an image; mirrors
// GetBufferMemoryRequirements but uses the image variants of the
// VK_KHR_get_memory_requirements2 structures and entry point.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
11410 
/*
Central allocation entry point.

First rejects contradictory createInfo.flags combinations, then:
- If a custom pool is specified, delegates directly to that pool's block
  vector (the pool fixes the memory type).
- Otherwise finds the best memory type via vmaFindMemoryTypeIndex and calls
  AllocateMemoryOfType; if that fails, it keeps removing the failed type from
  the candidate mask and retrying with the next compatible type until one
  succeeds or no candidates remain.

Returns VK_SUCCESS, VK_ERROR_OUT_OF_DEVICE_MEMORY when allocation is
impossible/failed, or VK_ERROR_FEATURE_NOT_PRESENT when no memory type
matches at all.
*/
VkResult VmaAllocator_T::AllocateMemory(
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Validate mutually exclusive / nonsensical flag combinations up front.
    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    if(requiresDedicatedAllocation)
    {
        // Driver demands a dedicated allocation (VK_KHR_dedicated_allocation),
        // which is incompatible with NEVER_ALLOCATE and with custom pools.
        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
        {
            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        if(createInfo.pool != VK_NULL_HANDLE)
        {
            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    if((createInfo.pool != VK_NULL_HANDLE) &&
        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    {
        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if(createInfo.pool != VK_NULL_HANDLE)
    {
        // Custom pool path: alignment must also honor the memory type's
        // minimum alignment (e.g. for buffer-image granularity / corruption
        // detection margins - see GetMemoryTypeMinAlignment).
        const VkDeviceSize alignmentForPool = VMA_MAX(
            vkMemReq.alignment,
            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
        return createInfo.pool->m_BlockVector.Allocate(
            createInfo.pool,
            m_CurrentFrameIndex.load(),
            vkMemReq.size,
            alignmentForPool,
            createInfo,
            suballocType,
            pAllocation);
    }
    else
    {
        // Bit mask of memory Vulkan types acceptable for this allocation.
        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
        uint32_t memTypeIndex = UINT32_MAX;
        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
        if(res == VK_SUCCESS)
        {
            VkDeviceSize alignmentForMemType = VMA_MAX(
                vkMemReq.alignment,
                GetMemoryTypeMinAlignment(memTypeIndex));

            res = AllocateMemoryOfType(
                vkMemReq.size,
                alignmentForMemType,
                requiresDedicatedAllocation || prefersDedicatedAllocation,
                dedicatedBuffer,
                dedicatedImage,
                createInfo,
                memTypeIndex,
                suballocType,
                pAllocation);
            // Succeeded on first try.
            if(res == VK_SUCCESS)
            {
                return res;
            }
            // Allocation from this memory type failed. Try other compatible memory types.
            else
            {
                for(;;)
                {
                    // Remove old memTypeIndex from list of possibilities.
                    memoryTypeBits &= ~(1u << memTypeIndex);
                    // Find alternative memTypeIndex.
                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
                    if(res == VK_SUCCESS)
                    {
                        // Recompute alignment: minimum alignment can differ
                        // per memory type.
                        alignmentForMemType = VMA_MAX(
                            vkMemReq.alignment,
                            GetMemoryTypeMinAlignment(memTypeIndex));

                        res = AllocateMemoryOfType(
                            vkMemReq.size,
                            alignmentForMemType,
                            requiresDedicatedAllocation || prefersDedicatedAllocation,
                            dedicatedBuffer,
                            dedicatedImage,
                            createInfo,
                            memTypeIndex,
                            suballocType,
                            pAllocation);
                        // Allocation from this alternative memory type succeeded.
                        if(res == VK_SUCCESS)
                        {
                            return res;
                        }
                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
                    }
                    // No other matching memory type index could be found.
                    else
                    {
                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                    }
                }
            }
        }
        // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
        else
            return res;
    }
}
11540 
/*
Frees an allocation previously created by AllocateMemory.

If the allocation is "lost" (can become lost AND its last-use frame index is
VMA_FRAME_INDEX_LOST), its backing memory was already reclaimed, so only the
VmaAllocation_T object itself is destroyed; otherwise the memory is returned
to its block vector or freed as a dedicated allocation first.
*/
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // Skip releasing memory for allocations that are already lost.
    if(allocation->CanBecomeLost() == false ||
        allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        // Debug feature: overwrite the memory with a "destroyed" byte pattern
        // so use-after-free is easier to spot.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to the block vector it came from:
                // the custom pool's vector if any, else the default vector of
                // its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Clear user data before destroying the object.
    // NOTE(review): presumably SetUserData releases an owned copy of a
    // user-data string - confirm in VmaAllocation_T.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
11582 
11583 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
11584 {
11585  // Initialize.
11586  InitStatInfo(pStats->total);
11587  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
11588  InitStatInfo(pStats->memoryType[i]);
11589  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
11590  InitStatInfo(pStats->memoryHeap[i]);
11591 
11592  // Process default pools.
11593  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11594  {
11595  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11596  VMA_ASSERT(pBlockVector);
11597  pBlockVector->AddStats(pStats);
11598  }
11599 
11600  // Process custom pools.
11601  {
11602  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11603  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11604  {
11605  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
11606  }
11607  }
11608 
11609  // Process dedicated allocations.
11610  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11611  {
11612  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
11613  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
11614  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
11615  VMA_ASSERT(pDedicatedAllocVector);
11616  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
11617  {
11618  VmaStatInfo allocationStatInfo;
11619  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
11620  VmaAddStatInfo(pStats->total, allocationStatInfo);
11621  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
11622  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
11623  }
11624  }
11625 
11626  // Postprocess.
11627  VmaPostprocessCalcStatInfo(pStats->total);
11628  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
11629  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
11630  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
11631  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
11632 }
11633 
// PCI vendor ID of Advanced Micro Devices: 4098 == 0x1002.
// NOTE(review): the use site is not visible in this chunk - presumably
// compared against VkPhysicalDeviceProperties::vendorID; confirm.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
11635 
11636 VkResult VmaAllocator_T::Defragment(
11637  VmaAllocation* pAllocations,
11638  size_t allocationCount,
11639  VkBool32* pAllocationsChanged,
11640  const VmaDefragmentationInfo* pDefragmentationInfo,
11641  VmaDefragmentationStats* pDefragmentationStats)
11642 {
11643  if(pAllocationsChanged != VMA_NULL)
11644  {
11645  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
11646  }
11647  if(pDefragmentationStats != VMA_NULL)
11648  {
11649  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
11650  }
11651 
11652  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
11653 
11654  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
11655 
11656  const size_t poolCount = m_Pools.size();
11657 
11658  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
11659  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
11660  {
11661  VmaAllocation hAlloc = pAllocations[allocIndex];
11662  VMA_ASSERT(hAlloc);
11663  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
11664  // DedicatedAlloc cannot be defragmented.
11665  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11666  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
11667  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
11668  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
11669  // Lost allocation cannot be defragmented.
11670  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
11671  {
11672  VmaBlockVector* pAllocBlockVector = VMA_NULL;
11673 
11674  const VmaPool hAllocPool = hAlloc->GetPool();
11675  // This allocation belongs to custom pool.
11676  if(hAllocPool != VK_NULL_HANDLE)
11677  {
11678  // Pools with linear algorithm are not defragmented.
11679  if(!hAllocPool->m_BlockVector.UsesLinearAlgorithm())
11680  {
11681  pAllocBlockVector = &hAllocPool->m_BlockVector;
11682  }
11683  }
11684  // This allocation belongs to general pool.
11685  else
11686  {
11687  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
11688  }
11689 
11690  if(pAllocBlockVector != VMA_NULL)
11691  {
11692  VmaDefragmentator* const pDefragmentator =
11693  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
11694  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
11695  &pAllocationsChanged[allocIndex] : VMA_NULL;
11696  pDefragmentator->AddAllocation(hAlloc, pChanged);
11697  }
11698  }
11699  }
11700 
11701  VkResult result = VK_SUCCESS;
11702 
11703  // ======== Main processing.
11704 
11705  VkDeviceSize maxBytesToMove = SIZE_MAX;
11706  uint32_t maxAllocationsToMove = UINT32_MAX;
11707  if(pDefragmentationInfo != VMA_NULL)
11708  {
11709  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
11710  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
11711  }
11712 
11713  // Process standard memory.
11714  for(uint32_t memTypeIndex = 0;
11715  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
11716  ++memTypeIndex)
11717  {
11718  // Only HOST_VISIBLE memory types can be defragmented.
11719  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11720  {
11721  result = m_pBlockVectors[memTypeIndex]->Defragment(
11722  pDefragmentationStats,
11723  maxBytesToMove,
11724  maxAllocationsToMove);
11725  }
11726  }
11727 
11728  // Process custom pools.
11729  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
11730  {
11731  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
11732  pDefragmentationStats,
11733  maxBytesToMove,
11734  maxAllocationsToMove);
11735  }
11736 
11737  // ======== Destroy defragmentators.
11738 
11739  // Process custom pools.
11740  for(size_t poolIndex = poolCount; poolIndex--; )
11741  {
11742  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
11743  }
11744 
11745  // Process standard memory.
11746  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
11747  {
11748  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
11749  {
11750  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
11751  }
11752  }
11753 
11754  return result;
11755 }
11756 
/*
Fills *pAllocationInfo with the current parameters of the allocation.
For allocations that can become lost this also counts as a "touch": the
allocation's last-use frame index is advanced to the current frame via a
lock-free compare-exchange loop. A lost allocation is reported with empty
parameters (memoryType = UINT32_MAX, null memory, offset 0).
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: only size and user data remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame: report real parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use index to the current frame;
                // retry the whole loop until it is up to date.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still advance the last-use frame index so that
        // usage is reflected in recorded stats, even though this allocation
        // can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
11828 
/*
Marks the allocation as used in the current frame and reports whether it is
still valid. Returns false only if the allocation is lost.
This is a stripped-down version of VmaAllocator_T::GetAllocationInfo - same
lock-free compare-exchange loop on the last-use frame index, without filling
any output structure.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost - cannot be touched anymore.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use index; retry until up to date.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still record the touch for non-lost-capable
        // allocations so usage shows up in recorded stats.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // An allocation that cannot become lost is always valid.
        return true;
    }
}
11880 
11881 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
11882 {
11883  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
11884 
11885  const bool isLinearAlgorithm = (pCreateInfo->flags & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) != 0;
11886 
11887  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
11888 
11889  if(newCreateInfo.maxBlockCount == 0)
11890  {
11891  newCreateInfo.maxBlockCount = SIZE_MAX;
11892  }
11893  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
11894  {
11895  return VK_ERROR_INITIALIZATION_FAILED;
11896  }
11897 
11898  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
11899 
11900  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
11901 
11902  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
11903  if(res != VK_SUCCESS)
11904  {
11905  vma_delete(this, *pPool);
11906  *pPool = VMA_NULL;
11907  return res;
11908  }
11909 
11910  // Add to m_Pools.
11911  {
11912  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11913  (*pPool)->SetId(m_NextPoolId++);
11914  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
11915  }
11916 
11917  return VK_SUCCESS;
11918 }
11919 
11920 void VmaAllocator_T::DestroyPool(VmaPool pool)
11921 {
11922  // Remove from m_Pools.
11923  {
11924  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11925  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
11926  VMA_ASSERT(success && "Pool not found in Allocator.");
11927  }
11928 
11929  vma_delete(this, pool);
11930 }
11931 
// Retrieves statistics of the given custom pool by delegating to its block
// vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
11936 
// Atomically stores the current frame index, which is read by the
// lost-allocation machinery (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
11941 
// Marks applicable allocations in the given custom pool as lost, as of the
// current frame index. If pLostAllocationCount is not null, it receives the
// number of allocations marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
11950 
// Runs the corruption check on a single custom pool by delegating to its
// block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
11955 
11956 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
11957 {
11958  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
11959 
11960  // Process default pools.
11961  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
11962  {
11963  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
11964  {
11965  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
11966  VMA_ASSERT(pBlockVector);
11967  VkResult localRes = pBlockVector->CheckCorruption();
11968  switch(localRes)
11969  {
11970  case VK_ERROR_FEATURE_NOT_PRESENT:
11971  break;
11972  case VK_SUCCESS:
11973  finalRes = VK_SUCCESS;
11974  break;
11975  default:
11976  return localRes;
11977  }
11978  }
11979  }
11980 
11981  // Process custom pools.
11982  {
11983  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
11984  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
11985  {
11986  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
11987  {
11988  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
11989  switch(localRes)
11990  {
11991  case VK_ERROR_FEATURE_NOT_PRESENT:
11992  break;
11993  case VK_SUCCESS:
11994  finalRes = VK_SUCCESS;
11995  break;
11996  default:
11997  return localRes;
11998  }
11999  }
12000  }
12001  }
12002 
12003  return finalRes;
12004 }
12005 
// Creates an allocation object that is already in the "lost" state - it has
// no device memory bound to it.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    // Constructed with frame index VMA_FRAME_INDEX_LOST; the second argument
    // is false - presumably the "user data is a string" flag; confirm against
    // the VmaAllocation_T constructor.
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
12011 
/*
Calls vkAllocateMemory, honoring the optional per-heap size limit stored in
m_HeapSizeLimit, and notifies the user's device-memory callback on success.
*/
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE marks a heap without an artificial limit.
    // NOTE(review): this first check runs outside the mutex; it looks safe as
    // long as a limited heap's remaining budget never becomes VK_WHOLE_SIZE -
    // confirm against where m_HeapSizeLimit is initialized.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the remaining heap budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // The allocation would exceed the artificial heap limit.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user via the optional device-memory callback.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
12045 
/*
Frees a VkDeviceMemory object: notifies the user's free callback first, calls
vkFreeMemory, then returns the freed size to the heap's budget when a heap
size limit is in effect (mirrors AllocateVulkanMemory).
*/
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Give the size back to the artificial heap budget, if one is set.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
12062 
/*
Maps the allocation's memory and returns a pointer in *ppData, already offset
to the allocation's own region. Allocations that can become lost cannot be
mapped. For block allocations the whole block is mapped (the count argument 1
is a map reference, paired with Unmap) and the allocation's own map count is
tracked via BlockAllocMap.
*/
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // Offset the block's base pointer to this allocation's region.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
12091 
12092 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
12093 {
12094  switch(hAllocation->GetType())
12095  {
12096  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12097  {
12098  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
12099  hAllocation->BlockAllocUnmap();
12100  pBlock->Unmap(this, 1);
12101  }
12102  break;
12103  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12104  hAllocation->DedicatedAllocUnmap(this);
12105  break;
12106  default:
12107  VMA_ASSERT(0);
12108  }
12109 }
12110 
12111 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
12112 {
12113  VkResult res = VK_SUCCESS;
12114  switch(hAllocation->GetType())
12115  {
12116  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12117  res = GetVulkanFunctions().vkBindBufferMemory(
12118  m_hDevice,
12119  hBuffer,
12120  hAllocation->GetMemory(),
12121  0); //memoryOffset
12122  break;
12123  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12124  {
12125  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12126  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
12127  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
12128  break;
12129  }
12130  default:
12131  VMA_ASSERT(0);
12132  }
12133  return res;
12134 }
12135 
12136 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
12137 {
12138  VkResult res = VK_SUCCESS;
12139  switch(hAllocation->GetType())
12140  {
12141  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
12142  res = GetVulkanFunctions().vkBindImageMemory(
12143  m_hDevice,
12144  hImage,
12145  hAllocation->GetMemory(),
12146  0); //memoryOffset
12147  break;
12148  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
12149  {
12150  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
12151  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
12152  res = pBlock->BindImageMemory(this, hAllocation, hImage);
12153  break;
12154  }
12155  default:
12156  VMA_ASSERT(0);
12157  }
12158  return res;
12159 }
12160 
/*
Flushes or invalidates (according to op) a range of the allocation's memory.
offset/size are relative to the beginning of the allocation; size may be
VK_WHOLE_SIZE. No-op when size is 0 or when the memory type is HOST_COHERENT
(coherent memory needs no explicit flush/invalidate).
The resulting VkMappedMemoryRange is expanded to multiples of
nonCoherentAtomSize (a Vulkan validity requirement) and clamped so it never
extends past the allocation (dedicated case) or the whole block (block case).
*/
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down to an atom boundary.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up to an atom boundary, but never past the
                // end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block.
                // The allocation's offset inside the block is expected to be
                // atom-aligned, so translating the range keeps its alignment.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                // Clamp so the range never runs past the end of the block.
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
12236 
/*
Destroys a dedicated allocation: unregisters it from the per-memory-type list
of dedicated allocations, unmaps it if still mapped, and releases its
VkDeviceMemory via FreeVulkanMemory.
*/
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    // Unregister under the per-memory-type dedicated-allocations mutex.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // A still-mapped dedicated allocation must be unmapped before freeing.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
12261 
12262 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
12263 {
12264  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
12265  !hAllocation->CanBecomeLost() &&
12266  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
12267  {
12268  void* pData = VMA_NULL;
12269  VkResult res = Map(hAllocation, &pData);
12270  if(res == VK_SUCCESS)
12271  {
12272  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
12273  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
12274  Unmap(hAllocation);
12275  }
12276  else
12277  {
12278  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
12279  }
12280  }
12281 }
12282 
12283 #if VMA_STATS_STRING_ENABLED
12284 
/*
Writes a detailed JSON map of the allocator's state into `json`, in three
sections: "DedicatedAllocations" (per memory type), "DefaultPools" (per
memory type), and "Pools" (custom pools, keyed by pool id). Each section is
emitted only if it has content, so the begin/end of each JSON object is
deferred until the first non-empty entry is found.
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, on the first
            // memory type that actually has dedicated allocations.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key: "Type <memTypeIndex>".
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Same lazy-open pattern for the default block vectors.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool is keyed by its numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
12370 
12371 #endif // #if VMA_STATS_STRING_ENABLED
12372 
12374 // Public interface
12375 
12376 VkResult vmaCreateAllocator(
12377  const VmaAllocatorCreateInfo* pCreateInfo,
12378  VmaAllocator* pAllocator)
12379 {
12380  VMA_ASSERT(pCreateInfo && pAllocator);
12381  VMA_DEBUG_LOG("vmaCreateAllocator");
12382  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
12383  return (*pAllocator)->Init(pCreateInfo);
12384 }
12385 
12386 void vmaDestroyAllocator(
12387  VmaAllocator allocator)
12388 {
12389  if(allocator != VK_NULL_HANDLE)
12390  {
12391  VMA_DEBUG_LOG("vmaDestroyAllocator");
12392  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
12393  vma_delete(&allocationCallbacks, allocator);
12394  }
12395 }
12396 
12398  VmaAllocator allocator,
12399  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
12400 {
12401  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
12402  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
12403 }
12404 
12406  VmaAllocator allocator,
12407  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
12408 {
12409  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
12410  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
12411 }
12412 
12414  VmaAllocator allocator,
12415  uint32_t memoryTypeIndex,
12416  VkMemoryPropertyFlags* pFlags)
12417 {
12418  VMA_ASSERT(allocator && pFlags);
12419  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
12420  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
12421 }
12422 
12424  VmaAllocator allocator,
12425  uint32_t frameIndex)
12426 {
12427  VMA_ASSERT(allocator);
12428  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
12429 
12430  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12431 
12432  allocator->SetCurrentFrameIndex(frameIndex);
12433 }
12434 
12435 void vmaCalculateStats(
12436  VmaAllocator allocator,
12437  VmaStats* pStats)
12438 {
12439  VMA_ASSERT(allocator && pStats);
12440  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12441  allocator->CalculateStats(pStats);
12442 }
12443 
12444 #if VMA_STATS_STRING_ENABLED
12445 
// Builds a null-terminated JSON string describing the allocator's state:
// total statistics, per-heap and per-memory-type statistics and, when
// detailedMap is VK_TRUE, a detailed map of all allocations. The returned
// string is allocated via the allocator and must be released with
// vmaFreeStatsString().
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // The writer lives in a nested scope so its output is fully written
        // into sb before the string is copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One JSON object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats are emitted only when the heap has any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Only the memory types that belong to this heap are emitted here.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode property flags into human-readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a null-terminated string owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
12553 
12554 void vmaFreeStatsString(
12555  VmaAllocator allocator,
12556  char* pStatsString)
12557 {
12558  if(pStatsString != VMA_NULL)
12559  {
12560  VMA_ASSERT(allocator);
12561  size_t len = strlen(pStatsString);
12562  vma_delete_array(allocator, pStatsString, len + 1);
12563  }
12564 }
12565 
12566 #endif // #if VMA_STATS_STRING_ENABLED
12567 
12568 /*
12569 This function is not protected by any mutex because it just reads immutable data.
12570 */
12571 VkResult vmaFindMemoryTypeIndex(
12572  VmaAllocator allocator,
12573  uint32_t memoryTypeBits,
12574  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12575  uint32_t* pMemoryTypeIndex)
12576 {
12577  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12578  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12579  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12580 
12581  if(pAllocationCreateInfo->memoryTypeBits != 0)
12582  {
12583  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
12584  }
12585 
12586  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
12587  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
12588 
12589  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12590  if(mapped)
12591  {
12592  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12593  }
12594 
12595  // Convert usage to requiredFlags and preferredFlags.
12596  switch(pAllocationCreateInfo->usage)
12597  {
12599  break;
12601  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12602  {
12603  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12604  }
12605  break;
12607  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12608  break;
12610  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12611  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
12612  {
12613  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
12614  }
12615  break;
12617  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
12618  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
12619  break;
12620  default:
12621  break;
12622  }
12623 
12624  *pMemoryTypeIndex = UINT32_MAX;
12625  uint32_t minCost = UINT32_MAX;
12626  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
12627  memTypeIndex < allocator->GetMemoryTypeCount();
12628  ++memTypeIndex, memTypeBit <<= 1)
12629  {
12630  // This memory type is acceptable according to memoryTypeBits bitmask.
12631  if((memTypeBit & memoryTypeBits) != 0)
12632  {
12633  const VkMemoryPropertyFlags currFlags =
12634  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
12635  // This memory type contains requiredFlags.
12636  if((requiredFlags & ~currFlags) == 0)
12637  {
12638  // Calculate cost as number of bits from preferredFlags not present in this memory type.
12639  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
12640  // Remember memory type with lowest cost.
12641  if(currCost < minCost)
12642  {
12643  *pMemoryTypeIndex = memTypeIndex;
12644  if(currCost == 0)
12645  {
12646  return VK_SUCCESS;
12647  }
12648  minCost = currCost;
12649  }
12650  }
12651  }
12652  }
12653  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
12654 }
12655 
12657  VmaAllocator allocator,
12658  const VkBufferCreateInfo* pBufferCreateInfo,
12659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12660  uint32_t* pMemoryTypeIndex)
12661 {
12662  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12663  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
12664  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12665  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12666 
12667  const VkDevice hDev = allocator->m_hDevice;
12668  VkBuffer hBuffer = VK_NULL_HANDLE;
12669  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
12670  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
12671  if(res == VK_SUCCESS)
12672  {
12673  VkMemoryRequirements memReq = {};
12674  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
12675  hDev, hBuffer, &memReq);
12676 
12677  res = vmaFindMemoryTypeIndex(
12678  allocator,
12679  memReq.memoryTypeBits,
12680  pAllocationCreateInfo,
12681  pMemoryTypeIndex);
12682 
12683  allocator->GetVulkanFunctions().vkDestroyBuffer(
12684  hDev, hBuffer, allocator->GetAllocationCallbacks());
12685  }
12686  return res;
12687 }
12688 
12690  VmaAllocator allocator,
12691  const VkImageCreateInfo* pImageCreateInfo,
12692  const VmaAllocationCreateInfo* pAllocationCreateInfo,
12693  uint32_t* pMemoryTypeIndex)
12694 {
12695  VMA_ASSERT(allocator != VK_NULL_HANDLE);
12696  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
12697  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
12698  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
12699 
12700  const VkDevice hDev = allocator->m_hDevice;
12701  VkImage hImage = VK_NULL_HANDLE;
12702  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
12703  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
12704  if(res == VK_SUCCESS)
12705  {
12706  VkMemoryRequirements memReq = {};
12707  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
12708  hDev, hImage, &memReq);
12709 
12710  res = vmaFindMemoryTypeIndex(
12711  allocator,
12712  memReq.memoryTypeBits,
12713  pAllocationCreateInfo,
12714  pMemoryTypeIndex);
12715 
12716  allocator->GetVulkanFunctions().vkDestroyImage(
12717  hDev, hImage, allocator->GetAllocationCallbacks());
12718  }
12719  return res;
12720 }
12721 
12722 VkResult vmaCreatePool(
12723  VmaAllocator allocator,
12724  const VmaPoolCreateInfo* pCreateInfo,
12725  VmaPool* pPool)
12726 {
12727  VMA_ASSERT(allocator && pCreateInfo && pPool);
12728 
12729  VMA_DEBUG_LOG("vmaCreatePool");
12730 
12731  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12732 
12733  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
12734 
12735 #if VMA_RECORDING_ENABLED
12736  if(allocator->GetRecorder() != VMA_NULL)
12737  {
12738  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
12739  }
12740 #endif
12741 
12742  return res;
12743 }
12744 
12745 void vmaDestroyPool(
12746  VmaAllocator allocator,
12747  VmaPool pool)
12748 {
12749  VMA_ASSERT(allocator);
12750 
12751  if(pool == VK_NULL_HANDLE)
12752  {
12753  return;
12754  }
12755 
12756  VMA_DEBUG_LOG("vmaDestroyPool");
12757 
12758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12759 
12760 #if VMA_RECORDING_ENABLED
12761  if(allocator->GetRecorder() != VMA_NULL)
12762  {
12763  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
12764  }
12765 #endif
12766 
12767  allocator->DestroyPool(pool);
12768 }
12769 
12770 void vmaGetPoolStats(
12771  VmaAllocator allocator,
12772  VmaPool pool,
12773  VmaPoolStats* pPoolStats)
12774 {
12775  VMA_ASSERT(allocator && pool && pPoolStats);
12776 
12777  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12778 
12779  allocator->GetPoolStats(pool, pPoolStats);
12780 }
12781 
12783  VmaAllocator allocator,
12784  VmaPool pool,
12785  size_t* pLostAllocationCount)
12786 {
12787  VMA_ASSERT(allocator && pool);
12788 
12789  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12790 
12791 #if VMA_RECORDING_ENABLED
12792  if(allocator->GetRecorder() != VMA_NULL)
12793  {
12794  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
12795  }
12796 #endif
12797 
12798  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
12799 }
12800 
12801 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
12802 {
12803  VMA_ASSERT(allocator && pool);
12804 
12805  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12806 
12807  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
12808 
12809  return allocator->CheckPoolCorruption(pool);
12810 }
12811 
// General-purpose allocation for explicit memory requirements, not tied to a
// specific buffer or image. pAllocationInfo is optional; when non-null and the
// allocation succeeds it receives information about the new allocation.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No buffer or image is known here, so dedicated-allocation hints are off
    // and the suballocation type is unknown.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Fill the optional out-parameter only on success.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
12853 
12855  VmaAllocator allocator,
12856  VkBuffer buffer,
12857  const VmaAllocationCreateInfo* pCreateInfo,
12858  VmaAllocation* pAllocation,
12859  VmaAllocationInfo* pAllocationInfo)
12860 {
12861  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
12862 
12863  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
12864 
12865  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12866 
12867  VkMemoryRequirements vkMemReq = {};
12868  bool requiresDedicatedAllocation = false;
12869  bool prefersDedicatedAllocation = false;
12870  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
12871  requiresDedicatedAllocation,
12872  prefersDedicatedAllocation);
12873 
12874  VkResult result = allocator->AllocateMemory(
12875  vkMemReq,
12876  requiresDedicatedAllocation,
12877  prefersDedicatedAllocation,
12878  buffer, // dedicatedBuffer
12879  VK_NULL_HANDLE, // dedicatedImage
12880  *pCreateInfo,
12881  VMA_SUBALLOCATION_TYPE_BUFFER,
12882  pAllocation);
12883 
12884 #if VMA_RECORDING_ENABLED
12885  if(allocator->GetRecorder() != VMA_NULL)
12886  {
12887  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
12888  allocator->GetCurrentFrameIndex(),
12889  vkMemReq,
12890  requiresDedicatedAllocation,
12891  prefersDedicatedAllocation,
12892  *pCreateInfo,
12893  *pAllocation);
12894  }
12895 #endif
12896 
12897  if(pAllocationInfo && result == VK_SUCCESS)
12898  {
12899  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
12900  }
12901 
12902  return result;
12903 }
12904 
// Allocates memory suitable for the given existing image, honoring any
// dedicated-allocation requirement/preference reported for it. Does not bind
// the memory; the caller is expected to do that (e.g. vmaBindImageMemory).
// pAllocationInfo is optional and filled only on success.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Query requirements plus dedicated-allocation hints for this image.
    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    // Tiling is unknown at this point, hence IMAGE_UNKNOWN suballocation type.
    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
12954 
12955 void vmaFreeMemory(
12956  VmaAllocator allocator,
12957  VmaAllocation allocation)
12958 {
12959  VMA_ASSERT(allocator);
12960 
12961  if(allocation == VK_NULL_HANDLE)
12962  {
12963  return;
12964  }
12965 
12966  VMA_DEBUG_LOG("vmaFreeMemory");
12967 
12968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12969 
12970 #if VMA_RECORDING_ENABLED
12971  if(allocator->GetRecorder() != VMA_NULL)
12972  {
12973  allocator->GetRecorder()->RecordFreeMemory(
12974  allocator->GetCurrentFrameIndex(),
12975  allocation);
12976  }
12977 #endif
12978 
12979  allocator->FreeMemory(allocation);
12980 }
12981 
12983  VmaAllocator allocator,
12984  VmaAllocation allocation,
12985  VmaAllocationInfo* pAllocationInfo)
12986 {
12987  VMA_ASSERT(allocator && allocation && pAllocationInfo);
12988 
12989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
12990 
12991 #if VMA_RECORDING_ENABLED
12992  if(allocator->GetRecorder() != VMA_NULL)
12993  {
12994  allocator->GetRecorder()->RecordGetAllocationInfo(
12995  allocator->GetCurrentFrameIndex(),
12996  allocation);
12997  }
12998 #endif
12999 
13000  allocator->GetAllocationInfo(allocation, pAllocationInfo);
13001 }
13002 
13003 VkBool32 vmaTouchAllocation(
13004  VmaAllocator allocator,
13005  VmaAllocation allocation)
13006 {
13007  VMA_ASSERT(allocator && allocation);
13008 
13009  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13010 
13011 #if VMA_RECORDING_ENABLED
13012  if(allocator->GetRecorder() != VMA_NULL)
13013  {
13014  allocator->GetRecorder()->RecordTouchAllocation(
13015  allocator->GetCurrentFrameIndex(),
13016  allocation);
13017  }
13018 #endif
13019 
13020  return allocator->TouchAllocation(allocation);
13021 }
13022 
13024  VmaAllocator allocator,
13025  VmaAllocation allocation,
13026  void* pUserData)
13027 {
13028  VMA_ASSERT(allocator && allocation);
13029 
13030  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13031 
13032  allocation->SetUserData(allocator, pUserData);
13033 
13034 #if VMA_RECORDING_ENABLED
13035  if(allocator->GetRecorder() != VMA_NULL)
13036  {
13037  allocator->GetRecorder()->RecordSetAllocationUserData(
13038  allocator->GetCurrentFrameIndex(),
13039  allocation,
13040  pUserData);
13041  }
13042 #endif
13043 }
13044 
13046  VmaAllocator allocator,
13047  VmaAllocation* pAllocation)
13048 {
13049  VMA_ASSERT(allocator && pAllocation);
13050 
13051  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
13052 
13053  allocator->CreateLostAllocation(pAllocation);
13054 
13055 #if VMA_RECORDING_ENABLED
13056  if(allocator->GetRecorder() != VMA_NULL)
13057  {
13058  allocator->GetRecorder()->RecordCreateLostAllocation(
13059  allocator->GetCurrentFrameIndex(),
13060  *pAllocation);
13061  }
13062 #endif
13063 }
13064 
13065 VkResult vmaMapMemory(
13066  VmaAllocator allocator,
13067  VmaAllocation allocation,
13068  void** ppData)
13069 {
13070  VMA_ASSERT(allocator && allocation && ppData);
13071 
13072  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13073 
13074  VkResult res = allocator->Map(allocation, ppData);
13075 
13076 #if VMA_RECORDING_ENABLED
13077  if(allocator->GetRecorder() != VMA_NULL)
13078  {
13079  allocator->GetRecorder()->RecordMapMemory(
13080  allocator->GetCurrentFrameIndex(),
13081  allocation);
13082  }
13083 #endif
13084 
13085  return res;
13086 }
13087 
13088 void vmaUnmapMemory(
13089  VmaAllocator allocator,
13090  VmaAllocation allocation)
13091 {
13092  VMA_ASSERT(allocator && allocation);
13093 
13094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13095 
13096 #if VMA_RECORDING_ENABLED
13097  if(allocator->GetRecorder() != VMA_NULL)
13098  {
13099  allocator->GetRecorder()->RecordUnmapMemory(
13100  allocator->GetCurrentFrameIndex(),
13101  allocation);
13102  }
13103 #endif
13104 
13105  allocator->Unmap(allocation);
13106 }
13107 
13108 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13109 {
13110  VMA_ASSERT(allocator && allocation);
13111 
13112  VMA_DEBUG_LOG("vmaFlushAllocation");
13113 
13114  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13115 
13116  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
13117 
13118 #if VMA_RECORDING_ENABLED
13119  if(allocator->GetRecorder() != VMA_NULL)
13120  {
13121  allocator->GetRecorder()->RecordFlushAllocation(
13122  allocator->GetCurrentFrameIndex(),
13123  allocation, offset, size);
13124  }
13125 #endif
13126 }
13127 
13128 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13129 {
13130  VMA_ASSERT(allocator && allocation);
13131 
13132  VMA_DEBUG_LOG("vmaInvalidateAllocation");
13133 
13134  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13135 
13136  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
13137 
13138 #if VMA_RECORDING_ENABLED
13139  if(allocator->GetRecorder() != VMA_NULL)
13140  {
13141  allocator->GetRecorder()->RecordInvalidateAllocation(
13142  allocator->GetCurrentFrameIndex(),
13143  allocation, offset, size);
13144  }
13145 #endif
13146 }
13147 
13148 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
13149 {
13150  VMA_ASSERT(allocator);
13151 
13152  VMA_DEBUG_LOG("vmaCheckCorruption");
13153 
13154  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13155 
13156  return allocator->CheckCorruption(memoryTypeBits);
13157 }
13158 
13159 VkResult vmaDefragment(
13160  VmaAllocator allocator,
13161  VmaAllocation* pAllocations,
13162  size_t allocationCount,
13163  VkBool32* pAllocationsChanged,
13164  const VmaDefragmentationInfo *pDefragmentationInfo,
13165  VmaDefragmentationStats* pDefragmentationStats)
13166 {
13167  VMA_ASSERT(allocator && pAllocations);
13168 
13169  VMA_DEBUG_LOG("vmaDefragment");
13170 
13171  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13172 
13173  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
13174 }
13175 
13176 VkResult vmaBindBufferMemory(
13177  VmaAllocator allocator,
13178  VmaAllocation allocation,
13179  VkBuffer buffer)
13180 {
13181  VMA_ASSERT(allocator && allocation && buffer);
13182 
13183  VMA_DEBUG_LOG("vmaBindBufferMemory");
13184 
13185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13186 
13187  return allocator->BindBufferMemory(allocation, buffer);
13188 }
13189 
13190 VkResult vmaBindImageMemory(
13191  VmaAllocator allocator,
13192  VmaAllocation allocation,
13193  VkImage image)
13194 {
13195  VMA_ASSERT(allocator && allocation && image);
13196 
13197  VMA_DEBUG_LOG("vmaBindImageMemory");
13198 
13199  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13200 
13201  return allocator->BindImageMemory(allocation, image);
13202 }
13203 
// Creates a buffer, allocates memory for it and binds them together:
//   1. vkCreateBuffer,
//   2. vkGetBufferMemoryRequirements,
//   3. allocate memory,
//   4. vkBindBufferMemory.
// On any failure, everything created so far is destroyed and both out-handles
// are reset to VK_NULL_HANDLE, so the caller never owns a half-built pair.
// pAllocationInfo is optional and filled only on full success.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-clear out-parameters so they are well-defined on every error path.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: unwind the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: unwind the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
13305 
13306 void vmaDestroyBuffer(
13307  VmaAllocator allocator,
13308  VkBuffer buffer,
13309  VmaAllocation allocation)
13310 {
13311  VMA_ASSERT(allocator);
13312 
13313  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13314  {
13315  return;
13316  }
13317 
13318  VMA_DEBUG_LOG("vmaDestroyBuffer");
13319 
13320  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13321 
13322 #if VMA_RECORDING_ENABLED
13323  if(allocator->GetRecorder() != VMA_NULL)
13324  {
13325  allocator->GetRecorder()->RecordDestroyBuffer(
13326  allocator->GetCurrentFrameIndex(),
13327  allocation);
13328  }
13329 #endif
13330 
13331  if(buffer != VK_NULL_HANDLE)
13332  {
13333  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
13334  }
13335 
13336  if(allocation != VK_NULL_HANDLE)
13337  {
13338  allocator->FreeMemory(allocation);
13339  }
13340 }
13341 
// Creates an image, allocates memory for it and binds them together:
//   1. vkCreateImage,
//   2. allocate memory (with dedicated-allocation hints),
//   3. vkBindImageMemory.
// On any failure, everything created so far is destroyed and both out-handles
// are reset to VK_NULL_HANDLE. pAllocationInfo is optional and filled only on
// full success.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-clear out-parameters so they are well-defined on every error path.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on tiling: optimal and linear images have
        // different granularity interactions inside memory blocks.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: unwind the allocation and the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: unwind the image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
13428 
13429 void vmaDestroyImage(
13430  VmaAllocator allocator,
13431  VkImage image,
13432  VmaAllocation allocation)
13433 {
13434  VMA_ASSERT(allocator);
13435 
13436  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
13437  {
13438  return;
13439  }
13440 
13441  VMA_DEBUG_LOG("vmaDestroyImage");
13442 
13443  VMA_DEBUG_GLOBAL_MUTEX_LOCK
13444 
13445 #if VMA_RECORDING_ENABLED
13446  if(allocator->GetRecorder() != VMA_NULL)
13447  {
13448  allocator->GetRecorder()->RecordDestroyImage(
13449  allocator->GetCurrentFrameIndex(),
13450  allocation);
13451  }
13452 #endif
13453 
13454  if(image != VK_NULL_HANDLE)
13455  {
13456  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
13457  }
13458  if(allocation != VK_NULL_HANDLE)
13459  {
13460  allocator->FreeMemory(allocation);
13461  }
13462 }
13463 
13464 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1446
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1759
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1515
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1477
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2032
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1458
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1716
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1450
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2132
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1512
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2377
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1940
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1489
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2013
Definition: vk_mem_alloc.h:1796
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1439
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1839
Definition: vk_mem_alloc.h:1743
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1524
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1577
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1509
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1747
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1649
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1455
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1648
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2381
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1541
VmaStatInfo total
Definition: vk_mem_alloc.h:1658
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2389
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1823
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2372
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1456
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1381
Represents the main object of this library, created by vmaCreateAllocator().
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1518
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1963
Definition: vk_mem_alloc.h:1957
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1584
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2142
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1451
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1475
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1860
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1983
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2019
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1437
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1966
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage
Definition: vk_mem_alloc.h:1694
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2367
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2385
Definition: vk_mem_alloc.h:1733
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1847
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1454
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Optional configuration parameters to be passed to function vmaDefragment().
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1654
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1387
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1408
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1479
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1413
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2387
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1834
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2029
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1447
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1637
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:1978
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1400
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1803
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1650
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1404
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1969
Definition: vk_mem_alloc.h:1742
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1453
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1829
Definition: vk_mem_alloc.h:1820
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1640
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1449
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:1991
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1527
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2022
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:1818
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1853
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1565
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1656
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:1783
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1649
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1460
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1497
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1402
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1459
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2005
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1452
Definition: vk_mem_alloc.h:1814
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1505
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2156
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1521
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1649
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1646
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2010
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2137
Definition: vk_mem_alloc.h:1816
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2383
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1445
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1644
Definition: vk_mem_alloc.h:1699
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:1959
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1494
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1642
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1457
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1461
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:1770
Definition: vk_mem_alloc.h:1726
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2151
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1435
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1448
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:1955
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2118
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:1922
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1650
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1469
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1657
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2016
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1650
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2123