// vk_mem_alloc.h - Vulkan Memory Allocator
//
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif
#include <vulkan/vulkan.h>

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator allocator,
    uint32_t memoryType,
    VkDeviceMemory memory,
    VkDeviceSize size);

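/*
Example (editor's sketch, not part of the original header): these typedefs let
an application observe every device-memory allocation and free the allocator
performs, e.g. for logging. The function names below are hypothetical; on some
platforms the VKAPI_PTR calling-convention macro must decorate the definitions.
*/
#if 0 // illustrative only
#include <cstdio>

static void MyAllocateCallback(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    // Called after each successful vkAllocateMemory performed by the library.
    printf("Allocated %llu bytes from memory type %u\n",
        (unsigned long long)size, memoryType);
}

static void MyFreeCallback(
    VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
{
    // Called before each vkFreeMemory performed by the library.
    printf("Freeing %llu bytes of memory type %u\n",
        (unsigned long long)size, memoryType);
}

// Wired up via the struct defined just below:
//     VmaDeviceMemoryCallbacks callbacks = { &MyAllocateCallback, &MyFreeCallback };
#endif
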
/*
Set of informative callbacks that the library calls around vkAllocateMemory and
vkFreeMemory. Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
*/
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction pfnFree;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

/// Pointers to some Vulkan functions - a subset used by the library.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory vkAllocateMemory;
    PFN_vkFreeMemory vkFreeMemory;
    PFN_vkMapMemory vkMapMemory;
    PFN_vkUnmapMemory vkUnmapMemory;
    PFN_vkBindBufferMemory vkBindBufferMemory;
    PFN_vkBindImageMemory vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer vkCreateBuffer;
    PFN_vkDestroyBuffer vkDestroyBuffer;
    PFN_vkCreateImage vkCreateImage;
    PFN_vkDestroyImage vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
#endif
} VmaVulkanFunctions;

/// Description of an Allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    /// Vulkan physical device. It must be valid throughout whole lifetime of created allocator.
    VkPhysicalDevice physicalDevice;
    /// Vulkan device. It must be valid throughout whole lifetime of created allocator.
    VkDevice device;
    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps, in bytes. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* pAllocationCallbacks;
    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
    /// Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    const VkDeviceSize* pHeapSizeLimit;
    /// Pointers to Vulkan functions. Can be null if you leave VMA_STATIC_VULKAN_FUNCTIONS defined to 1.
    const VmaVulkanFunctions* pVulkanFunctions;
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator);

/// Destroys allocator object.
void vmaDestroyAllocator(
    VmaAllocator allocator);

/// PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetPhysicalDeviceProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);

/// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
/// You can access them here, without fetching them again on your own.
void vmaGetMemoryProperties(
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);

/// Given a memory type index, returns its VkMemoryPropertyFlags.
void vmaGetMemoryTypeProperties(
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags);

/// Sets index of the current frame.
void vmaSetCurrentFrameIndex(
    VmaAllocator allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from current state of the Allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats);

#define VMA_STATS_STRING_ENABLED 1

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
/// @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap);

void vmaFreeStatsString(
    VmaAllocator allocator,
    char* pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

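/*
Example (editor's sketch): typical use of the stats-string pair above - build
the JSON report, write it out, then release it with the matching free function.
"allocator" is assumed to be a valid VmaAllocator; requires <cstdio>.
*/
#if 0 // illustrative only
void DumpVmaStatsToFile(VmaAllocator allocator, const char* path)
{
    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE: detailed map included
    if(FILE* f = fopen(path, "w"))
    {
        fputs(statsString, f);
        fclose(f);
    }
    vmaFreeStatsString(allocator, statsString); // must be freed through the library
}
#endif
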
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    /// No intended memory usage specified.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    /// Memory will be used on device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    /// Memory will be mappable on host; guarantees HOST_VISIBLE and HOST_COHERENT.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    /// Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    /// Memory mappable on host and cached, for resources written by GPU and read back.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Ignored if pool is not null.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every acceptable memory type. 0 is equivalent to all bits set.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool pool;
    /// Custom general-purpose pointer that will be stored in the allocation.
    void* pUserData;
} VmaAllocationCreateInfo;

VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

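/*
Example (editor's sketch): querying a memory type index for a host-visible,
host-coherent staging allocation. UINT32_MAX as memoryTypeBits means all memory
types are acceptable; "allocator" is assumed valid.
*/
#if 0 // illustrative only
uint32_t FindStagingMemoryType(VmaAllocator allocator)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.requiredFlags =
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    return (res == VK_SUCCESS) ? memTypeIndex : UINT32_MAX;
}
#endif
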
/// Variant of vmaFindMemoryTypeIndex() that derives memoryTypeBits from a buffer description.
VkResult vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

/// Variant of vmaFindMemoryTypeIndex() that derives memoryTypeBits from an image description.
VkResult vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex);

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameters of a VmaPool to be created.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. 0 means no limit.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

/// Describes parameters of an existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region, in bytes.
    VkDeviceSize unusedRangeSizeMax;
} VmaPoolStats;

/// Allocates Vulkan device memory and creates VmaPool object.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool);

/// Destroys VmaPool object and frees Vulkan device memory.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool);

/// Retrieves statistics of existing VmaPool object.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats);

/// Marks all allocations in given pool as lost if they are not used in current frame
/// or VmaPoolCreateInfo::frameInUseCount frames back from now.
void vmaMakePoolAllocationsLost(
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of a VmaAllocation object that can be retrieved using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory deviceMemory;
    /// Offset into deviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data; null if not mapped.
    void* pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* pUserData;
} VmaAllocationInfo;

/// General purpose memory allocation.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

VkResult vmaAllocateMemoryForBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Function similar to vmaAllocateMemoryForBuffer().
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

/// Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Returns current information about specified allocation and atomically marks it as used in current frame.
void vmaGetAllocationInfo(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo);

/// Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation);

/// Sets pUserData in given allocation to new value.
void vmaSetAllocationUserData(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData);

/// Creates new allocation that is in lost state from the beginning.
void vmaCreateLostAllocation(
    VmaAllocator allocator,
    VmaAllocation* pAllocation);

VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData);

void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation);

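/*
Example (editor's sketch): the map/write/unmap pattern. Each vmaMapMemory must
be paired with vmaUnmapMemory; the implementation below keeps a per-allocation
reference count (see the m_MapCount comment in VmaAllocation_T), so nested
mapping of the same allocation is possible. Requires <cstring> for memcpy.
*/
#if 0 // illustrative only
VkResult UploadData(VmaAllocator allocator, VmaAllocation allocation,
    const void* srcData, size_t srcSize)
{
    void* mappedData = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res != VK_SUCCESS)
        return res;
    memcpy(mappedData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
    return VK_SUCCESS;
}
#endif
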
/// Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

/// Compacts memory by moving allocations.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats);

VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer);

VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image);

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

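/*
Example (editor's sketch): creating a buffer and its backing memory in one call.
vmaCreateBuffer creates the VkBuffer, allocates memory matching its requirements,
and binds them together. Error handling is reduced to a single VkResult.
*/
#if 0 // illustrative only
VkResult CreateVertexBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* outBuffer, VmaAllocation* outAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // prefer device-local memory

    return vmaCreateBuffer(allocator, &bufferInfo, &allocInfo,
        outBuffer, outAllocation, nullptr);
}
// Destruction is symmetrical:
//     vmaDestroyBuffer(allocator, buffer, allocation);
#endif
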
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation);

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo);

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <cstdio> // for snprintf, used by the VMA_STATS_STRING_ENABLED helpers below

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;

Define it to 0 if you are going to provide your own pointers to Vulkan functions
via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
#define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

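/*
Example (editor's sketch): when VMA_STATIC_VULKAN_FUNCTIONS is 0 (e.g. building
with VK_NO_PROTOTYPES and a runtime loader), the application fills
VmaVulkanFunctions itself and passes it via VmaAllocatorCreateInfo::pVulkanFunctions.
Only a few entries are shown; every member must be set the same way.
physicalDevice and device are assumed to exist in the caller's scope.
*/
#if 0 // illustrative only
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... and so on for every member of VmaVulkanFunctions.

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
#endif
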
// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of these containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic

#if !defined(_WIN32) && !defined(__APPLE__)
    #include <malloc.h> // for aligned_alloc()
#endif

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef _DEBUG
        #define VMA_ASSERT(expr) assert(expr)
    #else
        #define VMA_ASSERT(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef _DEBUG
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #if defined(_WIN32)
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    #else
        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
    #endif
#endif

#ifndef VMA_SYSTEM_FREE
    #if defined(_WIN32)
        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    #else
        #define VMA_SYSTEM_FREE(ptr) free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

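/*
Example (editor's sketch): replacing VMA_MUTEX with a custom type. Any class
exposing Lock()/Unlock() works; here, a trivial spinlock built on
std::atomic_flag, defined before including this header with VMA_IMPLEMENTATION.
*/
#if 0 // illustrative only
#include <atomic>

class MySpinLock
{
public:
    void Lock()   { while(m_Flag.test_and_set(std::memory_order_acquire)) { } }
    void Unlock() { m_Flag.clear(std::memory_order_release); }
private:
    std::atomic_flag m_Flag = ATOMIC_FLAG_INIT;
};

#define VMA_MUTEX MySpinLock
#endif
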
/*
If providing your own implementation of VMA_ATOMIC_UINT32, you need to implement
a subset of std::atomic:

- Constructor(uint32_t desired)
- uint32_t load() const
- void store(uint32_t desired)
- bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
*/
#ifndef VMA_ATOMIC_UINT32
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

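/*
Example (editor's sketch): a non-atomic stand-in implementing exactly the subset
listed above. Only safe if the allocator is guaranteed to be used from a single
thread; shown purely to make the required interface concrete.
*/
#if 0 // illustrative only
class MyFakeAtomicUint32
{
public:
    MyFakeAtomicUint32(uint32_t desired) : m_Value(desired) { }
    uint32_t load() const { return m_Value; }
    void store(uint32_t desired) { m_Value = desired; }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        if(m_Value == expected) { m_Value = desired; return true; }
        expected = m_Value; // report the observed value, as std::atomic does
        return false;
    }
private:
    uint32_t m_Value;
};

#define VMA_ATOMIC_UINT32 MyFakeAtomicUint32
#endif
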
#ifndef VMA_BEST_FIT
    /*
    Set to 1 to prefer the best-fit free range (the smallest one that is still
    large enough) when placing a new suballocation; 0 uses the first suitable
    free range found.
    */
    #define VMA_BEST_FIT (1)
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Debug feature: set to 1 to make every allocation use its own dedicated
    // VkDeviceMemory block, bypassing suballocation.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    // Minimum alignment of all suballocations, in bytes.
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin between suballocations, in bytes.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Debug feature: set to 1 to protect every entry point of the library with
    // a single global mutex.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value to be used as VkPhysicalDeviceLimits::bufferImageGranularity;
    // useful for debugging granularity-related issues.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small", in bytes.
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

/*******************************************************************************
END OF CONFIGURATION
*/

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

// Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    return (val + align - 1) / align * align;
}

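// Editor's sketch: quick sanity checks of VmaAlignUp. The formula works for any
// positive align, not only powers of two, because it rounds via integer division.
#if 0 // illustrative only
static void TestVmaAlignUp()
{
    VMA_ASSERT(VmaAlignUp<uint32_t>(11, 8) == 16);
    VMA_ASSERT(VmaAlignUp<uint32_t>(16, 8) == 16); // already-aligned values are unchanged
    VMA_ASSERT(VmaAlignUp<uint32_t>(5, 3) == 6);   // non-power-of-two alignment
}
#endif
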
// Division with mathematical rounding to nearest number.
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than resourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

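// Editor's sketch: worked example for VmaBlocksOnSamePage with a 1024-byte
// "page" (bufferImageGranularity). Resource A occupies [0, 100).
#if 0 // illustrative only
static void TestVmaBlocksOnSamePage()
{
    // A ends at byte 99 (page 0); B starts at 1024 (page 1): pages do not overlap.
    VMA_ASSERT(!VmaBlocksOnSamePage(0, 100, 1024, 1024));
    // B starts at 1000, still inside page 0 where A ends: pages overlap.
    VMA_ASSERT(VmaBlocksOnSamePage(0, 100, 1000, 1024));
}
#endif
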
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

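// Editor's sketch: using VmaBinaryFindFirstNotLess on a plain sorted array.
// The returned pointer is the lower bound: the found element, or the insertion spot.
#if 0 // illustrative only
static void TestVmaBinaryFindFirstNotLess()
{
    const int sorted[] = { 1, 3, 3, 7, 9 };
    const int* const end = sorted + 5;
    struct IntLess { bool operator()(int a, int b) const { return a < b; } };

    const int* it = VmaBinaryFindFirstNotLess(sorted, end, 3, IntLess());
    VMA_ASSERT(it == sorted + 1); // first of the two 3s

    it = VmaBinaryFindFirstNotLess(sorted, end, 4, IntLess());
    VMA_ASSERT(it == sorted + 3); // insertion point, just before 7
}
#endif
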
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

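// Editor's sketch: vma_new/vma_delete route object construction through the
// user-provided VkAllocationCallbacks (or the system aligned allocator).
#if 0 // illustrative only
struct MyItem { int x; MyItem() : x(0) { } };

static void ExampleVmaNewDelete(const VkAllocationCallbacks* pCallbacks)
{
    // Single object: raw memory from VmaAllocate, then placement-new construction.
    MyItem* pItem = vma_new(pCallbacks, MyItem)();
    vma_delete(pCallbacks, pItem); // runs ~MyItem, then VmaFree

    // Array storage: note vma_new_array does not construct every element, which
    // is why the containers below require POD element types.
    uint32_t* pArr = vma_new_array(pCallbacks, uint32_t, 16);
    vma_delete_array(pCallbacks, pArr, 16); // destroys in reverse order, then frees
}
#endif
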
// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;

    VmaVector(const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(VMA_NULL),
        m_Count(0),
        m_Capacity(0)
    {
    }

    VmaVector(size_t count, const AllocatorT& allocator) :
        m_Allocator(allocator),
        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
        m_Count(count),
        m_Capacity(count)
    {
    }

    VmaVector(const VmaVector<T, AllocatorT>& src) :
        m_Allocator(src.m_Allocator),
        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
        m_Count(src.m_Count),
        m_Capacity(src.m_Count)
    {
        if(m_Count != 0)
        {
            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
        }
    }

    ~VmaVector()
    {
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    }

    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    {
        if(&rhs != this)
        {
            resize(rhs.m_Count);
            if(m_Count != 0)
            {
                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
            }
        }
        return *this;
    }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    const T* data() const { return m_pArray; }

    T& operator[](size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }
    const T& operator[](size_t index) const
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        return m_pArray[index];
    }

    T& front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    const T& front() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[0];
    }
    T& back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }
    const T& back() const
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        return m_pArray[m_Count - 1];
    }

    void reserve(size_t newCapacity, bool freeMemory = false)
    {
        newCapacity = VMA_MAX(newCapacity, m_Count);

        if((newCapacity < m_Capacity) && !freeMemory)
        {
            newCapacity = m_Capacity;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            if(m_Count != 0)
            {
                memcpy(newArray, m_pArray, m_Count * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }
    }

    void resize(size_t newCount, bool freeMemory = false)
    {
        size_t newCapacity = m_Capacity;
        if(newCount > m_Capacity)
        {
            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
        }
        else if(freeMemory)
        {
            newCapacity = newCount;
        }

        if(newCapacity != m_Capacity)
        {
            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
            if(elementsToCopy != 0)
            {
                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
            }
            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
            m_Capacity = newCapacity;
            m_pArray = newArray;
        }

        m_Count = newCount;
    }

    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }

    void insert(size_t index, const T& src)
    {
        VMA_HEAVY_ASSERT(index <= m_Count);
        const size_t oldCount = size();
        resize(oldCount + 1);
        if(index < oldCount)
        {
            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
        }
        m_pArray[index] = src;
    }

    void remove(size_t index)
    {
        VMA_HEAVY_ASSERT(index < m_Count);
        const size_t oldCount = size();
        if(index < oldCount - 1)
        {
            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
        }
        resize(oldCount - 1);
    }

    void push_back(const T& src)
    {
        const size_t newIndex = size();
        resize(newIndex + 1);
        m_pArray[newIndex] = src;
    }

    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    void push_front(const T& src)
    {
        insert(0, src);
    }

    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }

    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;
    T* m_pArray;
    size_t m_Count;
    size_t m_Capacity;
};

template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}

#endif // #if VMA_USE_STL_VECTOR

template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}

template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}

template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    // Search over the contiguous storage; data() is used consistently rather than
    // mixing data() with begin()/size(), which would not compile.
    const typename VectorT::value_type* const it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    if((it != vector.data() + vector.size()) && !comparator(*it, value) && !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}

// class VmaPoolAllocator

/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};

template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}

template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}

template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}

template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}

template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}

template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}

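// Editor's sketch: VmaPoolAllocator hands out fixed-size slots from growing
// blocks. Alloc() returns raw storage for T (constructors are not run) and
// Free() threads the slot back onto the owning block's free list.
#if 0 // illustrative only
static void ExampleVmaPoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // 32 items per block

    uint64_t* a = pool.Alloc();
    uint64_t* b = pool.Alloc();
    *a = 1; *b = 2;

    pool.Free(a);               // slot becomes head of the block's free list
    uint64_t* c = pool.Alloc(); // typically reuses the slot just freed
    pool.Free(b);
    pool.Free(c);
}                               // ~VmaPoolAllocator calls Clear() and releases all blocks
#endif
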
// class VmaRawList, VmaList

#if VMA_USE_STL_LIST

#define VmaList std::list

#else // #if VMA_USE_STL_LIST

template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};

// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};

template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}

template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear: it would waste computation returning every
    // item to m_ItemAllocator as free when the allocator itself is being destroyed.
}

template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}

template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}

template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}

template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};

#endif // #if VMA_USE_STL_LIST

// class VmaMap

// Unused in this version.
#if 0

#if VMA_USE_STL_UNORDERED_MAP

#define VmaPair std::pair

#define VMA_MAP_TYPE(KeyT, ValueT) \
    std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >

#else // #if VMA_USE_STL_UNORDERED_MAP

template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};

/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};

#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>

template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}

template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}

template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}

#endif // #if VMA_USE_STL_UNORDERED_MAP

#endif // #if 0

3760 
3761 class VmaDeviceMemoryBlock;
3762 
3763 struct VmaAllocation_T
3764 {
3765  VMA_CLASS_NO_COPY(VmaAllocation_T)
3766 private:
3767  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3768 
3769  enum FLAGS
3770  {
3771  FLAG_USER_DATA_STRING = 0x01,
3772  };
3773 
3774 public:
3775  enum ALLOCATION_TYPE
3776  {
3777  ALLOCATION_TYPE_NONE,
3778  ALLOCATION_TYPE_BLOCK,
3779  ALLOCATION_TYPE_DEDICATED,
3780  };
3781 
3782  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3783  m_Alignment(1),
3784  m_Size(0),
3785  m_pUserData(VMA_NULL),
3786  m_LastUseFrameIndex(currentFrameIndex),
3787  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3788  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3789  m_MapCount(0),
3790  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3791  {
3792 #if VMA_STATS_STRING_ENABLED
3793  m_CreationFrameIndex = currentFrameIndex;
3794  m_BufferImageUsage = 0;
3795 #endif
3796  }
3797 
3798  ~VmaAllocation_T()
3799  {
3800  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3801 
3802  // Check if owned string was freed.
3803  VMA_ASSERT(m_pUserData == VMA_NULL);
3804  }
3805 
3806  void InitBlockAllocation(
3807  VmaPool hPool,
3808  VmaDeviceMemoryBlock* block,
3809  VkDeviceSize offset,
3810  VkDeviceSize alignment,
3811  VkDeviceSize size,
3812  VmaSuballocationType suballocationType,
3813  bool mapped,
3814  bool canBecomeLost)
3815  {
3816  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3817  VMA_ASSERT(block != VMA_NULL);
3818  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3819  m_Alignment = alignment;
3820  m_Size = size;
3821  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3822  m_SuballocationType = (uint8_t)suballocationType;
3823  m_BlockAllocation.m_hPool = hPool;
3824  m_BlockAllocation.m_Block = block;
3825  m_BlockAllocation.m_Offset = offset;
3826  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3827  }
3828 
3829  void InitLost()
3830  {
3831  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3832  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3833  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3834  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3835  m_BlockAllocation.m_Block = VMA_NULL;
3836  m_BlockAllocation.m_Offset = 0;
3837  m_BlockAllocation.m_CanBecomeLost = true;
3838  }
3839 
3840  void ChangeBlockAllocation(
3841  VmaAllocator hAllocator,
3842  VmaDeviceMemoryBlock* block,
3843  VkDeviceSize offset);
3844 
3845  // pMappedData not null means allocation is created with MAPPED flag.
3846  void InitDedicatedAllocation(
3847  uint32_t memoryTypeIndex,
3848  VkDeviceMemory hMemory,
3849  VmaSuballocationType suballocationType,
3850  void* pMappedData,
3851  VkDeviceSize size)
3852  {
3853  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3854  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3855  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3856  m_Alignment = 0;
3857  m_Size = size;
3858  m_SuballocationType = (uint8_t)suballocationType;
3859  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3860  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3861  m_DedicatedAllocation.m_hMemory = hMemory;
3862  m_DedicatedAllocation.m_pMappedData = pMappedData;
3863  }
3864 
3865  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3866  VkDeviceSize GetAlignment() const { return m_Alignment; }
3867  VkDeviceSize GetSize() const { return m_Size; }
3868  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3869  void* GetUserData() const { return m_pUserData; }
3870  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3871  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3872 
3873  VmaDeviceMemoryBlock* GetBlock() const
3874  {
3875  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3876  return m_BlockAllocation.m_Block;
3877  }
3878  VkDeviceSize GetOffset() const;
3879  VkDeviceMemory GetMemory() const;
3880  uint32_t GetMemoryTypeIndex() const;
3881  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3882  void* GetMappedData() const;
3883  bool CanBecomeLost() const;
3884  VmaPool GetPool() const;
3885 
3886  uint32_t GetLastUseFrameIndex() const
3887  {
3888  return m_LastUseFrameIndex.load();
3889  }
3890  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3891  {
3892  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3893  }
3894  /*
3895  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3896  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3897  - Else, returns false.
3898 
3899  If hAllocation is already lost, assert - you should not call it then.
3900  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3901  */
3902  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3903 
3904  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3905  {
3906  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3907  outInfo.blockCount = 1;
3908  outInfo.allocationCount = 1;
3909  outInfo.unusedRangeCount = 0;
3910  outInfo.usedBytes = m_Size;
3911  outInfo.unusedBytes = 0;
3912  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3913  outInfo.unusedRangeSizeMin = UINT64_MAX;
3914  outInfo.unusedRangeSizeMax = 0;
3915  }
3916 
3917  void BlockAllocMap();
3918  void BlockAllocUnmap();
3919  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3920  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3921 
3922 #if VMA_STATS_STRING_ENABLED
3923  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
3924  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
3925 
3926  void InitBufferImageUsage(uint32_t bufferImageUsage)
3927  {
3928  VMA_ASSERT(m_BufferImageUsage == 0);
3929  m_BufferImageUsage = bufferImageUsage;
3930  }
3931 
3932  void PrintParameters(class VmaJsonWriter& json) const;
3933 #endif
3934 
3935 private:
3936  VkDeviceSize m_Alignment;
3937  VkDeviceSize m_Size;
3938  void* m_pUserData;
3939  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3940  uint8_t m_Type; // ALLOCATION_TYPE
3941  uint8_t m_SuballocationType; // VmaSuballocationType
3942  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3943  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3944  uint8_t m_MapCount;
3945  uint8_t m_Flags; // enum FLAGS
3946 
3947  // Allocation out of VmaDeviceMemoryBlock.
3948  struct BlockAllocation
3949  {
3950  VmaPool m_hPool; // Null if belongs to general memory.
3951  VmaDeviceMemoryBlock* m_Block;
3952  VkDeviceSize m_Offset;
3953  bool m_CanBecomeLost;
3954  };
3955 
3956  // Allocation for an object that has its own private VkDeviceMemory.
3957  struct DedicatedAllocation
3958  {
3959  uint32_t m_MemoryTypeIndex;
3960  VkDeviceMemory m_hMemory;
3961  void* m_pMappedData; // Not null means memory is mapped.
3962  };
3963 
3964  union
3965  {
3966  // Allocation out of VmaDeviceMemoryBlock.
3967  BlockAllocation m_BlockAllocation;
3968  // Allocation for an object that has its own private VkDeviceMemory.
3969  DedicatedAllocation m_DedicatedAllocation;
3970  };
3971 
3972 #if VMA_STATS_STRING_ENABLED
3973  uint32_t m_CreationFrameIndex;
3974  uint32_t m_BufferImageUsage; // 0 if unknown.
3975 #endif
3976 
3977  void FreeUserDataString(VmaAllocator hAllocator);
3978 };
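/*
How the packed m_MapCount byte above is interpreted: bit 0x80 records that the
allocation was created persistently mapped (VMA_ALLOCATION_CREATE_MAPPED_BIT),
and the low 7 bits count outstanding vmaMapMemory() calls. A minimal decoding
sketch, mirroring the logic used in ChangeBlockAllocation() below:

    const uint8_t mapCount = ...; // value of m_MapCount
    const bool persistentlyMapped = (mapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0;
    uint32_t mapRefCount = mapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; // explicit Map() calls
    if(persistentlyMapped)
        ++mapRefCount; // the persistent mapping holds one extra reference
*/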
3979 
3980 /*
3981 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
3982 returned as an allocated memory block, or free.
3983 */
3984 struct VmaSuballocation
3985 {
3986  VkDeviceSize offset;
3987  VkDeviceSize size;
3988  VmaAllocation hAllocation;
3989  VmaSuballocationType type;
3990 };
3991 
3992 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3993 
3994 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3995 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3996 
3997 /*
3998 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3999 
4000 If canMakeOtherLost was false:
4001 - item points to a FREE suballocation.
4002 - itemsToMakeLostCount is 0.
4003 
4004 If canMakeOtherLost was true:
4005 - item points to first of sequence of suballocations, which are either FREE,
4006  or point to VmaAllocations that can become lost.
4007 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
4008  the requested allocation to succeed.
4009 */
4010 struct VmaAllocationRequest
4011 {
4012  VkDeviceSize offset;
4013  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
4014  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
4015  VmaSuballocationList::iterator item;
4016  size_t itemsToMakeLostCount;
4017 
4018  VkDeviceSize CalcCost() const
4019  {
4020  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
4021  }
4022 };
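/*
Worked example of the cost model: a request that would make lost two
allocations totaling 192 KiB has

    CalcCost() = 196608 + 2 * VMA_LOST_ALLOCATION_COST
               = 196608 + 2 * 1048576 = 2293760

so each lost allocation is penalized as if it wasted an extra 1 MiB,
strongly preferring requests that fit into genuinely free space.
*/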
4023 
4024 /*
4025 Data structure used for bookkeeping of allocations and unused ranges of memory
4026 in a single VkDeviceMemory block.
4027 */
4028 class VmaBlockMetadata
4029 {
4030  VMA_CLASS_NO_COPY(VmaBlockMetadata)
4031 public:
4032  VmaBlockMetadata(VmaAllocator hAllocator);
4033  ~VmaBlockMetadata();
4034  void Init(VkDeviceSize size);
4035 
4036  // Validates all data structures inside this object. If not valid, returns false.
4037  bool Validate() const;
4038  VkDeviceSize GetSize() const { return m_Size; }
4039  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
4040  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
4041  VkDeviceSize GetUnusedRangeSizeMax() const;
4042  // Returns true if this block is empty - contains only a single free suballocation.
4043  bool IsEmpty() const;
4044 
4045  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
4046  void AddPoolStats(VmaPoolStats& inoutStats) const;
4047 
4048 #if VMA_STATS_STRING_ENABLED
4049  void PrintDetailedMap(class VmaJsonWriter& json) const;
4050 #endif
4051 
4052  // Creates a trivial request for the case when the block is empty.
4053  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
4054 
4055  // Tries to find a place for a suballocation with the given parameters inside this block.
4056  // If it succeeds, fills pAllocationRequest and returns true.
4057  // If it fails, returns false.
4058  bool CreateAllocationRequest(
4059  uint32_t currentFrameIndex,
4060  uint32_t frameInUseCount,
4061  VkDeviceSize bufferImageGranularity,
4062  VkDeviceSize allocSize,
4063  VkDeviceSize allocAlignment,
4064  VmaSuballocationType allocType,
4065  bool canMakeOtherLost,
4066  VmaAllocationRequest* pAllocationRequest);
4067 
4068  bool MakeRequestedAllocationsLost(
4069  uint32_t currentFrameIndex,
4070  uint32_t frameInUseCount,
4071  VmaAllocationRequest* pAllocationRequest);
4072 
4073  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4074 
4075  // Makes actual allocation based on request. Request must already be checked and valid.
4076  void Alloc(
4077  const VmaAllocationRequest& request,
4078  VmaSuballocationType type,
4079  VkDeviceSize allocSize,
4080  VmaAllocation hAllocation);
4081 
4082  // Frees suballocation assigned to given memory region.
4083  void Free(const VmaAllocation allocation);
4084  void FreeAtOffset(VkDeviceSize offset);
4085 
4086 private:
4087  VkDeviceSize m_Size;
4088  uint32_t m_FreeCount;
4089  VkDeviceSize m_SumFreeSize;
4090  VmaSuballocationList m_Suballocations;
4091  // Suballocations that are free and have size greater than certain threshold.
4092  // Sorted by size, ascending.
4093  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
4094 
4095  bool ValidateFreeSuballocationList() const;
4096 
4097  // Checks whether a suballocation with the given parameters can be placed at the given suballocItem.
4098  // If yes, fills pOffset and returns true; if no, returns false.
4099  bool CheckAllocation(
4100  uint32_t currentFrameIndex,
4101  uint32_t frameInUseCount,
4102  VkDeviceSize bufferImageGranularity,
4103  VkDeviceSize allocSize,
4104  VkDeviceSize allocAlignment,
4105  VmaSuballocationType allocType,
4106  VmaSuballocationList::const_iterator suballocItem,
4107  bool canMakeOtherLost,
4108  VkDeviceSize* pOffset,
4109  size_t* itemsToMakeLostCount,
4110  VkDeviceSize* pSumFreeSize,
4111  VkDeviceSize* pSumItemSize) const;
4112  // Given a free suballocation, merges it with the following one, which must also be free.
4113  void MergeFreeWithNext(VmaSuballocationList::iterator item);
4114  // Releases given suballocation, making it free.
4115  // Merges it with adjacent free suballocations if applicable.
4116  // Returns iterator to new free suballocation at this place.
4117  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
4118  // Given a free suballocation, inserts it into the sorted list
4119  // m_FreeSuballocationsBySize if it is large enough to be registered there.
4120  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
4121  // Given a free suballocation, removes it from the sorted list
4122  // m_FreeSuballocationsBySize if it is registered there.
4123  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
4124 };
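/*
Typical call sequence on this metadata object when allocating (a sketch with
hypothetical local variables; error handling omitted):

    VmaAllocationRequest request;
    if(metadata.CreateAllocationRequest(currentFrameIndex, frameInUseCount,
        bufferImageGranularity, allocSize, allocAlignment, allocType,
        true, // canMakeOtherLost
        &request) &&
        metadata.MakeRequestedAllocationsLost(currentFrameIndex, frameInUseCount, &request))
    {
        metadata.Alloc(request, allocType, allocSize, hAllocation);
    }
*/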
4125 
4126 /*
4127 Represents a single block of device memory (`VkDeviceMemory`) with all the
4128 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4129 
4130 Thread-safety: This class must be externally synchronized.
4131 */
4132 class VmaDeviceMemoryBlock
4133 {
4134  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
4135 public:
4136  VmaBlockMetadata m_Metadata;
4137 
4138  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
4139 
4140  ~VmaDeviceMemoryBlock()
4141  {
4142  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
4143  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
4144  }
4145 
4146  // Always call after construction.
4147  void Init(
4148  uint32_t newMemoryTypeIndex,
4149  VkDeviceMemory newMemory,
4150  VkDeviceSize newSize);
4151  // Always call before destruction.
4152  void Destroy(VmaAllocator allocator);
4153 
4154  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
4155  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4156  void* GetMappedData() const { return m_pMappedData; }
4157 
4158  // Validates all data structures inside this object. If not valid, returns false.
4159  bool Validate() const;
4160 
4161  // ppData can be null.
4162  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
4163  void Unmap(VmaAllocator hAllocator, uint32_t count);
4164 
4165  VkResult BindBufferMemory(
4166  const VmaAllocator hAllocator,
4167  const VmaAllocation hAllocation,
4168  VkBuffer hBuffer);
4169  VkResult BindImageMemory(
4170  const VmaAllocator hAllocator,
4171  const VmaAllocation hAllocation,
4172  VkImage hImage);
4173 
4174 private:
4175  uint32_t m_MemoryTypeIndex;
4176  VkDeviceMemory m_hMemory;
4177 
4178  // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
4179  // Also protects m_MapCount, m_pMappedData.
4180  VMA_MUTEX m_Mutex;
4181  uint32_t m_MapCount;
4182  void* m_pMappedData;
4183 };
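/*
Map()/Unmap() take a count so one call can add or release several mapping
references at once - VmaAllocation_T::ChangeBlockAllocation() below uses this
to move all references owned by an allocation from its old block to a new one.
A minimal usage sketch:

    void* pData = VMA_NULL;
    if(block.Map(hAllocator, 1, &pData) == VK_SUCCESS)
    {
        // ... read or write through pData ...
        block.Unmap(hAllocator, 1);
    }
*/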
4184 
4185 struct VmaPointerLess
4186 {
4187  bool operator()(const void* lhs, const void* rhs) const
4188  {
4189  return lhs < rhs;
4190  }
4191 };
4192 
4193 class VmaDefragmentator;
4194 
4195 /*
4196 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4197 Vulkan memory type.
4198 
4199 Synchronized internally with a mutex.
4200 */
4201 struct VmaBlockVector
4202 {
4203  VMA_CLASS_NO_COPY(VmaBlockVector)
4204 public:
4205  VmaBlockVector(
4206  VmaAllocator hAllocator,
4207  uint32_t memoryTypeIndex,
4208  VkDeviceSize preferredBlockSize,
4209  size_t minBlockCount,
4210  size_t maxBlockCount,
4211  VkDeviceSize bufferImageGranularity,
4212  uint32_t frameInUseCount,
4213  bool isCustomPool);
4214  ~VmaBlockVector();
4215 
4216  VkResult CreateMinBlocks();
4217 
4218  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
4219  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
4220  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
4221  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
4222 
4223  void GetPoolStats(VmaPoolStats* pStats);
4224 
4225  bool IsEmpty() const { return m_Blocks.empty(); }
4226 
4227  VkResult Allocate(
4228  VmaPool hCurrentPool,
4229  uint32_t currentFrameIndex,
4230  const VkMemoryRequirements& vkMemReq,
4231  const VmaAllocationCreateInfo& createInfo,
4232  VmaSuballocationType suballocType,
4233  VmaAllocation* pAllocation);
4234 
4235  void Free(
4236  VmaAllocation hAllocation);
4237 
4238  // Adds statistics of this BlockVector to pStats.
4239  void AddStats(VmaStats* pStats);
4240 
4241 #if VMA_STATS_STRING_ENABLED
4242  void PrintDetailedMap(class VmaJsonWriter& json);
4243 #endif
4244 
4245  void MakePoolAllocationsLost(
4246  uint32_t currentFrameIndex,
4247  size_t* pLostAllocationCount);
4248 
4249  VmaDefragmentator* EnsureDefragmentator(
4250  VmaAllocator hAllocator,
4251  uint32_t currentFrameIndex);
4252 
4253  VkResult Defragment(
4254  VmaDefragmentationStats* pDefragmentationStats,
4255  VkDeviceSize& maxBytesToMove,
4256  uint32_t& maxAllocationsToMove);
4257 
4258  void DestroyDefragmentator();
4259 
4260 private:
4261  friend class VmaDefragmentator;
4262 
4263  const VmaAllocator m_hAllocator;
4264  const uint32_t m_MemoryTypeIndex;
4265  const VkDeviceSize m_PreferredBlockSize;
4266  const size_t m_MinBlockCount;
4267  const size_t m_MaxBlockCount;
4268  const VkDeviceSize m_BufferImageGranularity;
4269  const uint32_t m_FrameInUseCount;
4270  const bool m_IsCustomPool;
4271  VMA_MUTEX m_Mutex;
4272  // Incrementally sorted by sumFreeSize, ascending.
4273  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
4274  /* There can be at most one block that is completely empty - a hysteresis
4275  to avoid the pessimistic case of alternately creating and destroying a
4276  VkDeviceMemory. */
4277  bool m_HasEmptyBlock;
4278  VmaDefragmentator* m_pDefragmentator;
4279 
4280  VkDeviceSize CalcMaxBlockSize() const;
4281 
4282  // Finds and removes given block from vector.
4283  void Remove(VmaDeviceMemoryBlock* pBlock);
4284 
4285  // Performs a single step in sorting m_Blocks. They may not be fully sorted
4286  // after this call.
4287  void IncrementallySortBlocks();
4288 
4289  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
4290 };
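/*
m_Blocks is only kept "incrementally sorted": each IncrementallySortBlocks()
call is expected to perform at most one adjacent swap, amortizing the sort over
many allocations. A plausible sketch (assuming the VMA_SWAP helper defined
earlier in this file):

    // Bubble-sort step: stop after the first swap.
    for(size_t i = 1; i < m_Blocks.size(); ++i)
    {
        if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() >
            m_Blocks[i]->m_Metadata.GetSumFreeSize())
        {
            VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
            return;
        }
    }
*/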
4291 
4292 struct VmaPool_T
4293 {
4294  VMA_CLASS_NO_COPY(VmaPool_T)
4295 public:
4296  VmaBlockVector m_BlockVector;
4297 
4298  // Takes ownership.
4299  VmaPool_T(
4300  VmaAllocator hAllocator,
4301  const VmaPoolCreateInfo& createInfo);
4302  ~VmaPool_T();
4303 
4304  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
4305 
4306 #if VMA_STATS_STRING_ENABLED
4307  //void PrintDetailedMap(class VmaStringBuilder& sb);
4308 #endif
4309 };
4310 
4311 class VmaDefragmentator
4312 {
4313  VMA_CLASS_NO_COPY(VmaDefragmentator)
4314 private:
4315  const VmaAllocator m_hAllocator;
4316  VmaBlockVector* const m_pBlockVector;
4317  uint32_t m_CurrentFrameIndex;
4318  VkDeviceSize m_BytesMoved;
4319  uint32_t m_AllocationsMoved;
4320 
4321  struct AllocationInfo
4322  {
4323  VmaAllocation m_hAllocation;
4324  VkBool32* m_pChanged;
4325 
4326  AllocationInfo() :
4327  m_hAllocation(VK_NULL_HANDLE),
4328  m_pChanged(VMA_NULL)
4329  {
4330  }
4331  };
4332 
4333  struct AllocationInfoSizeGreater
4334  {
4335  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
4336  {
4337  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
4338  }
4339  };
4340 
4341  // Used between AddAllocation and Defragment.
4342  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4343 
4344  struct BlockInfo
4345  {
4346  VmaDeviceMemoryBlock* m_pBlock;
4347  bool m_HasNonMovableAllocations;
4348  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
4349 
4350  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
4351  m_pBlock(VMA_NULL),
4352  m_HasNonMovableAllocations(true),
4353  m_Allocations(pAllocationCallbacks),
4354  m_pMappedDataForDefragmentation(VMA_NULL)
4355  {
4356  }
4357 
4358  void CalcHasNonMovableAllocations()
4359  {
4360  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
4361  const size_t defragmentAllocCount = m_Allocations.size();
4362  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
4363  }
4364 
4365  void SortAllocationsBySizeDescecnding()
4366  {
4367  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
4368  }
4369 
4370  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
4371  void Unmap(VmaAllocator hAllocator);
4372 
4373  private:
4374  // Not null if mapped for defragmentation only, not originally mapped.
4375  void* m_pMappedDataForDefragmentation;
4376  };
4377 
4378  struct BlockPointerLess
4379  {
4380  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
4381  {
4382  return pLhsBlockInfo->m_pBlock < pRhsBlock;
4383  }
4384  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4385  {
4386  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
4387  }
4388  };
4389 
4390  // 1. Blocks with some non-movable allocations go first.
4391  // 2. Blocks with smaller sumFreeSize go first.
4392  struct BlockInfoCompareMoveDestination
4393  {
4394  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
4395  {
4396  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
4397  {
4398  return true;
4399  }
4400  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
4401  {
4402  return false;
4403  }
4404  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
4405  {
4406  return true;
4407  }
4408  return false;
4409  }
4410  };
4411 
4412  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
4413  BlockInfoVector m_Blocks;
4414 
4415  VkResult DefragmentRound(
4416  VkDeviceSize maxBytesToMove,
4417  uint32_t maxAllocationsToMove);
4418 
4419  static bool MoveMakesSense(
4420  size_t dstBlockIndex, VkDeviceSize dstOffset,
4421  size_t srcBlockIndex, VkDeviceSize srcOffset);
4422 
4423 public:
4424  VmaDefragmentator(
4425  VmaAllocator hAllocator,
4426  VmaBlockVector* pBlockVector,
4427  uint32_t currentFrameIndex);
4428 
4429  ~VmaDefragmentator();
4430 
4431  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4432  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4433 
4434  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4435 
4436  VkResult Defragment(
4437  VkDeviceSize maxBytesToMove,
4438  uint32_t maxAllocationsToMove);
4439 };
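/*
Intended usage of VmaDefragmentator (a sketch with hypothetical variables):
register the allocations that are allowed to move, then run Defragment() with
the desired limits; the getters report what was actually done.

    VmaDefragmentator* const pDefrag =
        blockVector.EnsureDefragmentator(hAllocator, currentFrameIndex);
    VkBool32 changed = VK_FALSE;
    pDefrag->AddAllocation(hAlloc, &changed);
    VkResult res = pDefrag->Defragment(VK_WHOLE_SIZE, UINT32_MAX);
    // pDefrag->GetBytesMoved() / pDefrag->GetAllocationsMoved() report results.
*/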
4440 
4441 // Main allocator object.
4442 struct VmaAllocator_T
4443 {
4444  VMA_CLASS_NO_COPY(VmaAllocator_T)
4445 public:
4446  bool m_UseMutex;
4447  bool m_UseKhrDedicatedAllocation;
4448  VkDevice m_hDevice;
4449  bool m_AllocationCallbacksSpecified;
4450  VkAllocationCallbacks m_AllocationCallbacks;
4451  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4452 
4453  // Number of bytes still free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4454  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4455  VMA_MUTEX m_HeapSizeLimitMutex;
4456 
4457  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4458  VkPhysicalDeviceMemoryProperties m_MemProps;
4459 
4460  // Default pools.
4461  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4462 
4463  // Each vector is sorted by memory (handle value).
4464  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4465  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4466  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4467 
4468  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4469  ~VmaAllocator_T();
4470 
4471  const VkAllocationCallbacks* GetAllocationCallbacks() const
4472  {
4473  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4474  }
4475  const VmaVulkanFunctions& GetVulkanFunctions() const
4476  {
4477  return m_VulkanFunctions;
4478  }
4479 
4480  VkDeviceSize GetBufferImageGranularity() const
4481  {
4482  return VMA_MAX(
4483  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4484  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4485  }
4486 
4487  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4488  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4489 
4490  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4491  {
4492  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4493  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4494  }
4495 
4496  bool IsIntegratedGpu() const
4497  {
4498  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
4499  }
4500 
4501  void GetBufferMemoryRequirements(
4502  VkBuffer hBuffer,
4503  VkMemoryRequirements& memReq,
4504  bool& requiresDedicatedAllocation,
4505  bool& prefersDedicatedAllocation) const;
4506  void GetImageMemoryRequirements(
4507  VkImage hImage,
4508  VkMemoryRequirements& memReq,
4509  bool& requiresDedicatedAllocation,
4510  bool& prefersDedicatedAllocation) const;
4511 
4512  // Main allocation function.
4513  VkResult AllocateMemory(
4514  const VkMemoryRequirements& vkMemReq,
4515  bool requiresDedicatedAllocation,
4516  bool prefersDedicatedAllocation,
4517  VkBuffer dedicatedBuffer,
4518  VkImage dedicatedImage,
4519  const VmaAllocationCreateInfo& createInfo,
4520  VmaSuballocationType suballocType,
4521  VmaAllocation* pAllocation);
4522 
4523  // Main deallocation function.
4524  void FreeMemory(const VmaAllocation allocation);
4525 
4526  void CalculateStats(VmaStats* pStats);
4527 
4528 #if VMA_STATS_STRING_ENABLED
4529  void PrintDetailedMap(class VmaJsonWriter& json);
4530 #endif
4531 
4532  VkResult Defragment(
4533  VmaAllocation* pAllocations,
4534  size_t allocationCount,
4535  VkBool32* pAllocationsChanged,
4536  const VmaDefragmentationInfo* pDefragmentationInfo,
4537  VmaDefragmentationStats* pDefragmentationStats);
4538 
4539  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4540  bool TouchAllocation(VmaAllocation hAllocation);
4541 
4542  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4543  void DestroyPool(VmaPool pool);
4544  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4545 
4546  void SetCurrentFrameIndex(uint32_t frameIndex);
4547 
4548  void MakePoolAllocationsLost(
4549  VmaPool hPool,
4550  size_t* pLostAllocationCount);
4551 
4552  void CreateLostAllocation(VmaAllocation* pAllocation);
4553 
4554  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4555  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4556 
4557  VkResult Map(VmaAllocation hAllocation, void** ppData);
4558  void Unmap(VmaAllocation hAllocation);
4559 
4560  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4561  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4562 
4563 private:
4564  VkDeviceSize m_PreferredLargeHeapBlockSize;
4565 
4566  VkPhysicalDevice m_PhysicalDevice;
4567  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4568 
4569  VMA_MUTEX m_PoolsMutex;
4570  // Protected by m_PoolsMutex. Sorted by pointer value.
4571  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4572 
4573  VmaVulkanFunctions m_VulkanFunctions;
4574 
4575  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4576 
4577  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4578 
4579  VkResult AllocateMemoryOfType(
4580  const VkMemoryRequirements& vkMemReq,
4581  bool dedicatedAllocation,
4582  VkBuffer dedicatedBuffer,
4583  VkImage dedicatedImage,
4584  const VmaAllocationCreateInfo& createInfo,
4585  uint32_t memTypeIndex,
4586  VmaSuballocationType suballocType,
4587  VmaAllocation* pAllocation);
4588 
4589  // Allocates and registers new VkDeviceMemory specifically for a single allocation.
4590  VkResult AllocateDedicatedMemory(
4591  VkDeviceSize size,
4592  VmaSuballocationType suballocType,
4593  uint32_t memTypeIndex,
4594  bool map,
4595  bool isUserDataString,
4596  void* pUserData,
4597  VkBuffer dedicatedBuffer,
4598  VkImage dedicatedImage,
4599  VmaAllocation* pAllocation);
4600 
4601  // Frees a dedicated allocation: unregisters it and releases its VkDeviceMemory.
4602  void FreeDedicatedMemory(VmaAllocation allocation);
4603 };
4604 
4605 ////////////////////////////////////////////////////////////////////////////////
4606 // Memory allocation #2 after VmaAllocator_T definition
4607 
4608 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4609 {
4610  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4611 }
4612 
4613 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4614 {
4615  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4616 }
4617 
4618 template<typename T>
4619 static T* VmaAllocate(VmaAllocator hAllocator)
4620 {
4621  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4622 }
4623 
4624 template<typename T>
4625 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4626 {
4627  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4628 }
4629 
4630 template<typename T>
4631 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4632 {
4633  if(ptr != VMA_NULL)
4634  {
4635  ptr->~T();
4636  VmaFree(hAllocator, ptr);
4637  }
4638 }
4639 
4640 template<typename T>
4641 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4642 {
4643  if(ptr != VMA_NULL)
4644  {
4645  for(size_t i = count; i--; )
4646  ptr[i].~T();
4647  VmaFree(hAllocator, ptr);
4648  }
4649 }
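/*
These helpers pair like new[]/delete[], but route memory through the
allocator's VkAllocationCallbacks, and destructors run in reverse order before
the storage is freed. Example, matching the vma_new_array() usage later in
this file:

    char* const str = vma_new_array(hAllocator, char, 16);
    // ... use str ...
    vma_delete_array(hAllocator, str, 16);
*/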
4650 
4651 ////////////////////////////////////////////////////////////////////////////////
4652 // VmaStringBuilder
4653 
4654 #if VMA_STATS_STRING_ENABLED
4655 
4656 class VmaStringBuilder
4657 {
4658 public:
4659  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4660  size_t GetLength() const { return m_Data.size(); }
4661  const char* GetData() const { return m_Data.data(); }
4662 
4663  void Add(char ch) { m_Data.push_back(ch); }
4664  void Add(const char* pStr);
4665  void AddNewLine() { Add('\n'); }
4666  void AddNumber(uint32_t num);
4667  void AddNumber(uint64_t num);
4668  void AddPointer(const void* ptr);
4669 
4670 private:
4671  VmaVector< char, VmaStlAllocator<char> > m_Data;
4672 };
4673 
4674 void VmaStringBuilder::Add(const char* pStr)
4675 {
4676  const size_t strLen = strlen(pStr);
4677  if(strLen > 0)
4678  {
4679  const size_t oldCount = m_Data.size();
4680  m_Data.resize(oldCount + strLen);
4681  memcpy(m_Data.data() + oldCount, pStr, strLen);
4682  }
4683 }
4684 
4685 void VmaStringBuilder::AddNumber(uint32_t num)
4686 {
4687  char buf[11];
4688  VmaUint32ToStr(buf, sizeof(buf), num);
4689  Add(buf);
4690 }
4691 
4692 void VmaStringBuilder::AddNumber(uint64_t num)
4693 {
4694  char buf[21];
4695  VmaUint64ToStr(buf, sizeof(buf), num);
4696  Add(buf);
4697 }
4698 
4699 void VmaStringBuilder::AddPointer(const void* ptr)
4700 {
4701  char buf[21];
4702  VmaPtrToStr(buf, sizeof(buf), ptr);
4703  Add(buf);
4704 }
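/*
Example of composing a message with VmaStringBuilder (a sketch; size and ptr
are hypothetical locals). Note that GetData() is not null-terminated, so it
must always be paired with GetLength():

    VmaStringBuilder sb(hAllocator);
    sb.Add("Allocated ");
    sb.AddNumber((uint64_t)size);
    sb.Add(" bytes at ");
    sb.AddPointer(ptr);
    sb.AddNewLine();
*/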
4705 
4706 #endif // #if VMA_STATS_STRING_ENABLED
4707 
4708 ////////////////////////////////////////////////////////////////////////////////
4709 // VmaJsonWriter
4710 
4711 #if VMA_STATS_STRING_ENABLED
4712 
4713 class VmaJsonWriter
4714 {
4715  VMA_CLASS_NO_COPY(VmaJsonWriter)
4716 public:
4717  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4718  ~VmaJsonWriter();
4719 
4720  void BeginObject(bool singleLine = false);
4721  void EndObject();
4722 
4723  void BeginArray(bool singleLine = false);
4724  void EndArray();
4725 
4726  void WriteString(const char* pStr);
4727  void BeginString(const char* pStr = VMA_NULL);
4728  void ContinueString(const char* pStr);
4729  void ContinueString(uint32_t n);
4730  void ContinueString(uint64_t n);
4731  void ContinueString_Pointer(const void* ptr);
4732  void EndString(const char* pStr = VMA_NULL);
4733 
4734  void WriteNumber(uint32_t n);
4735  void WriteNumber(uint64_t n);
4736  void WriteBool(bool b);
4737  void WriteNull();
4738 
4739 private:
4740  static const char* const INDENT;
4741 
4742  enum COLLECTION_TYPE
4743  {
4744  COLLECTION_TYPE_OBJECT,
4745  COLLECTION_TYPE_ARRAY,
4746  };
4747  struct StackItem
4748  {
4749  COLLECTION_TYPE type;
4750  uint32_t valueCount;
4751  bool singleLineMode;
4752  };
4753 
4754  VmaStringBuilder& m_SB;
4755  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4756  bool m_InsideString;
4757 
4758  void BeginValue(bool isString);
4759  void WriteIndent(bool oneLess = false);
4760 };
4761 
4762 const char* const VmaJsonWriter::INDENT = "  ";
4763 
4764 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4765  m_SB(sb),
4766  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4767  m_InsideString(false)
4768 {
4769 }
4770 
4771 VmaJsonWriter::~VmaJsonWriter()
4772 {
4773  VMA_ASSERT(!m_InsideString);
4774  VMA_ASSERT(m_Stack.empty());
4775 }
4776 
4777 void VmaJsonWriter::BeginObject(bool singleLine)
4778 {
4779  VMA_ASSERT(!m_InsideString);
4780 
4781  BeginValue(false);
4782  m_SB.Add('{');
4783 
4784  StackItem item;
4785  item.type = COLLECTION_TYPE_OBJECT;
4786  item.valueCount = 0;
4787  item.singleLineMode = singleLine;
4788  m_Stack.push_back(item);
4789 }
4790 
4791 void VmaJsonWriter::EndObject()
4792 {
4793  VMA_ASSERT(!m_InsideString);
4794 
4795  WriteIndent(true);
4796  m_SB.Add('}');
4797 
4798  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4799  m_Stack.pop_back();
4800 }
4801 
4802 void VmaJsonWriter::BeginArray(bool singleLine)
4803 {
4804  VMA_ASSERT(!m_InsideString);
4805 
4806  BeginValue(false);
4807  m_SB.Add('[');
4808 
4809  StackItem item;
4810  item.type = COLLECTION_TYPE_ARRAY;
4811  item.valueCount = 0;
4812  item.singleLineMode = singleLine;
4813  m_Stack.push_back(item);
4814 }
4815 
4816 void VmaJsonWriter::EndArray()
4817 {
4818  VMA_ASSERT(!m_InsideString);
4819 
4820  WriteIndent(true);
4821  m_SB.Add(']');
4822 
4823  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4824  m_Stack.pop_back();
4825 }
4826 
4827 void VmaJsonWriter::WriteString(const char* pStr)
4828 {
4829  BeginString(pStr);
4830  EndString();
4831 }
4832 
4833 void VmaJsonWriter::BeginString(const char* pStr)
4834 {
4835  VMA_ASSERT(!m_InsideString);
4836 
4837  BeginValue(true);
4838  m_SB.Add('"');
4839  m_InsideString = true;
4840  if(pStr != VMA_NULL && pStr[0] != '\0')
4841  {
4842  ContinueString(pStr);
4843  }
4844 }
4845 
4846 void VmaJsonWriter::ContinueString(const char* pStr)
4847 {
4848  VMA_ASSERT(m_InsideString);
4849 
4850  const size_t strLen = strlen(pStr);
4851  for(size_t i = 0; i < strLen; ++i)
4852  {
4853  char ch = pStr[i];
4854  if(ch == '\\')
4855  {
4856  m_SB.Add("\\\\");
4857  }
4858  else if(ch == '"')
4859  {
4860  m_SB.Add("\\\"");
4861  }
4862  else if(ch >= 32)
4863  {
4864  m_SB.Add(ch);
4865  }
4866  else switch(ch)
4867  {
4868  case '\b':
4869  m_SB.Add("\\b");
4870  break;
4871  case '\f':
4872  m_SB.Add("\\f");
4873  break;
4874  case '\n':
4875  m_SB.Add("\\n");
4876  break;
4877  case '\r':
4878  m_SB.Add("\\r");
4879  break;
4880  case '\t':
4881  m_SB.Add("\\t");
4882  break;
4883  default:
4884  VMA_ASSERT(0 && "Character not currently supported.");
4885  break;
4886  }
4887  }
4888 }
4889 
4890 void VmaJsonWriter::ContinueString(uint32_t n)
4891 {
4892  VMA_ASSERT(m_InsideString);
4893  m_SB.AddNumber(n);
4894 }
4895 
4896 void VmaJsonWriter::ContinueString(uint64_t n)
4897 {
4898  VMA_ASSERT(m_InsideString);
4899  m_SB.AddNumber(n);
4900 }
4901 
4902 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4903 {
4904  VMA_ASSERT(m_InsideString);
4905  m_SB.AddPointer(ptr);
4906 }
4907 
4908 void VmaJsonWriter::EndString(const char* pStr)
4909 {
4910  VMA_ASSERT(m_InsideString);
4911  if(pStr != VMA_NULL && pStr[0] != '\0')
4912  {
4913  ContinueString(pStr);
4914  }
4915  m_SB.Add('"');
4916  m_InsideString = false;
4917 }
4918 
4919 void VmaJsonWriter::WriteNumber(uint32_t n)
4920 {
4921  VMA_ASSERT(!m_InsideString);
4922  BeginValue(false);
4923  m_SB.AddNumber(n);
4924 }
4925 
4926 void VmaJsonWriter::WriteNumber(uint64_t n)
4927 {
4928  VMA_ASSERT(!m_InsideString);
4929  BeginValue(false);
4930  m_SB.AddNumber(n);
4931 }
4932 
4933 void VmaJsonWriter::WriteBool(bool b)
4934 {
4935  VMA_ASSERT(!m_InsideString);
4936  BeginValue(false);
4937  m_SB.Add(b ? "true" : "false");
4938 }
4939 
4940 void VmaJsonWriter::WriteNull()
4941 {
4942  VMA_ASSERT(!m_InsideString);
4943  BeginValue(false);
4944  m_SB.Add("null");
4945 }
4946 
4947 void VmaJsonWriter::BeginValue(bool isString)
4948 {
4949  if(!m_Stack.empty())
4950  {
4951  StackItem& currItem = m_Stack.back();
4952  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4953  currItem.valueCount % 2 == 0)
4954  {
4955  VMA_ASSERT(isString);
4956  }
4957 
4958  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4959  currItem.valueCount % 2 != 0)
4960  {
4961  m_SB.Add(": ");
4962  }
4963  else if(currItem.valueCount > 0)
4964  {
4965  m_SB.Add(", ");
4966  WriteIndent();
4967  }
4968  else
4969  {
4970  WriteIndent();
4971  }
4972  ++currItem.valueCount;
4973  }
4974 }
4975 
4976 void VmaJsonWriter::WriteIndent(bool oneLess)
4977 {
4978  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4979  {
4980  m_SB.AddNewLine();
4981 
4982  size_t count = m_Stack.size();
4983  if(count > 0 && oneLess)
4984  {
4985  --count;
4986  }
4987  for(size_t i = 0; i < count; ++i)
4988  {
4989  m_SB.Add(INDENT);
4990  }
4991  }
4992 }
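/*
Example of the writer's alternating key/value protocol inside an object
(a sketch): keys must be strings, which BeginValue() enforces above through
the valueCount parity assert.

    VmaJsonWriter json(pAllocationCallbacks, sb);
    json.BeginObject();
    json.WriteString("Size"); // key - valueCount becomes odd
    json.WriteNumber(256u);   // value - ": " is emitted, valueCount becomes even
    json.EndObject();
    // Produces {"Size": 256} spread over indented lines.
*/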
4993 
4994 #endif // #if VMA_STATS_STRING_ENABLED
4995 
4996 ////////////////////////////////////////////////////////////////////////////////
4997 
4998 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4999 {
5000  if(IsUserDataString())
5001  {
5002  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
5003 
5004  FreeUserDataString(hAllocator);
5005 
5006  if(pUserData != VMA_NULL)
5007  {
5008  const char* const newStrSrc = (char*)pUserData;
5009  const size_t newStrLen = strlen(newStrSrc);
5010  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
5011  memcpy(newStrDst, newStrSrc, newStrLen + 1);
5012  m_pUserData = newStrDst;
5013  }
5014  }
5015  else
5016  {
5017  m_pUserData = pUserData;
5018  }
5019 }
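/*
The two modes handled above, illustrated (hypothetical example): for an
allocation whose IsUserDataString() flag is set, the pointer is treated as a
null-terminated string and deep-copied; otherwise it is stored verbatim as an
opaque handle.

    hStringAlloc->SetUserData(hAllocator, (void*)"Scene/MeshVB"); // copied
    hPlainAlloc->SetUserData(hAllocator, pMyOwnStruct);           // stored as-is
*/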
5020 
5021 void VmaAllocation_T::ChangeBlockAllocation(
5022  VmaAllocator hAllocator,
5023  VmaDeviceMemoryBlock* block,
5024  VkDeviceSize offset)
5025 {
5026  VMA_ASSERT(block != VMA_NULL);
5027  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5028 
5029  // Move mapping reference counter from old block to new block.
5030  if(block != m_BlockAllocation.m_Block)
5031  {
5032  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
5033  if(IsPersistentMap())
5034  ++mapRefCount;
5035  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
5036  block->Map(hAllocator, mapRefCount, VMA_NULL);
5037  }
5038 
5039  m_BlockAllocation.m_Block = block;
5040  m_BlockAllocation.m_Offset = offset;
5041 }
5042 
5043 VkDeviceSize VmaAllocation_T::GetOffset() const
5044 {
5045  switch(m_Type)
5046  {
5047  case ALLOCATION_TYPE_BLOCK:
5048  return m_BlockAllocation.m_Offset;
5049  case ALLOCATION_TYPE_DEDICATED:
5050  return 0;
5051  default:
5052  VMA_ASSERT(0);
5053  return 0;
5054  }
5055 }
5056 
5057 VkDeviceMemory VmaAllocation_T::GetMemory() const
5058 {
5059  switch(m_Type)
5060  {
5061  case ALLOCATION_TYPE_BLOCK:
5062  return m_BlockAllocation.m_Block->GetDeviceMemory();
5063  case ALLOCATION_TYPE_DEDICATED:
5064  return m_DedicatedAllocation.m_hMemory;
5065  default:
5066  VMA_ASSERT(0);
5067  return VK_NULL_HANDLE;
5068  }
5069 }
5070 
5071 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
5072 {
5073  switch(m_Type)
5074  {
5075  case ALLOCATION_TYPE_BLOCK:
5076  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5077  case ALLOCATION_TYPE_DEDICATED:
5078  return m_DedicatedAllocation.m_MemoryTypeIndex;
5079  default:
5080  VMA_ASSERT(0);
5081  return UINT32_MAX;
5082  }
5083 }
5084 
5085 void* VmaAllocation_T::GetMappedData() const
5086 {
5087  switch(m_Type)
5088  {
5089  case ALLOCATION_TYPE_BLOCK:
5090  if(m_MapCount != 0)
5091  {
5092  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5093  VMA_ASSERT(pBlockData != VMA_NULL);
5094  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5095  }
5096  else
5097  {
5098  return VMA_NULL;
5099  }
5100  break;
5101  case ALLOCATION_TYPE_DEDICATED:
5102  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5103  return m_DedicatedAllocation.m_pMappedData;
5104  default:
5105  VMA_ASSERT(0);
5106  return VMA_NULL;
5107  }
5108 }
5109 
5110 bool VmaAllocation_T::CanBecomeLost() const
5111 {
5112  switch(m_Type)
5113  {
5114  case ALLOCATION_TYPE_BLOCK:
5115  return m_BlockAllocation.m_CanBecomeLost;
5116  case ALLOCATION_TYPE_DEDICATED:
5117  return false;
5118  default:
5119  VMA_ASSERT(0);
5120  return false;
5121  }
5122 }
5123 
5124 VmaPool VmaAllocation_T::GetPool() const
5125 {
5126  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
5127  return m_BlockAllocation.m_hPool;
5128 }
5129 
5130 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5131 {
5132  VMA_ASSERT(CanBecomeLost());
5133 
5134  /*
5135  Warning: This is a carefully designed algorithm.
5136  Do not modify unless you really know what you're doing :)
5137  */
5138  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
5139  for(;;)
5140  {
5141  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
5142  {
5143  VMA_ASSERT(0);
5144  return false;
5145  }
5146  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
5147  {
5148  return false;
5149  }
5150  else // Last use time earlier than current time.
5151  {
5152  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
5153  {
5154  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
5155  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
5156  return true;
5157  }
5158  }
5159  }
5160 }
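/*
Worked example of the test above: with currentFrameIndex = 100 and
frameInUseCount = 2, an allocation with LastUseFrameIndex = 98 satisfies
98 + 2 >= 100 and is kept, while LastUseFrameIndex = 97 gives 97 + 2 < 100,
so the compare-exchange retires it to VMA_FRAME_INDEX_LOST. The loop exists
because compare_exchange_weak may fail spuriously or because another thread
updated the allocation concurrently; in that case the freshly loaded value is
re-examined.
*/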
5161 
5162 #if VMA_STATS_STRING_ENABLED
5163 
5164 // Correspond to values of enum VmaSuballocationType.
5165 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
5166  "FREE",
5167  "UNKNOWN",
5168  "BUFFER",
5169  "IMAGE_UNKNOWN",
5170  "IMAGE_LINEAR",
5171  "IMAGE_OPTIMAL",
5172 };
5173 
5174 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
5175 {
5176  json.WriteString("Type");
5177  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
5178 
5179  json.WriteString("Size");
5180  json.WriteNumber(m_Size);
5181 
5182  if(m_pUserData != VMA_NULL)
5183  {
5184  json.WriteString("UserData");
5185  if(IsUserDataString())
5186  {
5187  json.WriteString((const char*)m_pUserData);
5188  }
5189  else
5190  {
5191  json.BeginString();
5192  json.ContinueString_Pointer(m_pUserData);
5193  json.EndString();
5194  }
5195  }
5196 
5197  json.WriteString("CreationFrameIndex");
5198  json.WriteNumber(m_CreationFrameIndex);
5199 
5200  json.WriteString("LastUseFrameIndex");
5201  json.WriteNumber(GetLastUseFrameIndex());
5202 
5203  if(m_BufferImageUsage != 0)
5204  {
5205  json.WriteString("Usage");
5206  json.WriteNumber(m_BufferImageUsage);
5207  }
5208 }
5209 
5210 #endif
5211 
5212 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5213 {
5214  VMA_ASSERT(IsUserDataString());
5215  if(m_pUserData != VMA_NULL)
5216  {
5217  char* const oldStr = (char*)m_pUserData;
5218  const size_t oldStrLen = strlen(oldStr);
5219  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5220  m_pUserData = VMA_NULL;
5221  }
5222 }
5223 
5224 void VmaAllocation_T::BlockAllocMap()
5225 {
5226  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5227 
5228  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5229  {
5230  ++m_MapCount;
5231  }
5232  else
5233  {
5234  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5235  }
5236 }
5237 
5238 void VmaAllocation_T::BlockAllocUnmap()
5239 {
5240  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5241 
5242  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5243  {
5244  --m_MapCount;
5245  }
5246  else
5247  {
5248  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5249  }
5250 }
5251 
5252 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
5253 {
5254  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5255 
5256  if(m_MapCount != 0)
5257  {
5258  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5259  {
5260  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
5261  *ppData = m_DedicatedAllocation.m_pMappedData;
5262  ++m_MapCount;
5263  return VK_SUCCESS;
5264  }
5265  else
5266  {
5267  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
5268  return VK_ERROR_MEMORY_MAP_FAILED;
5269  }
5270  }
5271  else
5272  {
5273  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5274  hAllocator->m_hDevice,
5275  m_DedicatedAllocation.m_hMemory,
5276  0, // offset
5277  VK_WHOLE_SIZE,
5278  0, // flags
5279  ppData);
5280  if(result == VK_SUCCESS)
5281  {
5282  m_DedicatedAllocation.m_pMappedData = *ppData;
5283  m_MapCount = 1;
5284  }
5285  return result;
5286  }
5287 }
5288 
5289 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
5290 {
5291  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
5292 
5293  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5294  {
5295  --m_MapCount;
5296  if(m_MapCount == 0)
5297  {
5298  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
5299  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
5300  hAllocator->m_hDevice,
5301  m_DedicatedAllocation.m_hMemory);
5302  }
5303  }
5304  else
5305  {
5306  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
5307  }
5308 }
5309 
5310 #if VMA_STATS_STRING_ENABLED
5311 
5312 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5313 {
5314  json.BeginObject();
5315 
5316  json.WriteString("Blocks");
5317  json.WriteNumber(stat.blockCount);
5318 
5319  json.WriteString("Allocations");
5320  json.WriteNumber(stat.allocationCount);
5321 
5322  json.WriteString("UnusedRanges");
5323  json.WriteNumber(stat.unusedRangeCount);
5324 
5325  json.WriteString("UsedBytes");
5326  json.WriteNumber(stat.usedBytes);
5327 
5328  json.WriteString("UnusedBytes");
5329  json.WriteNumber(stat.unusedBytes);
5330 
5331  if(stat.allocationCount > 1)
5332  {
5333  json.WriteString("AllocationSize");
5334  json.BeginObject(true);
5335  json.WriteString("Min");
5336  json.WriteNumber(stat.allocationSizeMin);
5337  json.WriteString("Avg");
5338  json.WriteNumber(stat.allocationSizeAvg);
5339  json.WriteString("Max");
5340  json.WriteNumber(stat.allocationSizeMax);
5341  json.EndObject();
5342  }
5343 
5344  if(stat.unusedRangeCount > 1)
5345  {
5346  json.WriteString("UnusedRangeSize");
5347  json.BeginObject(true);
5348  json.WriteString("Min");
5349  json.WriteNumber(stat.unusedRangeSizeMin);
5350  json.WriteString("Avg");
5351  json.WriteNumber(stat.unusedRangeSizeAvg);
5352  json.WriteString("Max");
5353  json.WriteNumber(stat.unusedRangeSizeMax);
5354  json.EndObject();
5355  }
5356 
5357  json.EndObject();
5358 }
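/*
Sample of the object emitted above (a sketch; exact whitespace depends on
VmaJsonWriter). With 2 allocations of 256 and 512 bytes and a single 256-byte
unused range, the "UnusedRangeSize" sub-object is skipped because
unusedRangeCount is not greater than 1:

    { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
      "UsedBytes": 768, "UnusedBytes": 256,
      "AllocationSize": { "Min": 256, "Avg": 384, "Max": 512 } }
*/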
5359 
5360 #endif // #if VMA_STATS_STRING_ENABLED
5361 
5362 struct VmaSuballocationItemSizeLess
5363 {
5364  bool operator()(
5365  const VmaSuballocationList::iterator lhs,
5366  const VmaSuballocationList::iterator rhs) const
5367  {
5368  return lhs->size < rhs->size;
5369  }
5370  bool operator()(
5371  const VmaSuballocationList::iterator lhs,
5372  VkDeviceSize rhsSize) const
5373  {
5374  return lhs->size < rhsSize;
5375  }
5376 };
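/*
The second operator() makes the comparator heterogeneous: a binary search over
m_FreeSuballocationsBySize can take a raw VkDeviceSize as the key instead of
constructing a dummy list iterator. This is exactly how
VmaBlockMetadata::CreateAllocationRequest() below locates the first free
suballocation whose size is not less than allocSize:

    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
        m_FreeSuballocationsBySize.data(),
        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
        allocSize,
        VmaSuballocationItemSizeLess());
*/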
5377 
5378 ////////////////////////////////////////////////////////////////////////////////
5379 // class VmaBlockMetadata
5380 
5381 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
5382  m_Size(0),
5383  m_FreeCount(0),
5384  m_SumFreeSize(0),
5385  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
5386  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
5387 {
5388 }
5389 
5390 VmaBlockMetadata::~VmaBlockMetadata()
5391 {
5392 }
5393 
5394 void VmaBlockMetadata::Init(VkDeviceSize size)
5395 {
5396  m_Size = size;
5397  m_FreeCount = 1;
5398  m_SumFreeSize = size;
5399 
5400  VmaSuballocation suballoc = {};
5401  suballoc.offset = 0;
5402  suballoc.size = size;
5403  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5404  suballoc.hAllocation = VK_NULL_HANDLE;
5405 
5406  m_Suballocations.push_back(suballoc);
5407  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5408  --suballocItem;
5409  m_FreeSuballocationsBySize.push_back(suballocItem);
5410 }
5411 
5412 bool VmaBlockMetadata::Validate() const
5413 {
5414  if(m_Suballocations.empty())
5415  {
5416  return false;
5417  }
5418 
5419  // Expected offset of a new suballocation, as calculated from the previous ones.
5420  VkDeviceSize calculatedOffset = 0;
5421  // Expected number of free suballocations as calculated from traversing their list.
5422  uint32_t calculatedFreeCount = 0;
5423  // Expected sum size of free suballocations as calculated from traversing their list.
5424  VkDeviceSize calculatedSumFreeSize = 0;
5425  // Expected number of free suballocations that should be registered in
5426  // m_FreeSuballocationsBySize calculated from traversing their list.
5427  size_t freeSuballocationsToRegister = 0;
5428  // True if the previously visited suballocation was free.
5429  bool prevFree = false;
5430 
5431  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5432  suballocItem != m_Suballocations.cend();
5433  ++suballocItem)
5434  {
5435  const VmaSuballocation& subAlloc = *suballocItem;
5436 
5437  // Actual offset of this suballocation doesn't match expected one.
5438  if(subAlloc.offset != calculatedOffset)
5439  {
5440  return false;
5441  }
5442 
5443  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
5444  // Two adjacent free suballocations are invalid. They should be merged.
5445  if(prevFree && currFree)
5446  {
5447  return false;
5448  }
5449 
5450  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
5451  {
5452  return false;
5453  }
5454 
5455  if(currFree)
5456  {
5457  calculatedSumFreeSize += subAlloc.size;
5458  ++calculatedFreeCount;
5459  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5460  {
5461  ++freeSuballocationsToRegister;
5462  }
5463  }
5464  else
5465  {
5466  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5467  {
5468  return false;
5469  }
5470  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5471  {
5472  return false;
5473  }
5474  }
5475 
5476  calculatedOffset += subAlloc.size;
5477  prevFree = currFree;
5478  }
5479 
5480  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5481  // match expected one.
5482  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5483  {
5484  return false;
5485  }
5486 
5487  VkDeviceSize lastSize = 0;
5488  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5489  {
5490  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5491 
5492  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5493  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5494  {
5495  return false;
5496  }
5497  // They must be sorted by size ascending.
5498  if(suballocItem->size < lastSize)
5499  {
5500  return false;
5501  }
5502 
5503  lastSize = suballocItem->size;
5504  }
5505 
5506  // Check if totals match calculated values.
5507  if(!ValidateFreeSuballocationList() ||
5508  (calculatedOffset != m_Size) ||
5509  (calculatedSumFreeSize != m_SumFreeSize) ||
5510  (calculatedFreeCount != m_FreeCount))
5511  {
5512  return false;
5513  }
5514 
5515  return true;
5516 }
5517 
5518 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5519 {
5520  if(!m_FreeSuballocationsBySize.empty())
5521  {
5522  return m_FreeSuballocationsBySize.back()->size;
5523  }
5524  else
5525  {
5526  return 0;
5527  }
5528 }
5529 
5530 bool VmaBlockMetadata::IsEmpty() const
5531 {
5532  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5533 }
5534 
5535 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5536 {
5537  outInfo.blockCount = 1;
5538 
5539  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5540  outInfo.allocationCount = rangeCount - m_FreeCount;
5541  outInfo.unusedRangeCount = m_FreeCount;
5542 
5543  outInfo.unusedBytes = m_SumFreeSize;
5544  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5545 
5546  outInfo.allocationSizeMin = UINT64_MAX;
5547  outInfo.allocationSizeMax = 0;
5548  outInfo.unusedRangeSizeMin = UINT64_MAX;
5549  outInfo.unusedRangeSizeMax = 0;
5550 
5551  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5552  suballocItem != m_Suballocations.cend();
5553  ++suballocItem)
5554  {
5555  const VmaSuballocation& suballoc = *suballocItem;
5556  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5557  {
5558  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5559  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5560  }
5561  else
5562  {
5563  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5564  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5565  }
5566  }
5567 }
5568 
5569 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5570 {
5571  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5572 
5573  inoutStats.size += m_Size;
5574  inoutStats.unusedSize += m_SumFreeSize;
5575  inoutStats.allocationCount += rangeCount - m_FreeCount;
5576  inoutStats.unusedRangeCount += m_FreeCount;
5577  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5578 }
5579 
5580 #if VMA_STATS_STRING_ENABLED
5581 
5582 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5583 {
5584  json.BeginObject();
5585 
5586  json.WriteString("TotalBytes");
5587  json.WriteNumber(m_Size);
5588 
5589  json.WriteString("UnusedBytes");
5590  json.WriteNumber(m_SumFreeSize);
5591 
5592  json.WriteString("Allocations");
5593  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5594 
5595  json.WriteString("UnusedRanges");
5596  json.WriteNumber(m_FreeCount);
5597 
5598  json.WriteString("Suballocations");
5599  json.BeginArray();
5600  size_t i = 0;
5601  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5602  suballocItem != m_Suballocations.cend();
5603  ++suballocItem, ++i)
5604  {
5605  json.BeginObject(true);
5606 
5607  json.WriteString("Offset");
5608  json.WriteNumber(suballocItem->offset);
5609 
5610  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5611  {
5612  json.WriteString("Type");
5613  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
5614 
5615  json.WriteString("Size");
5616  json.WriteNumber(suballocItem->size);
5617  }
5618  else
5619  {
5620  suballocItem->hAllocation->PrintParameters(json);
5621  }
5622 
5623  json.EndObject();
5624  }
5625  json.EndArray();
5626 
5627  json.EndObject();
5628 }
5629 
5630 #endif // #if VMA_STATS_STRING_ENABLED
5631 
5632 /*
5633 How many suitable free suballocations to analyze before choosing the best one.
5634 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
5635  will be chosen.
5636 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5637  suballocations will be analyzed and the best one will be chosen.
5638 - Any other value is also acceptable.
5639 */
5640 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5641 
5642 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5643 {
5644  VMA_ASSERT(IsEmpty());
5645  pAllocationRequest->offset = 0;
5646  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5647  pAllocationRequest->sumItemSize = 0;
5648  pAllocationRequest->item = m_Suballocations.begin();
5649  pAllocationRequest->itemsToMakeLostCount = 0;
5650 }
5651 
5652 bool VmaBlockMetadata::CreateAllocationRequest(
5653  uint32_t currentFrameIndex,
5654  uint32_t frameInUseCount,
5655  VkDeviceSize bufferImageGranularity,
5656  VkDeviceSize allocSize,
5657  VkDeviceSize allocAlignment,
5658  VmaSuballocationType allocType,
5659  bool canMakeOtherLost,
5660  VmaAllocationRequest* pAllocationRequest)
5661 {
5662  VMA_ASSERT(allocSize > 0);
5663  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5664  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5665  VMA_HEAVY_ASSERT(Validate());
5666 
5667  // There is not enough total free space in this block to fulfill the request: early return.
5668  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5669  {
5670  return false;
5671  }
5672 
5673  // New algorithm: efficiently search m_FreeSuballocationsBySize.
5674  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5675  if(freeSuballocCount > 0)
5676  {
5677  if(VMA_BEST_FIT)
5678  {
5679  // Find first free suballocation with size not less than allocSize.
5680  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5681  m_FreeSuballocationsBySize.data(),
5682  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5683  allocSize,
5684  VmaSuballocationItemSizeLess());
5685  size_t index = it - m_FreeSuballocationsBySize.data();
5686  for(; index < freeSuballocCount; ++index)
5687  {
5688  if(CheckAllocation(
5689  currentFrameIndex,
5690  frameInUseCount,
5691  bufferImageGranularity,
5692  allocSize,
5693  allocAlignment,
5694  allocType,
5695  m_FreeSuballocationsBySize[index],
5696  false, // canMakeOtherLost
5697  &pAllocationRequest->offset,
5698  &pAllocationRequest->itemsToMakeLostCount,
5699  &pAllocationRequest->sumFreeSize,
5700  &pAllocationRequest->sumItemSize))
5701  {
5702  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5703  return true;
5704  }
5705  }
5706  }
5707  else
5708  {
5709  // Search starting from the biggest suballocations.
5710  for(size_t index = freeSuballocCount; index--; )
5711  {
5712  if(CheckAllocation(
5713  currentFrameIndex,
5714  frameInUseCount,
5715  bufferImageGranularity,
5716  allocSize,
5717  allocAlignment,
5718  allocType,
5719  m_FreeSuballocationsBySize[index],
5720  false, // canMakeOtherLost
5721  &pAllocationRequest->offset,
5722  &pAllocationRequest->itemsToMakeLostCount,
5723  &pAllocationRequest->sumFreeSize,
5724  &pAllocationRequest->sumItemSize))
5725  {
5726  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5727  return true;
5728  }
5729  }
5730  }
5731  }
5732 
5733  if(canMakeOtherLost)
5734  {
5735  // Brute-force algorithm. TODO: Come up with something better.
5736 
5737  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5738  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5739 
5740  VmaAllocationRequest tmpAllocRequest = {};
5741  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5742  suballocIt != m_Suballocations.end();
5743  ++suballocIt)
5744  {
5745  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5746  suballocIt->hAllocation->CanBecomeLost())
5747  {
5748  if(CheckAllocation(
5749  currentFrameIndex,
5750  frameInUseCount,
5751  bufferImageGranularity,
5752  allocSize,
5753  allocAlignment,
5754  allocType,
5755  suballocIt,
5756  canMakeOtherLost,
5757  &tmpAllocRequest.offset,
5758  &tmpAllocRequest.itemsToMakeLostCount,
5759  &tmpAllocRequest.sumFreeSize,
5760  &tmpAllocRequest.sumItemSize))
5761  {
5762  tmpAllocRequest.item = suballocIt;
5763 
5764  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5765  {
5766  *pAllocationRequest = tmpAllocRequest;
5767  }
5768  }
5769  }
5770  }
5771 
5772  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5773  {
5774  return true;
5775  }
5776  }
5777 
5778  return false;
5779 }
5780 
5781 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5782  uint32_t currentFrameIndex,
5783  uint32_t frameInUseCount,
5784  VmaAllocationRequest* pAllocationRequest)
5785 {
5786  while(pAllocationRequest->itemsToMakeLostCount > 0)
5787  {
5788  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5789  {
5790  ++pAllocationRequest->item;
5791  }
5792  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5793  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5794  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5795  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5796  {
5797  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5798  --pAllocationRequest->itemsToMakeLostCount;
5799  }
5800  else
5801  {
5802  return false;
5803  }
5804  }
5805 
5806  VMA_HEAVY_ASSERT(Validate());
5807  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5808  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5809 
5810  return true;
5811 }
5812 
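// Marks as lost every allocation in this block that CanBecomeLost() and that
// MakeLost() accepts for the given frame parameters, freeing its suballocation.
// Returns the number of allocations that were made lost.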
5813 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5814 {
5815  uint32_t lostAllocationCount = 0;
5816  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5817  it != m_Suballocations.end();
5818  ++it)
5819  {
5820  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5821  it->hAllocation->CanBecomeLost() &&
5822  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5823  {
5824  it = FreeSuballocation(it);
5825  ++lostAllocationCount;
5826  }
5827  }
5828  return lostAllocationCount;
5829 }
5830 
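// Commits an allocation request returned by CreateAllocationRequest: converts the
// chosen free suballocation into a used one and, if any padding remains before or
// after it, splits that padding off into new free suballocations.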
5831 void VmaBlockMetadata::Alloc(
5832  const VmaAllocationRequest& request,
5833  VmaSuballocationType type,
5834  VkDeviceSize allocSize,
5835  VmaAllocation hAllocation)
5836 {
5837  VMA_ASSERT(request.item != m_Suballocations.end());
5838  VmaSuballocation& suballoc = *request.item;
5839  // Given suballocation is a free block.
5840  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5841  // Given offset is inside this suballocation.
5842  VMA_ASSERT(request.offset >= suballoc.offset);
5843  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5844  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5845  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5846 
5847  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5848  // it to become used.
5849  UnregisterFreeSuballocation(request.item);
5850 
5851  suballoc.offset = request.offset;
5852  suballoc.size = allocSize;
5853  suballoc.type = type;
5854  suballoc.hAllocation = hAllocation;
5855 
5856  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5857  if(paddingEnd)
5858  {
5859  VmaSuballocation paddingSuballoc = {};
5860  paddingSuballoc.offset = request.offset + allocSize;
5861  paddingSuballoc.size = paddingEnd;
5862  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5863  VmaSuballocationList::iterator next = request.item;
5864  ++next;
5865  const VmaSuballocationList::iterator paddingEndItem =
5866  m_Suballocations.insert(next, paddingSuballoc);
5867  RegisterFreeSuballocation(paddingEndItem);
5868  }
5869 
5870  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5871  if(paddingBegin)
5872  {
5873  VmaSuballocation paddingSuballoc = {};
5874  paddingSuballoc.offset = request.offset - paddingBegin;
5875  paddingSuballoc.size = paddingBegin;
5876  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5877  const VmaSuballocationList::iterator paddingBeginItem =
5878  m_Suballocations.insert(request.item, paddingSuballoc);
5879  RegisterFreeSuballocation(paddingBeginItem);
5880  }
5881 
5882  // Update totals.
5883  --m_FreeCount;
5884  if(paddingBegin > 0)
5885  {
5886  ++m_FreeCount;
5887  }
5888  if(paddingEnd > 0)
5889  {
5890  ++m_FreeCount;
5891  }
5892  m_SumFreeSize -= allocSize;
5893 }
5894 
5895 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5896 {
5897  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5898  suballocItem != m_Suballocations.end();
5899  ++suballocItem)
5900  {
5901  VmaSuballocation& suballoc = *suballocItem;
5902  if(suballoc.hAllocation == allocation)
5903  {
5904  FreeSuballocation(suballocItem);
5905  VMA_HEAVY_ASSERT(Validate());
5906  return;
5907  }
5908  }
5909  VMA_ASSERT(0 && "Not found!");
5910 }
5911 
5912 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5913 {
5914  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5915  suballocItem != m_Suballocations.end();
5916  ++suballocItem)
5917  {
5918  VmaSuballocation& suballoc = *suballocItem;
5919  if(suballoc.offset == offset)
5920  {
5921  FreeSuballocation(suballocItem);
5922  return;
5923  }
5924  }
5925  VMA_ASSERT(0 && "Not found!");
5926 }
5927 
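// Sanity check of m_FreeSuballocationsBySize: every entry must point to a free
// suballocation of at least VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER bytes,
// and the vector must be sorted by size in ascending order.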
5928 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5929 {
5930  VkDeviceSize lastSize = 0;
5931  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5932  {
5933  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5934 
5935  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5936  {
5937  VMA_ASSERT(0);
5938  return false;
5939  }
5940  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5941  {
5942  VMA_ASSERT(0);
5943  return false;
5944  }
5945  if(it->size < lastSize)
5946  {
5947  VMA_ASSERT(0);
5948  return false;
5949  }
5950 
5951  lastSize = it->size;
5952  }
5953  return true;
5954 }
5955 
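// Checks whether an allocation of the given size/alignment/type can be placed at
// the suballocation pointed to by suballocItem. Works in two modes: when
// canMakeOtherLost is true, the allocation may span multiple consecutive
// suballocations, counting lost-capable ones into *itemsToMakeLostCount;
// otherwise suballocItem must be a single free suballocation large enough by
// itself. On success fills *pOffset with the final, aligned offset.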
5956 bool VmaBlockMetadata::CheckAllocation(
5957  uint32_t currentFrameIndex,
5958  uint32_t frameInUseCount,
5959  VkDeviceSize bufferImageGranularity,
5960  VkDeviceSize allocSize,
5961  VkDeviceSize allocAlignment,
5962  VmaSuballocationType allocType,
5963  VmaSuballocationList::const_iterator suballocItem,
5964  bool canMakeOtherLost,
5965  VkDeviceSize* pOffset,
5966  size_t* itemsToMakeLostCount,
5967  VkDeviceSize* pSumFreeSize,
5968  VkDeviceSize* pSumItemSize) const
5969 {
5970  VMA_ASSERT(allocSize > 0);
5971  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5972  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5973  VMA_ASSERT(pOffset != VMA_NULL);
5974 
5975  *itemsToMakeLostCount = 0;
5976  *pSumFreeSize = 0;
5977  *pSumItemSize = 0;
5978 
5979  if(canMakeOtherLost)
5980  {
5981  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5982  {
5983  *pSumFreeSize = suballocItem->size;
5984  }
5985  else
5986  {
5987  if(suballocItem->hAllocation->CanBecomeLost() &&
5988  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5989  {
5990  ++*itemsToMakeLostCount;
5991  *pSumItemSize = suballocItem->size;
5992  }
5993  else
5994  {
5995  return false;
5996  }
5997  }
5998 
5999  // Remaining size is too small for this request: Early return.
6000  if(m_Size - suballocItem->offset < allocSize)
6001  {
6002  return false;
6003  }
6004 
6005  // Start from offset equal to beginning of this suballocation.
6006  *pOffset = suballocItem->offset;
6007 
6008  // Apply VMA_DEBUG_MARGIN at the beginning.
6009  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6010  {
6011  *pOffset += VMA_DEBUG_MARGIN;
6012  }
6013 
6014  // Apply alignment.
6015  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
6016  *pOffset = VmaAlignUp(*pOffset, alignment);
6017 
6018  // Check previous suballocations for BufferImageGranularity conflicts.
6019  // Make bigger alignment if necessary.
6020  if(bufferImageGranularity > 1)
6021  {
6022  bool bufferImageGranularityConflict = false;
6023  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6024  while(prevSuballocItem != m_Suballocations.cbegin())
6025  {
6026  --prevSuballocItem;
6027  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6028  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6029  {
6030  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6031  {
6032  bufferImageGranularityConflict = true;
6033  break;
6034  }
6035  }
6036  else
6037  // Already on previous page.
6038  break;
6039  }
6040  if(bufferImageGranularityConflict)
6041  {
6042  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6043  }
6044  }
6045 
6046  // Now that we have final *pOffset, check if we are past suballocItem.
6047  // If yes, return false - this function should be called for another suballocItem as the starting point.
6048  if(*pOffset >= suballocItem->offset + suballocItem->size)
6049  {
6050  return false;
6051  }
6052 
6053  // Calculate padding at the beginning based on current offset.
6054  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
6055 
6056  // Calculate required margin at the end if this is not last suballocation.
6057  VmaSuballocationList::const_iterator next = suballocItem;
6058  ++next;
6059  const VkDeviceSize requiredEndMargin =
6060  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6061 
6062  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
6063  // Another early return check.
6064  if(suballocItem->offset + totalSize > m_Size)
6065  {
6066  return false;
6067  }
6068 
6069  // Advance lastSuballocItem until desired size is reached.
6070  // Update itemsToMakeLostCount.
6071  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
6072  if(totalSize > suballocItem->size)
6073  {
6074  VkDeviceSize remainingSize = totalSize - suballocItem->size;
6075  while(remainingSize > 0)
6076  {
6077  ++lastSuballocItem;
6078  if(lastSuballocItem == m_Suballocations.cend())
6079  {
6080  return false;
6081  }
6082  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6083  {
6084  *pSumFreeSize += lastSuballocItem->size;
6085  }
6086  else
6087  {
6088  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
6089  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
6090  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6091  {
6092  ++*itemsToMakeLostCount;
6093  *pSumItemSize += lastSuballocItem->size;
6094  }
6095  else
6096  {
6097  return false;
6098  }
6099  }
6100  remainingSize = (lastSuballocItem->size < remainingSize) ?
6101  remainingSize - lastSuballocItem->size : 0;
6102  }
6103  }
6104 
6105  // Check next suballocations for BufferImageGranularity conflicts.
6106  // If conflict exists, we must mark more allocations lost or fail.
6107  if(bufferImageGranularity > 1)
6108  {
6109  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
6110  ++nextSuballocItem;
6111  while(nextSuballocItem != m_Suballocations.cend())
6112  {
6113  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6114  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6115  {
6116  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6117  {
6118  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
6119  if(nextSuballoc.hAllocation->CanBecomeLost() &&
6120  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
6121  {
6122  ++*itemsToMakeLostCount;
6123  }
6124  else
6125  {
6126  return false;
6127  }
6128  }
6129  }
6130  else
6131  {
6132  // Already on next page.
6133  break;
6134  }
6135  ++nextSuballocItem;
6136  }
6137  }
6138  }
6139  else
6140  {
6141  const VmaSuballocation& suballoc = *suballocItem;
6142  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
6143 
6144  *pSumFreeSize = suballoc.size;
6145 
6146  // Size of this suballocation is too small for this request: Early return.
6147  if(suballoc.size < allocSize)
6148  {
6149  return false;
6150  }
6151 
6152  // Start from offset equal to beginning of this suballocation.
6153  *pOffset = suballoc.offset;
6154 
6155  // Apply VMA_DEBUG_MARGIN at the beginning.
6156  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
6157  {
6158  *pOffset += VMA_DEBUG_MARGIN;
6159  }
6160 
6161  // Apply alignment.
6162  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
6163  *pOffset = VmaAlignUp(*pOffset, alignment);
6164 
6165  // Check previous suballocations for BufferImageGranularity conflicts.
6166  // Make bigger alignment if necessary.
6167  if(bufferImageGranularity > 1)
6168  {
6169  bool bufferImageGranularityConflict = false;
6170  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
6171  while(prevSuballocItem != m_Suballocations.cbegin())
6172  {
6173  --prevSuballocItem;
6174  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
6175  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
6176  {
6177  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
6178  {
6179  bufferImageGranularityConflict = true;
6180  break;
6181  }
6182  }
6183  else
6184  // Already on previous page.
6185  break;
6186  }
6187  if(bufferImageGranularityConflict)
6188  {
6189  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
6190  }
6191  }
6192 
6193  // Calculate padding at the beginning based on current offset.
6194  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
6195 
6196  // Calculate required margin at the end if this is not last suballocation.
6197  VmaSuballocationList::const_iterator next = suballocItem;
6198  ++next;
6199  const VkDeviceSize requiredEndMargin =
6200  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
6201 
6202  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
6203  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
6204  {
6205  return false;
6206  }
6207 
6208  // Check next suballocations for BufferImageGranularity conflicts.
6209  // If conflict exists, allocation cannot be made here.
6210  if(bufferImageGranularity > 1)
6211  {
6212  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
6213  ++nextSuballocItem;
6214  while(nextSuballocItem != m_Suballocations.cend())
6215  {
6216  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
6217  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
6218  {
6219  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
6220  {
6221  return false;
6222  }
6223  }
6224  else
6225  {
6226  // Already on next page.
6227  break;
6228  }
6229  ++nextSuballocItem;
6230  }
6231  }
6232  }
6233 
6234  // All tests passed: Success. pOffset is already filled.
6235  return true;
6236 }
6237 
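// Merges a free suballocation with the free suballocation that immediately
// follows it, accumulating the size into `item` and erasing the next one.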
6238 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6239 {
6240  VMA_ASSERT(item != m_Suballocations.end());
6241  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6242 
6243  VmaSuballocationList::iterator nextItem = item;
6244  ++nextItem;
6245  VMA_ASSERT(nextItem != m_Suballocations.end());
6246  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6247 
6248  item->size += nextItem->size;
6249  --m_FreeCount;
6250  m_Suballocations.erase(nextItem);
6251 }
6252 
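// Converts the given suballocation to free, updates m_FreeCount/m_SumFreeSize,
// coalesces it with free neighbors, and registers the resulting free range in
// m_FreeSuballocationsBySize. Returns an iterator to the merged suballocation.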
6253 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
6254 {
6255  // Change this suballocation to be marked as free.
6256  VmaSuballocation& suballoc = *suballocItem;
6257  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6258  suballoc.hAllocation = VK_NULL_HANDLE;
6259 
6260  // Update totals.
6261  ++m_FreeCount;
6262  m_SumFreeSize += suballoc.size;
6263 
6264  // Merge with previous and/or next suballocation if it's also free.
6265  bool mergeWithNext = false;
6266  bool mergeWithPrev = false;
6267 
6268  VmaSuballocationList::iterator nextItem = suballocItem;
6269  ++nextItem;
6270  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
6271  {
6272  mergeWithNext = true;
6273  }
6274 
6275  VmaSuballocationList::iterator prevItem = suballocItem;
6276  if(suballocItem != m_Suballocations.begin())
6277  {
6278  --prevItem;
6279  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
6280  {
6281  mergeWithPrev = true;
6282  }
6283  }
6284 
6285  if(mergeWithNext)
6286  {
6287  UnregisterFreeSuballocation(nextItem);
6288  MergeFreeWithNext(suballocItem);
6289  }
6290 
6291  if(mergeWithPrev)
6292  {
6293  UnregisterFreeSuballocation(prevItem);
6294  MergeFreeWithNext(prevItem);
6295  RegisterFreeSuballocation(prevItem);
6296  return prevItem;
6297  }
6298  else
6299  {
6300  RegisterFreeSuballocation(suballocItem);
6301  return suballocItem;
6302  }
6303 }
6304 
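// Inserts a free suballocation into m_FreeSuballocationsBySize, keeping the
// vector sorted by size. Suballocations smaller than
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are intentionally not registered.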
6305 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6306 {
6307  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6308  VMA_ASSERT(item->size > 0);
6309 
6310  // You may want to enable this validation at the beginning or at the end of
6311  // this function, depending on what you want to check.
6312  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6313 
6314  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6315  {
6316  if(m_FreeSuballocationsBySize.empty())
6317  {
6318  m_FreeSuballocationsBySize.push_back(item);
6319  }
6320  else
6321  {
6322  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6323  }
6324  }
6325 
6326  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6327 }
6328 
6329 
6330 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
6331 {
6332  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6333  VMA_ASSERT(item->size > 0);
6334 
6335  // You may want to enable this validation at the beginning or at the end of
6336  // this function, depending on what you want to check.
6337  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6338 
6339  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6340  {
6341  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
6342  m_FreeSuballocationsBySize.data(),
6343  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
6344  item,
6345  VmaSuballocationItemSizeLess());
6346  for(size_t index = it - m_FreeSuballocationsBySize.data();
6347  index < m_FreeSuballocationsBySize.size();
6348  ++index)
6349  {
6350  if(m_FreeSuballocationsBySize[index] == item)
6351  {
6352  VmaVectorRemove(m_FreeSuballocationsBySize, index);
6353  return;
6354  }
6355  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
6356  }
6357  VMA_ASSERT(0 && "Not found.");
6358  }
6359 
6360  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6361 }
6362 
6363 ////////////////////////////////////////////////////////////////////////////////
6364 // class VmaDeviceMemoryBlock
6365 
6366 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
6367  m_Metadata(hAllocator),
6368  m_MemoryTypeIndex(UINT32_MAX),
6369  m_hMemory(VK_NULL_HANDLE),
6370  m_MapCount(0),
6371  m_pMappedData(VMA_NULL)
6372 {
6373 }
6374 
6375 void VmaDeviceMemoryBlock::Init(
6376  uint32_t newMemoryTypeIndex,
6377  VkDeviceMemory newMemory,
6378  VkDeviceSize newSize)
6379 {
6380  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6381 
6382  m_MemoryTypeIndex = newMemoryTypeIndex;
6383  m_hMemory = newMemory;
6384 
6385  m_Metadata.Init(newSize);
6386 }
6387 
6388 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6389 {
6390  // This is the most important assert in the entire library.
6391  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
6392  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6393 
6394  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6395  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6396  m_hMemory = VK_NULL_HANDLE;
6397 }
6398 
6399 bool VmaDeviceMemoryBlock::Validate() const
6400 {
6401  if((m_hMemory == VK_NULL_HANDLE) ||
6402  (m_Metadata.GetSize() == 0))
6403  {
6404  return false;
6405  }
6406 
6407  return m_Metadata.Validate();
6408 }
6409 
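// Maps the block's VkDeviceMemory with reference counting: vkMapMemory is called
// only on the transition from an unmapped state; nested calls just increase
// m_MapCount and return the cached m_pMappedData pointer.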
6410 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6411 {
6412  if(count == 0)
6413  {
6414  return VK_SUCCESS;
6415  }
6416 
6417  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6418  if(m_MapCount != 0)
6419  {
6420  m_MapCount += count;
6421  VMA_ASSERT(m_pMappedData != VMA_NULL);
6422  if(ppData != VMA_NULL)
6423  {
6424  *ppData = m_pMappedData;
6425  }
6426  return VK_SUCCESS;
6427  }
6428  else
6429  {
6430  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
6431  hAllocator->m_hDevice,
6432  m_hMemory,
6433  0, // offset
6434  VK_WHOLE_SIZE,
6435  0, // flags
6436  &m_pMappedData);
6437  if(result == VK_SUCCESS)
6438  {
6439  if(ppData != VMA_NULL)
6440  {
6441  *ppData = m_pMappedData;
6442  }
6443  m_MapCount = count;
6444  }
6445  return result;
6446  }
6447 }
6448 
6449 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6450 {
6451  if(count == 0)
6452  {
6453  return;
6454  }
6455 
6456  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6457  if(m_MapCount >= count)
6458  {
6459  m_MapCount -= count;
6460  if(m_MapCount == 0)
6461  {
6462  m_pMappedData = VMA_NULL;
6463  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6464  }
6465  }
6466  else
6467  {
6468  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6469  }
6470 }
6471 
6472 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6473  const VmaAllocator hAllocator,
6474  const VmaAllocation hAllocation,
6475  VkBuffer hBuffer)
6476 {
6477  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6478  hAllocation->GetBlock() == this);
6479  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6480  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6481  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6482  hAllocator->m_hDevice,
6483  hBuffer,
6484  m_hMemory,
6485  hAllocation->GetOffset());
6486 }
6487 
6488 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6489  const VmaAllocator hAllocator,
6490  const VmaAllocation hAllocation,
6491  VkImage hImage)
6492 {
6493  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6494  hAllocation->GetBlock() == this);
6495  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6496  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6497  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6498  hAllocator->m_hDevice,
6499  hImage,
6500  m_hMemory,
6501  hAllocation->GetOffset());
6502 }
6503 
6504 static void InitStatInfo(VmaStatInfo& outInfo)
6505 {
6506  memset(&outInfo, 0, sizeof(outInfo));
6507  outInfo.allocationSizeMin = UINT64_MAX;
6508  outInfo.unusedRangeSizeMin = UINT64_MAX;
6509 }
6510 
6511 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6512 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6513 {
6514  inoutInfo.blockCount += srcInfo.blockCount;
6515  inoutInfo.allocationCount += srcInfo.allocationCount;
6516  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6517  inoutInfo.usedBytes += srcInfo.usedBytes;
6518  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6519  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6520  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6521  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6522  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6523 }
6524 
6525 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6526 {
6527  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6528  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6529  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6530  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6531 }
6532 
6533 VmaPool_T::VmaPool_T(
6534  VmaAllocator hAllocator,
6535  const VmaPoolCreateInfo& createInfo) :
6536  m_BlockVector(
6537  hAllocator,
6538  createInfo.memoryTypeIndex,
6539  createInfo.blockSize,
6540  createInfo.minBlockCount,
6541  createInfo.maxBlockCount,
6542  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6543  createInfo.frameInUseCount,
6544  true) // isCustomPool
6545 {
6546 }
6547 
6548 VmaPool_T::~VmaPool_T()
6549 {
6550 }
6551 
6552 #if VMA_STATS_STRING_ENABLED
6553 
6554 #endif // #if VMA_STATS_STRING_ENABLED
6555 
6556 VmaBlockVector::VmaBlockVector(
6557  VmaAllocator hAllocator,
6558  uint32_t memoryTypeIndex,
6559  VkDeviceSize preferredBlockSize,
6560  size_t minBlockCount,
6561  size_t maxBlockCount,
6562  VkDeviceSize bufferImageGranularity,
6563  uint32_t frameInUseCount,
6564  bool isCustomPool) :
6565  m_hAllocator(hAllocator),
6566  m_MemoryTypeIndex(memoryTypeIndex),
6567  m_PreferredBlockSize(preferredBlockSize),
6568  m_MinBlockCount(minBlockCount),
6569  m_MaxBlockCount(maxBlockCount),
6570  m_BufferImageGranularity(bufferImageGranularity),
6571  m_FrameInUseCount(frameInUseCount),
6572  m_IsCustomPool(isCustomPool),
6573  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6574  m_HasEmptyBlock(false),
6575  m_pDefragmentator(VMA_NULL)
6576 {
6577 }
6578 
6579 VmaBlockVector::~VmaBlockVector()
6580 {
6581  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6582 
6583  for(size_t i = m_Blocks.size(); i--; )
6584  {
6585  m_Blocks[i]->Destroy(m_hAllocator);
6586  vma_delete(m_hAllocator, m_Blocks[i]);
6587  }
6588 }
6589 
6590 VkResult VmaBlockVector::CreateMinBlocks()
6591 {
6592  for(size_t i = 0; i < m_MinBlockCount; ++i)
6593  {
6594  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6595  if(res != VK_SUCCESS)
6596  {
6597  return res;
6598  }
6599  }
6600  return VK_SUCCESS;
6601 }
6602 
6603 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6604 {
6605  pStats->size = 0;
6606  pStats->unusedSize = 0;
6607  pStats->allocationCount = 0;
6608  pStats->unusedRangeCount = 0;
6609  pStats->unusedRangeSizeMax = 0;
6610 
6611  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6612 
6613  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6614  {
6615  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6616  VMA_ASSERT(pBlock);
6617  VMA_HEAVY_ASSERT(pBlock->Validate());
6618  pBlock->m_Metadata.AddPoolStats(*pStats);
6619  }
6620 }
6621 
6622 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6623 
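// Allocation strategy, in order: (1) try to place the allocation in an existing
// block without disturbing anything, (2) create a new block if the block count
// and flags allow it, (3) if VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT is
// set, retry against existing blocks while making other allocations lost.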
6624 VkResult VmaBlockVector::Allocate(
6625  VmaPool hCurrentPool,
6626  uint32_t currentFrameIndex,
6627  const VkMemoryRequirements& vkMemReq,
6628  const VmaAllocationCreateInfo& createInfo,
6629  VmaSuballocationType suballocType,
6630  VmaAllocation* pAllocation)
6631 {
6632  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6633  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6634 
6635  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6636 
6637  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6638  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6639  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6640  {
6641  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6642  VMA_ASSERT(pCurrBlock);
6643  VmaAllocationRequest currRequest = {};
6644  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6645  currentFrameIndex,
6646  m_FrameInUseCount,
6647  m_BufferImageGranularity,
6648  vkMemReq.size,
6649  vkMemReq.alignment,
6650  suballocType,
6651  false, // canMakeOtherLost
6652  &currRequest))
6653  {
6654  // Allocate from pCurrBlock.
6655  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6656 
6657  if(mapped)
6658  {
6659  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6660  if(res != VK_SUCCESS)
6661  {
6662  return res;
6663  }
6664  }
6665 
6666  // We no longer have an empty block.
6667  if(pCurrBlock->m_Metadata.IsEmpty())
6668  {
6669  m_HasEmptyBlock = false;
6670  }
6671 
6672  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6673  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6674  (*pAllocation)->InitBlockAllocation(
6675  hCurrentPool,
6676  pCurrBlock,
6677  currRequest.offset,
6678  vkMemReq.alignment,
6679  vkMemReq.size,
6680  suballocType,
6681  mapped,
6682  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6683  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6684  VMA_DEBUG_LOG("    Returned from existing block #%u", (uint32_t)blockIndex);
6685  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6686  return VK_SUCCESS;
6687  }
6688  }
6689 
6690  const bool canCreateNewBlock =
6691  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6692  (m_Blocks.size() < m_MaxBlockCount);
6693 
6694  // 2. Try to create new block.
6695  if(canCreateNewBlock)
6696  {
6697  // Calculate optimal size for new block.
6698  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6699  uint32_t newBlockSizeShift = 0;
6700  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6701 
6702  // Allocating blocks of other sizes is allowed only in default pools.
6703  // In custom pools block size is fixed.
6704  if(m_IsCustomPool == false)
6705  {
6706  // Allocate 1/8, 1/4, 1/2 as first blocks.
6707  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6708  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6709  {
6710  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6711  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6712  {
6713  newBlockSize = smallerNewBlockSize;
6714  ++newBlockSizeShift;
6715  }
6716  else
6717  {
6718  break;
6719  }
6720  }
6721  }
6722 
6723  size_t newBlockIndex = 0;
6724  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6725  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6726  if(m_IsCustomPool == false)
6727  {
6728  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6729  {
6730  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6731  if(smallerNewBlockSize >= vkMemReq.size)
6732  {
6733  newBlockSize = smallerNewBlockSize;
6734  ++newBlockSizeShift;
6735  res = CreateBlock(newBlockSize, &newBlockIndex);
6736  }
6737  else
6738  {
6739  break;
6740  }
6741  }
6742  }
6743 
6744  if(res == VK_SUCCESS)
6745  {
6746  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6747  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6748 
6749  if(mapped)
6750  {
6751  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6752  if(res != VK_SUCCESS)
6753  {
6754  return res;
6755  }
6756  }
6757 
6758  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6759  VmaAllocationRequest allocRequest;
6760  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6761  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6762  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6763  (*pAllocation)->InitBlockAllocation(
6764  hCurrentPool,
6765  pBlock,
6766  allocRequest.offset,
6767  vkMemReq.alignment,
6768  vkMemReq.size,
6769  suballocType,
6770  mapped,
6771  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6772  VMA_HEAVY_ASSERT(pBlock->Validate());
6773  VMA_DEBUG_LOG("    Created new block Size=%llu", newBlockSize);
6774  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6775  return VK_SUCCESS;
6776  }
6777  }
6778 
6779  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6780 
6781  // 3. Try to allocate from existing blocks with making other allocations lost.
6782  if(canMakeOtherLost)
6783  {
6784  uint32_t tryIndex = 0;
6785  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6786  {
6787  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6788  VmaAllocationRequest bestRequest = {};
6789  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6790 
6791  // 1. Search existing allocations.
6792  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6793  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6794  {
6795  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6796  VMA_ASSERT(pCurrBlock);
6797  VmaAllocationRequest currRequest = {};
6798  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6799  currentFrameIndex,
6800  m_FrameInUseCount,
6801  m_BufferImageGranularity,
6802  vkMemReq.size,
6803  vkMemReq.alignment,
6804  suballocType,
6805  canMakeOtherLost,
6806  &currRequest))
6807  {
6808  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6809  if(pBestRequestBlock == VMA_NULL ||
6810  currRequestCost < bestRequestCost)
6811  {
6812  pBestRequestBlock = pCurrBlock;
6813  bestRequest = currRequest;
6814  bestRequestCost = currRequestCost;
6815 
6816  if(bestRequestCost == 0)
6817  {
6818  break;
6819  }
6820  }
6821  }
6822  }
6823 
6824  if(pBestRequestBlock != VMA_NULL)
6825  {
6826  if(mapped)
6827  {
6828  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6829  if(res != VK_SUCCESS)
6830  {
6831  return res;
6832  }
6833  }
6834 
6835  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6836  currentFrameIndex,
6837  m_FrameInUseCount,
6838  &bestRequest))
6839  {
6840  // We no longer have an empty block.
6841  if(pBestRequestBlock->m_Metadata.IsEmpty())
6842  {
6843  m_HasEmptyBlock = false;
6844  }
6845  // Allocate from this pBlock.
6846  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6847  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6848  (*pAllocation)->InitBlockAllocation(
6849  hCurrentPool,
6850  pBestRequestBlock,
6851  bestRequest.offset,
6852  vkMemReq.alignment,
6853  vkMemReq.size,
6854  suballocType,
6855  mapped,
6856  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6857  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6858  VMA_DEBUG_LOG("    Returned from existing block");
6859  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6860  return VK_SUCCESS;
6861  }
6862  // else: Some allocations must have been touched while we are here. Next try.
6863  }
6864  else
6865  {
6866  // Could not find place in any of the blocks - break outer loop.
6867  break;
6868  }
6869  }
6870  /* Maximum number of tries exceeded - a very unlikely event when many other
6871  threads are simultaneously touching allocations, making it impossible to make
6872  them lost at the same time as we try to allocate. */
6873  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6874  {
6875  return VK_ERROR_TOO_MANY_OBJECTS;
6876  }
6877  }
6878 
6879  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6880 }
6881 
6882 void VmaBlockVector::Free(
6883  VmaAllocation hAllocation)
6884 {
6885  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6886 
6887  // Scope for lock.
6888  {
6889  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6890 
6891  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6892 
6893  if(hAllocation->IsPersistentMap())
6894  {
6895  pBlock->Unmap(m_hAllocator, 1);
6896  }
6897 
6898  pBlock->m_Metadata.Free(hAllocation);
6899  VMA_HEAVY_ASSERT(pBlock->Validate());
6900 
6901  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6902 
6903  // pBlock became empty after this deallocation.
6904  if(pBlock->m_Metadata.IsEmpty())
6905  {
6906  // We already have an empty block - we don't want two, so delete this one.
6907  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6908  {
6909  pBlockToDelete = pBlock;
6910  Remove(pBlock);
6911  }
6912  // We now have our first empty block.
6913  else
6914  {
6915  m_HasEmptyBlock = true;
6916  }
6917  }
6918  // pBlock didn't become empty, but we have another empty block - find and free that one.
6919  // (This is optional, heuristics.)
6920  else if(m_HasEmptyBlock)
6921  {
6922  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6923  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6924  {
6925  pBlockToDelete = pLastBlock;
6926  m_Blocks.pop_back();
6927  m_HasEmptyBlock = false;
6928  }
6929  }
6930 
6931  IncrementallySortBlocks();
6932  }
6933 
6934  // Destruction of an empty block. Deferred until this point, outside of the
6935  // mutex lock, for performance reasons.
6936  if(pBlockToDelete != VMA_NULL)
6937  {
6938  VMA_DEBUG_LOG("    Deleted empty block");
6939  pBlockToDelete->Destroy(m_hAllocator);
6940  vma_delete(m_hAllocator, pBlockToDelete);
6941  }
6942 }
6943 
6944 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
6945 {
6946  VkDeviceSize result = 0;
6947  for(size_t i = m_Blocks.size(); i--; )
6948  {
6949  result = VMA_MAX(result, m_Blocks[i]->m_Metadata.GetSize());
6950  if(result >= m_PreferredBlockSize)
6951  {
6952  break;
6953  }
6954  }
6955  return result;
6956 }
6957 
6958 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6959 {
6960  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6961  {
6962  if(m_Blocks[blockIndex] == pBlock)
6963  {
6964  VmaVectorRemove(m_Blocks, blockIndex);
6965  return;
6966  }
6967  }
6968  VMA_ASSERT(0);
6969 }
6970 
6971 void VmaBlockVector::IncrementallySortBlocks()
6972 {
6973  // Bubble sort only until first swap.
6974  for(size_t i = 1; i < m_Blocks.size(); ++i)
6975  {
6976  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6977  {
6978  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6979  return;
6980  }
6981  }
6982 }
6983 
6984 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6985 {
6986  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6987  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6988  allocInfo.allocationSize = blockSize;
6989  VkDeviceMemory mem = VK_NULL_HANDLE;
6990  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6991  if(res < 0)
6992  {
6993  return res;
6994  }
6995 
6996  // New VkDeviceMemory successfully created.
6997 
6998  // Create a new block object for it.
6999  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
7000  pBlock->Init(
7001  m_MemoryTypeIndex,
7002  mem,
7003  allocInfo.allocationSize);
7004 
7005  m_Blocks.push_back(pBlock);
7006  if(pNewBlockIndex != VMA_NULL)
7007  {
7008  *pNewBlockIndex = m_Blocks.size() - 1;
7009  }
7010 
7011  return VK_SUCCESS;
7012 }
7013 
7014 #if VMA_STATS_STRING_ENABLED
7015 
7016 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
7017 {
7018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7019 
7020  json.BeginObject();
7021 
7022  if(m_IsCustomPool)
7023  {
7024  json.WriteString("MemoryTypeIndex");
7025  json.WriteNumber(m_MemoryTypeIndex);
7026 
7027  json.WriteString("BlockSize");
7028  json.WriteNumber(m_PreferredBlockSize);
7029 
7030  json.WriteString("BlockCount");
7031  json.BeginObject(true);
7032  if(m_MinBlockCount > 0)
7033  {
7034  json.WriteString("Min");
7035  json.WriteNumber((uint64_t)m_MinBlockCount);
7036  }
7037  if(m_MaxBlockCount < SIZE_MAX)
7038  {
7039  json.WriteString("Max");
7040  json.WriteNumber((uint64_t)m_MaxBlockCount);
7041  }
7042  json.WriteString("Cur");
7043  json.WriteNumber((uint64_t)m_Blocks.size());
7044  json.EndObject();
7045 
7046  if(m_FrameInUseCount > 0)
7047  {
7048  json.WriteString("FrameInUseCount");
7049  json.WriteNumber(m_FrameInUseCount);
7050  }
7051  }
7052  else
7053  {
7054  json.WriteString("PreferredBlockSize");
7055  json.WriteNumber(m_PreferredBlockSize);
7056  }
7057 
7058  json.WriteString("Blocks");
7059  json.BeginArray();
7060  for(size_t i = 0; i < m_Blocks.size(); ++i)
7061  {
7062  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
7063  }
7064  json.EndArray();
7065 
7066  json.EndObject();
7067 }
7068 
7069 #endif // #if VMA_STATS_STRING_ENABLED
7070 
7071 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
7072  VmaAllocator hAllocator,
7073  uint32_t currentFrameIndex)
7074 {
7075  if(m_pDefragmentator == VMA_NULL)
7076  {
7077  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
7078  hAllocator,
7079  this,
7080  currentFrameIndex);
7081  }
7082 
7083  return m_pDefragmentator;
7084 }
7085 
7086 VkResult VmaBlockVector::Defragment(
7087  VmaDefragmentationStats* pDefragmentationStats,
7088  VkDeviceSize& maxBytesToMove,
7089  uint32_t& maxAllocationsToMove)
7090 {
7091  if(m_pDefragmentator == VMA_NULL)
7092  {
7093  return VK_SUCCESS;
7094  }
7095 
7096  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7097 
7098  // Defragment.
7099  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
7100 
7101  // Accumulate statistics.
7102  if(pDefragmentationStats != VMA_NULL)
7103  {
7104  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
7105  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
7106  pDefragmentationStats->bytesMoved += bytesMoved;
7107  pDefragmentationStats->allocationsMoved += allocationsMoved;
7108  VMA_ASSERT(bytesMoved <= maxBytesToMove);
7109  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
7110  maxBytesToMove -= bytesMoved;
7111  maxAllocationsToMove -= allocationsMoved;
7112  }
7113 
7114  // Free empty blocks.
7115  m_HasEmptyBlock = false;
7116  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
7117  {
7118  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
7119  if(pBlock->m_Metadata.IsEmpty())
7120  {
7121  if(m_Blocks.size() > m_MinBlockCount)
7122  {
7123  if(pDefragmentationStats != VMA_NULL)
7124  {
7125  ++pDefragmentationStats->deviceMemoryBlocksFreed;
7126  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
7127  }
7128 
7129  VmaVectorRemove(m_Blocks, blockIndex);
7130  pBlock->Destroy(m_hAllocator);
7131  vma_delete(m_hAllocator, pBlock);
7132  }
7133  else
7134  {
7135  m_HasEmptyBlock = true;
7136  }
7137  }
7138  }
7139 
7140  return result;
7141 }
7142 
7143 void VmaBlockVector::DestroyDefragmentator()
7144 {
7145  if(m_pDefragmentator != VMA_NULL)
7146  {
7147  vma_delete(m_hAllocator, m_pDefragmentator);
7148  m_pDefragmentator = VMA_NULL;
7149  }
7150 }
7151 
7152 void VmaBlockVector::MakePoolAllocationsLost(
7153  uint32_t currentFrameIndex,
7154  size_t* pLostAllocationCount)
7155 {
7156  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7157  size_t lostAllocationCount = 0;
7158  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7159  {
7160  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7161  VMA_ASSERT(pBlock);
7162  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7163  }
7164  if(pLostAllocationCount != VMA_NULL)
7165  {
7166  *pLostAllocationCount = lostAllocationCount;
7167  }
7168 }
7169 
7170 void VmaBlockVector::AddStats(VmaStats* pStats)
7171 {
7172  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7173  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7174 
7175  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7176 
7177  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7178  {
7179  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7180  VMA_ASSERT(pBlock);
7181  VMA_HEAVY_ASSERT(pBlock->Validate());
7182  VmaStatInfo allocationStatInfo;
7183  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7184  VmaAddStatInfo(pStats->total, allocationStatInfo);
7185  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7186  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7187  }
7188 }
7189 
7190 ////////////////////////////////////////////////////////////////////////////////
7191 // VmaDefragmentator members definition
7192 
7193 VmaDefragmentator::VmaDefragmentator(
7194  VmaAllocator hAllocator,
7195  VmaBlockVector* pBlockVector,
7196  uint32_t currentFrameIndex) :
7197  m_hAllocator(hAllocator),
7198  m_pBlockVector(pBlockVector),
7199  m_CurrentFrameIndex(currentFrameIndex),
7200  m_BytesMoved(0),
7201  m_AllocationsMoved(0),
7202  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
7203  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
7204 {
7205 }
7206 
7207 VmaDefragmentator::~VmaDefragmentator()
7208 {
7209  for(size_t i = m_Blocks.size(); i--; )
7210  {
7211  vma_delete(m_hAllocator, m_Blocks[i]);
7212  }
7213 }
7214 
7215 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7216 {
7217  AllocationInfo allocInfo;
7218  allocInfo.m_hAllocation = hAlloc;
7219  allocInfo.m_pChanged = pChanged;
7220  m_Allocations.push_back(allocInfo);
7221 }
7222 
7223 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
7224 {
7225  // It has already been mapped for defragmentation.
7226  if(m_pMappedDataForDefragmentation)
7227  {
7228  *ppMappedData = m_pMappedDataForDefragmentation;
7229  return VK_SUCCESS;
7230  }
7231 
7232  // It is originally mapped.
7233  if(m_pBlock->GetMappedData())
7234  {
7235  *ppMappedData = m_pBlock->GetMappedData();
7236  return VK_SUCCESS;
7237  }
7238 
7239  // Map on first usage.
7240  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
7241  *ppMappedData = m_pMappedDataForDefragmentation;
7242  return res;
7243 }
7244 
7245 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7246 {
7247  if(m_pMappedDataForDefragmentation != VMA_NULL)
7248  {
7249  m_pBlock->Unmap(hAllocator, 1);
7250  }
7251 }
7252 
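// One pass of defragmentation: repeatedly takes allocations from the most
// "source" blocks (end of m_Blocks) and tries to move each into an earlier, more
// "destination" block, memcpy-ing the data through mapped pointers, until the
// byte/allocation limits are reached or nothing movable remains.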
7253 VkResult VmaDefragmentator::DefragmentRound(
7254  VkDeviceSize maxBytesToMove,
7255  uint32_t maxAllocationsToMove)
7256 {
7257  if(m_Blocks.empty())
7258  {
7259  return VK_SUCCESS;
7260  }
7261 
7262  size_t srcBlockIndex = m_Blocks.size() - 1;
7263  size_t srcAllocIndex = SIZE_MAX;
7264  for(;;)
7265  {
7266  // 1. Find next allocation to move.
7267  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
7268  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
7269  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
7270  {
7271  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
7272  {
7273  // Finished: no more allocations to process.
7274  if(srcBlockIndex == 0)
7275  {
7276  return VK_SUCCESS;
7277  }
7278  else
7279  {
7280  --srcBlockIndex;
7281  srcAllocIndex = SIZE_MAX;
7282  }
7283  }
7284  else
7285  {
7286  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
7287  }
7288  }
7289 
7290  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
7291  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
7292 
7293  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
7294  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
7295  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
7296  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
7297 
7298  // 2. Try to find new place for this allocation in preceding or current block.
7299  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
7300  {
7301  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
7302  VmaAllocationRequest dstAllocRequest;
7303  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
7304  m_CurrentFrameIndex,
7305  m_pBlockVector->GetFrameInUseCount(),
7306  m_pBlockVector->GetBufferImageGranularity(),
7307  size,
7308  alignment,
7309  suballocType,
7310  false, // canMakeOtherLost
7311  &dstAllocRequest) &&
7312  MoveMakesSense(
7313  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
7314  {
7315  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
7316 
7317  // Reached limit on number of allocations or bytes to move.
7318  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
7319  (m_BytesMoved + size > maxBytesToMove))
7320  {
7321  return VK_INCOMPLETE;
7322  }
7323 
7324  void* pDstMappedData = VMA_NULL;
7325  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
7326  if(res != VK_SUCCESS)
7327  {
7328  return res;
7329  }
7330 
7331  void* pSrcMappedData = VMA_NULL;
7332  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
7333  if(res != VK_SUCCESS)
7334  {
7335  return res;
7336  }
7337 
7338  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
7339  memcpy(
7340  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
7341  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
7342  static_cast<size_t>(size));
7343 
7344  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
7345  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
7346 
7347  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
7348 
7349  if(allocInfo.m_pChanged != VMA_NULL)
7350  {
7351  *allocInfo.m_pChanged = VK_TRUE;
7352  }
7353 
7354  ++m_AllocationsMoved;
7355  m_BytesMoved += size;
7356 
7357  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
7358 
7359  break;
7360  }
7361  }
7362 
7363  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
7364 
7365  if(srcAllocIndex > 0)
7366  {
7367  --srcAllocIndex;
7368  }
7369  else
7370  {
7371  if(srcBlockIndex > 0)
7372  {
7373  --srcBlockIndex;
7374  srcAllocIndex = SIZE_MAX;
7375  }
7376  else
7377  {
7378  return VK_SUCCESS;
7379  }
7380  }
7381  }
7382 }
7383 
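// Top-level defragmentation driver: builds per-block info for all blocks,
// distributes the registered allocations to their owning blocks, sorts blocks
// from most "destination" to most "source", then runs up to two rounds of
// DefragmentRound() and finally unmaps anything mapped for defragmentation.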
7384 VkResult VmaDefragmentator::Defragment(
7385  VkDeviceSize maxBytesToMove,
7386  uint32_t maxAllocationsToMove)
7387 {
7388  if(m_Allocations.empty())
7389  {
7390  return VK_SUCCESS;
7391  }
7392 
7393  // Create block info for each block.
7394  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7395  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7396  {
7397  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7398  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7399  m_Blocks.push_back(pBlockInfo);
7400  }
7401 
7402  // Sort them by m_pBlock pointer value.
7403  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7404 
7405  // Move allocation infos from m_Allocations to the appropriate m_Blocks[i]->m_Allocations.
7406  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7407  {
7408  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7409  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
7410  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7411  {
7412  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7413  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7414  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7415  {
7416  (*it)->m_Allocations.push_back(allocInfo);
7417  }
7418  else
7419  {
7420  VMA_ASSERT(0);
7421  }
7422  }
7423  }
7424  m_Allocations.clear();
7425 
7426  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7427  {
7428  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7429  pBlockInfo->CalcHasNonMovableAllocations();
7430  pBlockInfo->SortAllocationsBySizeDescecnding();
7431  }
7432 
7433  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
7434  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7435 
7436  // Execute defragmentation rounds (the main part).
7437  VkResult result = VK_SUCCESS;
7438  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7439  {
7440  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7441  }
7442 
7443  // Unmap blocks that were mapped for defragmentation.
7444  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7445  {
7446  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7447  }
7448 
7449  return result;
7450 }
7451 
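// Heuristic deciding whether a move compacts the pool: moving to an earlier
// block always makes sense, moving to a later block never does, and within the
// same block only a move toward a lower offset is worthwhile.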
7452 bool VmaDefragmentator::MoveMakesSense(
7453  size_t dstBlockIndex, VkDeviceSize dstOffset,
7454  size_t srcBlockIndex, VkDeviceSize srcOffset)
7455 {
7456  if(dstBlockIndex < srcBlockIndex)
7457  {
7458  return true;
7459  }
7460  if(dstBlockIndex > srcBlockIndex)
7461  {
7462  return false;
7463  }
7464  if(dstOffset < srcOffset)
7465  {
7466  return true;
7467  }
7468  return false;
7469 }
7470 
7471 ////////////////////////////////////////////////////////////////////////////////
7472 // VmaAllocator_T
7473 
7474 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7475  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7476  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7477  m_hDevice(pCreateInfo->device),
7478  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7479  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7480  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7481  m_PreferredLargeHeapBlockSize(0),
7482  m_PhysicalDevice(pCreateInfo->physicalDevice),
7483  m_CurrentFrameIndex(0),
7484  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7485 {
7486  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7487 
7488 #if !(VMA_DEDICATED_ALLOCATION)
7489  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
7490  {
7491  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
7492  }
7493 #endif
7494 
7495  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
7496  memset(&m_MemProps, 0, sizeof(m_MemProps));
7497  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7498 
7499  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7500  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7501 
7502  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7503  {
7504  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7505  }
7506 
7507  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7508  {
7509  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7510  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7511  }
7512 
7513  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7514 
7515  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7516  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7517 
7518  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7519  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7520 
7521  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7522  {
7523  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7524  {
7525  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7526  if(limit != VK_WHOLE_SIZE)
7527  {
7528  m_HeapSizeLimit[heapIndex] = limit;
7529  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7530  {
7531  m_MemProps.memoryHeaps[heapIndex].size = limit;
7532  }
7533  }
7534  }
7535  }
7536 
7537  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7538  {
7539  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7540 
7541  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7542  this,
7543  memTypeIndex,
7544  preferredBlockSize,
7545  0,
7546  SIZE_MAX,
7547  GetBufferImageGranularity(),
7548  pCreateInfo->frameInUseCount,
7549  false); // isCustomPool
7550  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7551  // because minBlockCount is 0.
7552  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7553  }
7554 }
7555 
7556 VmaAllocator_T::~VmaAllocator_T()
7557 {
7558  VMA_ASSERT(m_Pools.empty());
7559 
7560  for(size_t i = GetMemoryTypeCount(); i--; )
7561  {
7562  vma_delete(this, m_pDedicatedAllocations[i]);
7563  vma_delete(this, m_pBlockVectors[i]);
7564  }
7565 }
7566 
7567 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7568 {
7569 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7570  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7571  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7572  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7573  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7574  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7575  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7576  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7577  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7578  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7579  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7580  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7581  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7582  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7583  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7584 #if VMA_DEDICATED_ALLOCATION
7585  if(m_UseKhrDedicatedAllocation)
7586  {
7587  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7588  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7589  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7590  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7591  }
7592 #endif // #if VMA_DEDICATED_ALLOCATION
7593 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7594 
7595 #define VMA_COPY_IF_NOT_NULL(funcName) \
7596  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7597 
7598  if(pVulkanFunctions != VMA_NULL)
7599  {
7600  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7601  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7602  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7603  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7604  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7605  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7606  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7607  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7608  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7609  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7610  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7611  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7612  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7613  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7614 #if VMA_DEDICATED_ALLOCATION
7615  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7616  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7617 #endif
7618  }
7619 
7620 #undef VMA_COPY_IF_NOT_NULL
7621 
7622  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7623  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7624  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7625  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7626  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7627  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7628  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7629  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7630  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7631  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7632  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7633  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7634  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7635  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7636  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7637  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7638 #if VMA_DEDICATED_ALLOCATION
7639  if(m_UseKhrDedicatedAllocation)
7640  {
7641  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7642  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7643  }
7644 #endif
7645 }
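
/* A sketch (assuming VMA_STATIC_VULKAN_FUNCTIONS is defined to 0, and that
   physicalDevice and device are valid handles) of passing the function
   pointers explicitly via VmaAllocatorCreateInfo::pVulkanFunctions - the
   alternative that the asserts above refer to:

       VmaVulkanFunctions vulkanFunctions = {};
       vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
       vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
       vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
       vulkanFunctions.vkFreeMemory = &vkFreeMemory;
       vulkanFunctions.vkMapMemory = &vkMapMemory;
       vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
       vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
       vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
       vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
       vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
       vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
       vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
       vulkanFunctions.vkCreateImage = &vkCreateImage;
       vulkanFunctions.vkDestroyImage = &vkDestroyImage;

       VmaAllocatorCreateInfo allocatorInfo = {};
       allocatorInfo.physicalDevice = physicalDevice;
       allocatorInfo.device = device;
       allocatorInfo.pVulkanFunctions = &vulkanFunctions;

       VmaAllocator allocator;
       vmaCreateAllocator(&allocatorInfo, &allocator);
*/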
7646 
7647 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7648 {
7649  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7650  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7651  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7652  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7653 }
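
/* Worked example: assuming the default constants (VMA_SMALL_HEAP_MAX_SIZE of
   1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB), a 512 MiB heap
   counts as "small" and gets 512 / 8 = 64 MiB blocks, while a 4 GiB heap gets
   the 256 MiB default block size. */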
7654 
7655 VkResult VmaAllocator_T::AllocateMemoryOfType(
7656  const VkMemoryRequirements& vkMemReq,
7657  bool dedicatedAllocation,
7658  VkBuffer dedicatedBuffer,
7659  VkImage dedicatedImage,
7660  const VmaAllocationCreateInfo& createInfo,
7661  uint32_t memTypeIndex,
7662  VmaSuballocationType suballocType,
7663  VmaAllocation* pAllocation)
7664 {
7665  VMA_ASSERT(pAllocation != VMA_NULL);
7666  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7667 
7668  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7669 
7670  // If memory type is not HOST_VISIBLE, disable MAPPED.
7671  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7672  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7673  {
7674  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7675  }
7676 
7677  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7678  VMA_ASSERT(blockVector);
7679 
7680  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7681  bool preferDedicatedMemory =
7682  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7683  dedicatedAllocation ||
7684  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
7685  vkMemReq.size > preferredBlockSize / 2;
7686 
7687  if(preferDedicatedMemory &&
7688  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7689  finalCreateInfo.pool == VK_NULL_HANDLE)
7690  {
7691  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7692  }
7693 
7694  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7695  {
7696  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7697  {
7698  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7699  }
7700  else
7701  {
7702  return AllocateDedicatedMemory(
7703  vkMemReq.size,
7704  suballocType,
7705  memTypeIndex,
7706  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7707  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7708  finalCreateInfo.pUserData,
7709  dedicatedBuffer,
7710  dedicatedImage,
7711  pAllocation);
7712  }
7713  }
7714  else
7715  {
7716  VkResult res = blockVector->Allocate(
7717  VK_NULL_HANDLE, // hCurrentPool
7718  m_CurrentFrameIndex.load(),
7719  vkMemReq,
7720  finalCreateInfo,
7721  suballocType,
7722  pAllocation);
7723  if(res == VK_SUCCESS)
7724  {
7725  return res;
7726  }
7727 
7728  // Block allocation failed. Try dedicated memory.
7729  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7730  {
7731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7732  }
7733  else
7734  {
7735  res = AllocateDedicatedMemory(
7736  vkMemReq.size,
7737  suballocType,
7738  memTypeIndex,
7739  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7740  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7741  finalCreateInfo.pUserData,
7742  dedicatedBuffer,
7743  dedicatedImage,
7744  pAllocation);
7745  if(res == VK_SUCCESS)
7746  {
7747  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
7748  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7749  return VK_SUCCESS;
7750  }
7751  else
7752  {
7753  // Everything failed: Return error code.
7754  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7755  return res;
7756  }
7757  }
7758  }
7759 }
7760 
7761 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7762  VkDeviceSize size,
7763  VmaSuballocationType suballocType,
7764  uint32_t memTypeIndex,
7765  bool map,
7766  bool isUserDataString,
7767  void* pUserData,
7768  VkBuffer dedicatedBuffer,
7769  VkImage dedicatedImage,
7770  VmaAllocation* pAllocation)
7771 {
7772  VMA_ASSERT(pAllocation);
7773 
7774  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7775  allocInfo.memoryTypeIndex = memTypeIndex;
7776  allocInfo.allocationSize = size;
7777 
7778 #if VMA_DEDICATED_ALLOCATION
7779  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7780  if(m_UseKhrDedicatedAllocation)
7781  {
7782  if(dedicatedBuffer != VK_NULL_HANDLE)
7783  {
7784  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7785  dedicatedAllocInfo.buffer = dedicatedBuffer;
7786  allocInfo.pNext = &dedicatedAllocInfo;
7787  }
7788  else if(dedicatedImage != VK_NULL_HANDLE)
7789  {
7790  dedicatedAllocInfo.image = dedicatedImage;
7791  allocInfo.pNext = &dedicatedAllocInfo;
7792  }
7793  }
7794 #endif // #if VMA_DEDICATED_ALLOCATION
7795 
7796  // Allocate VkDeviceMemory.
7797  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7798  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7799  if(res < 0)
7800  {
7801  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7802  return res;
7803  }
7804 
7805  void* pMappedData = VMA_NULL;
7806  if(map)
7807  {
7808  res = (*m_VulkanFunctions.vkMapMemory)(
7809  m_hDevice,
7810  hMemory,
7811  0,
7812  VK_WHOLE_SIZE,
7813  0,
7814  &pMappedData);
7815  if(res < 0)
7816  {
7817  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7818  FreeVulkanMemory(memTypeIndex, size, hMemory);
7819  return res;
7820  }
7821  }
7822 
7823  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7824  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7825  (*pAllocation)->SetUserData(this, pUserData);
7826 
7827  // Register it in m_pDedicatedAllocations.
7828  {
7829  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7830  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7831  VMA_ASSERT(pDedicatedAllocations);
7832  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7833  }
7834 
7835  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7836 
7837  return VK_SUCCESS;
7838 }
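
/* A usage sketch: client code can request a dedicated allocation explicitly,
   e.g. for a large render target. imageCreateInfo is assumed to be a filled
   VkImageCreateInfo:

       VmaAllocationCreateInfo allocCreateInfo = {};
       allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
       allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

       VkImage image;
       VmaAllocation allocation;
       vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo,
           &image, &allocation, nullptr);
*/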
7839 
7840 void VmaAllocator_T::GetBufferMemoryRequirements(
7841  VkBuffer hBuffer,
7842  VkMemoryRequirements& memReq,
7843  bool& requiresDedicatedAllocation,
7844  bool& prefersDedicatedAllocation) const
7845 {
7846 #if VMA_DEDICATED_ALLOCATION
7847  if(m_UseKhrDedicatedAllocation)
7848  {
7849  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7850  memReqInfo.buffer = hBuffer;
7851 
7852  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7853 
7854  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7855  memReq2.pNext = &memDedicatedReq;
7856 
7857  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7858 
7859  memReq = memReq2.memoryRequirements;
7860  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7861  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7862  }
7863  else
7864 #endif // #if VMA_DEDICATED_ALLOCATION
7865  {
7866  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7867  requiresDedicatedAllocation = false;
7868  prefersDedicatedAllocation = false;
7869  }
7870 }
7871 
7872 void VmaAllocator_T::GetImageMemoryRequirements(
7873  VkImage hImage,
7874  VkMemoryRequirements& memReq,
7875  bool& requiresDedicatedAllocation,
7876  bool& prefersDedicatedAllocation) const
7877 {
7878 #if VMA_DEDICATED_ALLOCATION
7879  if(m_UseKhrDedicatedAllocation)
7880  {
7881  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7882  memReqInfo.image = hImage;
7883 
7884  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7885 
7886  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7887  memReq2.pNext = &memDedicatedReq;
7888 
7889  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7890 
7891  memReq = memReq2.memoryRequirements;
7892  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7893  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7894  }
7895  else
7896 #endif // #if VMA_DEDICATED_ALLOCATION
7897  {
7898  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7899  requiresDedicatedAllocation = false;
7900  prefersDedicatedAllocation = false;
7901  }
7902 }
7903 
7904 VkResult VmaAllocator_T::AllocateMemory(
7905  const VkMemoryRequirements& vkMemReq,
7906  bool requiresDedicatedAllocation,
7907  bool prefersDedicatedAllocation,
7908  VkBuffer dedicatedBuffer,
7909  VkImage dedicatedImage,
7910  const VmaAllocationCreateInfo& createInfo,
7911  VmaSuballocationType suballocType,
7912  VmaAllocation* pAllocation)
7913 {
7914  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7915  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7916  {
7917  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7918  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7919  }
7920  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7921  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7922  {
7923  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7924  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7925  }
7926  if(requiresDedicatedAllocation)
7927  {
7928  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7929  {
7930  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7931  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7932  }
7933  if(createInfo.pool != VK_NULL_HANDLE)
7934  {
7935  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7936  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7937  }
7938  }
7939  if((createInfo.pool != VK_NULL_HANDLE) &&
7940  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7941  {
7942  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7943  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7944  }
7945 
7946  if(createInfo.pool != VK_NULL_HANDLE)
7947  {
7948  return createInfo.pool->m_BlockVector.Allocate(
7949  createInfo.pool,
7950  m_CurrentFrameIndex.load(),
7951  vkMemReq,
7952  createInfo,
7953  suballocType,
7954  pAllocation);
7955  }
7956  else
7957  {
7958  // Bit mask of Vulkan memory types acceptable for this allocation.
7959  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7960  uint32_t memTypeIndex = UINT32_MAX;
7961  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7962  if(res == VK_SUCCESS)
7963  {
7964  res = AllocateMemoryOfType(
7965  vkMemReq,
7966  requiresDedicatedAllocation || prefersDedicatedAllocation,
7967  dedicatedBuffer,
7968  dedicatedImage,
7969  createInfo,
7970  memTypeIndex,
7971  suballocType,
7972  pAllocation);
7973  // Succeeded on first try.
7974  if(res == VK_SUCCESS)
7975  {
7976  return res;
7977  }
7978  // Allocation from this memory type failed. Try other compatible memory types.
7979  else
7980  {
7981  for(;;)
7982  {
7983  // Remove old memTypeIndex from list of possibilities.
7984  memoryTypeBits &= ~(1u << memTypeIndex);
7985  // Find alternative memTypeIndex.
7986  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7987  if(res == VK_SUCCESS)
7988  {
7989  res = AllocateMemoryOfType(
7990  vkMemReq,
7991  requiresDedicatedAllocation || prefersDedicatedAllocation,
7992  dedicatedBuffer,
7993  dedicatedImage,
7994  createInfo,
7995  memTypeIndex,
7996  suballocType,
7997  pAllocation);
7998  // Allocation from this alternative memory type succeeded.
7999  if(res == VK_SUCCESS)
8000  {
8001  return res;
8002  }
8003  // else: Allocation from this memory type failed. Try next one - next loop iteration.
8004  }
8005  // No other matching memory type index could be found.
8006  else
8007  {
8008  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
8009  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
8010  }
8011  }
8012  }
8013  }
8014  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
8015  else
8016  return res;
8017  }
8018 }
8019 
8020 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
8021 {
8022  VMA_ASSERT(allocation);
8023 
8024  if(allocation->CanBecomeLost() == false ||
8025  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
8026  {
8027  switch(allocation->GetType())
8028  {
8029  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8030  {
8031  VmaBlockVector* pBlockVector = VMA_NULL;
8032  VmaPool hPool = allocation->GetPool();
8033  if(hPool != VK_NULL_HANDLE)
8034  {
8035  pBlockVector = &hPool->m_BlockVector;
8036  }
8037  else
8038  {
8039  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8040  pBlockVector = m_pBlockVectors[memTypeIndex];
8041  }
8042  pBlockVector->Free(allocation);
8043  }
8044  break;
8045  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8046  FreeDedicatedMemory(allocation);
8047  break;
8048  default:
8049  VMA_ASSERT(0);
8050  }
8051  }
8052 
8053  allocation->SetUserData(this, VMA_NULL);
8054  vma_delete(this, allocation);
8055 }
8056 
8057 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
8058 {
8059  // Initialize.
8060  InitStatInfo(pStats->total);
8061  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
8062  InitStatInfo(pStats->memoryType[i]);
8063  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
8064  InitStatInfo(pStats->memoryHeap[i]);
8065 
8066  // Process default pools.
8067  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8068  {
8069  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
8070  VMA_ASSERT(pBlockVector);
8071  pBlockVector->AddStats(pStats);
8072  }
8073 
8074  // Process custom pools.
8075  {
8076  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8077  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
8078  {
8079  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
8080  }
8081  }
8082 
8083  // Process dedicated allocations.
8084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8085  {
8086  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
8087  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8088  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8089  VMA_ASSERT(pDedicatedAllocVector);
8090  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
8091  {
8092  VmaStatInfo allocationStatInfo;
8093  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
8094  VmaAddStatInfo(pStats->total, allocationStatInfo);
8095  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
8096  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
8097  }
8098  }
8099 
8100  // Postprocess.
8101  VmaPostprocessCalcStatInfo(pStats->total);
8102  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
8103  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
8104  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
8105  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
8106 }
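
/* A usage sketch of the corresponding public entry point:

       VmaStats stats;
       vmaCalculateStats(allocator, &stats);
       // stats.total, stats.memoryType[i] and stats.memoryHeap[h] each hold a
       // VmaStatInfo with block/allocation counts and used/unused byte totals.
       printf("Used bytes: %llu\n", (unsigned long long)stats.total.usedBytes);
*/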
8107 
8108 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // 0x1002
8109 
8110 VkResult VmaAllocator_T::Defragment(
8111  VmaAllocation* pAllocations,
8112  size_t allocationCount,
8113  VkBool32* pAllocationsChanged,
8114  const VmaDefragmentationInfo* pDefragmentationInfo,
8115  VmaDefragmentationStats* pDefragmentationStats)
8116 {
8117  if(pAllocationsChanged != VMA_NULL)
8118  {
8119  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
8120  }
8121  if(pDefragmentationStats != VMA_NULL)
8122  {
8123  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8124  }
8125 
8126  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8127 
8128  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8129 
8130  const size_t poolCount = m_Pools.size();
8131 
8132  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8133  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8134  {
8135  VmaAllocation hAlloc = pAllocations[allocIndex];
8136  VMA_ASSERT(hAlloc);
8137  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8138  // DedicatedAlloc cannot be defragmented.
8139  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8140  // Only HOST_VISIBLE memory types can be defragmented.
8141  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8142  // Lost allocation cannot be defragmented.
8143  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8144  {
8145  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8146 
8147  const VmaPool hAllocPool = hAlloc->GetPool();
8148  // This allocation belongs to a custom pool.
8149  if(hAllocPool != VK_NULL_HANDLE)
8150  {
8151  pAllocBlockVector = &hAllocPool->GetBlockVector();
8152  }
8153  // This allocation belongs to the general pool.
8154  else
8155  {
8156  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8157  }
8158 
8159  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8160 
8161  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8162  &pAllocationsChanged[allocIndex] : VMA_NULL;
8163  pDefragmentator->AddAllocation(hAlloc, pChanged);
8164  }
8165  }
8166 
8167  VkResult result = VK_SUCCESS;
8168 
8169  // ======== Main processing.
8170 
8171  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE; // no limit; avoids truncating SIZE_MAX on 32-bit targets
8172  uint32_t maxAllocationsToMove = UINT32_MAX;
8173  if(pDefragmentationInfo != VMA_NULL)
8174  {
8175  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8176  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8177  }
8178 
8179  // Process standard memory.
8180  for(uint32_t memTypeIndex = 0;
8181  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8182  ++memTypeIndex)
8183  {
8184  // Only HOST_VISIBLE memory types can be defragmented.
8185  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8186  {
8187  result = m_pBlockVectors[memTypeIndex]->Defragment(
8188  pDefragmentationStats,
8189  maxBytesToMove,
8190  maxAllocationsToMove);
8191  }
8192  }
8193 
8194  // Process custom pools.
8195  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8196  {
8197  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8198  pDefragmentationStats,
8199  maxBytesToMove,
8200  maxAllocationsToMove);
8201  }
8202 
8203  // ======== Destroy defragmentators.
8204 
8205  // Process custom pools.
8206  for(size_t poolIndex = poolCount; poolIndex--; )
8207  {
8208  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8209  }
8210 
8211  // Process standard memory.
8212  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8213  {
8214  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8215  {
8216  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8217  }
8218  }
8219 
8220  return result;
8221 }
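
/* A usage sketch (assuming `allocations` points to `allocationCount`
   VmaAllocation handles of host-visible, non-dedicated allocations). After
   defragmentation, any allocation whose entry in `changed` is VK_TRUE has a
   new memory/offset, so buffers or images bound to it must be destroyed,
   recreated, and rebound, e.g. with vmaBindBufferMemory:

       std::vector<VkBool32> changed(allocationCount);
       VmaDefragmentationStats defragStats = {};
       vmaDefragment(allocator, allocations, allocationCount,
           changed.data(), nullptr, &defragStats);
*/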
8222 
8223 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
8224 {
8225  if(hAllocation->CanBecomeLost())
8226  {
8227  /*
8228  Warning: This is a carefully designed algorithm.
8229  Do not modify unless you really know what you're doing :)
8230  */
8231  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8232  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8233  for(;;)
8234  {
8235  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8236  {
8237  pAllocationInfo->memoryType = UINT32_MAX;
8238  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
8239  pAllocationInfo->offset = 0;
8240  pAllocationInfo->size = hAllocation->GetSize();
8241  pAllocationInfo->pMappedData = VMA_NULL;
8242  pAllocationInfo->pUserData = hAllocation->GetUserData();
8243  return;
8244  }
8245  else if(localLastUseFrameIndex == localCurrFrameIndex)
8246  {
8247  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8248  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8249  pAllocationInfo->offset = hAllocation->GetOffset();
8250  pAllocationInfo->size = hAllocation->GetSize();
8251  pAllocationInfo->pMappedData = VMA_NULL;
8252  pAllocationInfo->pUserData = hAllocation->GetUserData();
8253  return;
8254  }
8255  else // Last use time earlier than current time.
8256  {
8257  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8258  {
8259  localLastUseFrameIndex = localCurrFrameIndex;
8260  }
8261  }
8262  }
8263  }
8264  else
8265  {
8266 #if VMA_STATS_STRING_ENABLED
8267  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8268  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8269  for(;;)
8270  {
8271  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8272  if(localLastUseFrameIndex == localCurrFrameIndex)
8273  {
8274  break;
8275  }
8276  else // Last use time earlier than current time.
8277  {
8278  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8279  {
8280  localLastUseFrameIndex = localCurrFrameIndex;
8281  }
8282  }
8283  }
8284 #endif
8285 
8286  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
8287  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
8288  pAllocationInfo->offset = hAllocation->GetOffset();
8289  pAllocationInfo->size = hAllocation->GetSize();
8290  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
8291  pAllocationInfo->pUserData = hAllocation->GetUserData();
8292  }
8293 }
8294 
8295 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
8296 {
8297  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
8298  if(hAllocation->CanBecomeLost())
8299  {
8300  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8301  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8302  for(;;)
8303  {
8304  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8305  {
8306  return false;
8307  }
8308  else if(localLastUseFrameIndex == localCurrFrameIndex)
8309  {
8310  return true;
8311  }
8312  else // Last use time earlier than current time.
8313  {
8314  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8315  {
8316  localLastUseFrameIndex = localCurrFrameIndex;
8317  }
8318  }
8319  }
8320  }
8321  else
8322  {
8323 #if VMA_STATS_STRING_ENABLED
8324  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
8325  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
8326  for(;;)
8327  {
8328  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
8329  if(localLastUseFrameIndex == localCurrFrameIndex)
8330  {
8331  break;
8332  }
8333  else // Last use time earlier than current time.
8334  {
8335  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
8336  {
8337  localLastUseFrameIndex = localCurrFrameIndex;
8338  }
8339  }
8340  }
8341 #endif
8342 
8343  return true;
8344  }
8345 }
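
/* A usage sketch of the lost-allocation protocol these two functions
   implement (frame indices are application-defined):

       // Once per frame:
       vmaSetCurrentFrameIndex(allocator, frameIndex);

       // Before using an allocation created with
       // VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:
       if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
       {
           // The allocation became lost: destroy the old buffer/image
           // and recreate it.
       }
*/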
8346 
8347 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8348 {
8349  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8350 
8351  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8352 
8353  if(newCreateInfo.maxBlockCount == 0)
8354  {
8355  newCreateInfo.maxBlockCount = SIZE_MAX;
8356  }
8357  if(newCreateInfo.blockSize == 0)
8358  {
8359  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8360  }
8361 
8362  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8363 
8364  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8365  if(res != VK_SUCCESS)
8366  {
8367  vma_delete(this, *pPool);
8368  *pPool = VMA_NULL;
8369  return res;
8370  }
8371 
8372  // Add to m_Pools.
8373  {
8374  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8375  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8376  }
8377 
8378  return VK_SUCCESS;
8379 }
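
/* A usage sketch for creating a custom pool (block size and counts are
   hypothetical values):

       VmaAllocationCreateInfo sampleAllocCreateInfo = {};
       sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

       uint32_t memTypeIndex;
       vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &sampleAllocCreateInfo, &memTypeIndex);

       VmaPoolCreateInfo poolCreateInfo = {};
       poolCreateInfo.memoryTypeIndex = memTypeIndex;
       poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
       poolCreateInfo.minBlockCount = 1;

       VmaPool pool;
       vmaCreatePool(allocator, &poolCreateInfo, &pool);
       // ... allocate with VmaAllocationCreateInfo::pool = pool ...
       vmaDestroyPool(allocator, pool);
*/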
8380 
8381 void VmaAllocator_T::DestroyPool(VmaPool pool)
8382 {
8383  // Remove from m_Pools.
8384  {
8385  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8386  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8387  VMA_ASSERT(success && "Pool not found in Allocator.");
8388  }
8389 
8390  vma_delete(this, pool);
8391 }
8392 
8393 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
8394 {
8395  pool->m_BlockVector.GetPoolStats(pPoolStats);
8396 }
8397 
8398 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
8399 {
8400  m_CurrentFrameIndex.store(frameIndex);
8401 }
8402 
8403 void VmaAllocator_T::MakePoolAllocationsLost(
8404  VmaPool hPool,
8405  size_t* pLostAllocationCount)
8406 {
8407  hPool->m_BlockVector.MakePoolAllocationsLost(
8408  m_CurrentFrameIndex.load(),
8409  pLostAllocationCount);
8410 }
8411 
8412 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
8413 {
8414  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
8415  (*pAllocation)->InitLost();
8416 }
8417 
8418 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
8419 {
8420  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
8421 
8422  VkResult res;
8423  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8424  {
8425  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8426  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
8427  {
8428  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8429  if(res == VK_SUCCESS)
8430  {
8431  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
8432  }
8433  }
8434  else
8435  {
8436  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
8437  }
8438  }
8439  else
8440  {
8441  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
8442  }
8443 
8444  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
8445  {
8446  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
8447  }
8448 
8449  return res;
8450 }
8451 
8452 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
8453 {
8454  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
8455  {
8456  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
8457  }
8458 
8459  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
8460 
8461  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
8462  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
8463  {
8464  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
8465  m_HeapSizeLimit[heapIndex] += size;
8466  }
8467 }
8468 
8469 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
8470 {
8471  if(hAllocation->CanBecomeLost())
8472  {
8473  return VK_ERROR_MEMORY_MAP_FAILED;
8474  }
8475 
8476  switch(hAllocation->GetType())
8477  {
8478  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8479  {
8480  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8481  char *pBytes = VMA_NULL;
8482  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
8483  if(res == VK_SUCCESS)
8484  {
8485  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
8486  hAllocation->BlockAllocMap();
8487  }
8488  return res;
8489  }
8490  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8491  return hAllocation->DedicatedAllocMap(this, ppData);
8492  default:
8493  VMA_ASSERT(0);
8494  return VK_ERROR_MEMORY_MAP_FAILED;
8495  }
8496 }
8497 
8498 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
8499 {
8500  switch(hAllocation->GetType())
8501  {
8502  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8503  {
8504  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
8505  hAllocation->BlockAllocUnmap();
8506  pBlock->Unmap(this, 1);
8507  }
8508  break;
8509  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8510  hAllocation->DedicatedAllocUnmap(this);
8511  break;
8512  default:
8513  VMA_ASSERT(0);
8514  }
8515 }
8516 
8517 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8518 {
8519  VkResult res = VK_SUCCESS;
8520  switch(hAllocation->GetType())
8521  {
8522  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8523  res = GetVulkanFunctions().vkBindBufferMemory(
8524  m_hDevice,
8525  hBuffer,
8526  hAllocation->GetMemory(),
8527  0); //memoryOffset
8528  break;
8529  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8530  {
8531  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8532  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8533  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8534  break;
8535  }
8536  default:
8537  VMA_ASSERT(0);
8538  }
8539  return res;
8540 }
8541 
8542 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8543 {
8544  VkResult res = VK_SUCCESS;
8545  switch(hAllocation->GetType())
8546  {
8547  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8548  res = GetVulkanFunctions().vkBindImageMemory(
8549  m_hDevice,
8550  hImage,
8551  hAllocation->GetMemory(),
8552  0); //memoryOffset
8553  break;
8554  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8555  {
8556  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8557  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8558  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8559  break;
8560  }
8561  default:
8562  VMA_ASSERT(0);
8563  }
8564  return res;
8565 }
8566 
8567 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
8568 {
8569  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
8570 
8571  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8572  {
8573  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8574  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8575  VMA_ASSERT(pDedicatedAllocations);
8576  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8577  VMA_ASSERT(success);
8578  }
8579 
8580  VkDeviceMemory hMemory = allocation->GetMemory();
8581 
8582  if(allocation->GetMappedData() != VMA_NULL)
8583  {
8584  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8585  }
8586 
8587  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8588 
8589  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8590 }
8591 
8592 #if VMA_STATS_STRING_ENABLED
8593 
8594 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8595 {
8596  bool dedicatedAllocationsStarted = false;
8597  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8598  {
8599  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8600  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8601  VMA_ASSERT(pDedicatedAllocVector);
8602  if(pDedicatedAllocVector->empty() == false)
8603  {
8604  if(dedicatedAllocationsStarted == false)
8605  {
8606  dedicatedAllocationsStarted = true;
8607  json.WriteString("DedicatedAllocations");
8608  json.BeginObject();
8609  }
8610 
8611  json.BeginString("Type ");
8612  json.ContinueString(memTypeIndex);
8613  json.EndString();
8614 
8615  json.BeginArray();
8616 
8617  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8618  {
8619  json.BeginObject(true);
8620  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8621  hAlloc->PrintParameters(json);
8622  json.EndObject();
8623  }
8624 
8625  json.EndArray();
8626  }
8627  }
8628  if(dedicatedAllocationsStarted)
8629  {
8630  json.EndObject();
8631  }
8632 
8633  {
8634  bool allocationsStarted = false;
8635  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8636  {
8637  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8638  {
8639  if(allocationsStarted == false)
8640  {
8641  allocationsStarted = true;
8642  json.WriteString("DefaultPools");
8643  json.BeginObject();
8644  }
8645 
8646  json.BeginString("Type ");
8647  json.ContinueString(memTypeIndex);
8648  json.EndString();
8649 
8650  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8651  }
8652  }
8653  if(allocationsStarted)
8654  {
8655  json.EndObject();
8656  }
8657  }
8658 
8659  {
8660  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8661  const size_t poolCount = m_Pools.size();
8662  if(poolCount > 0)
8663  {
8664  json.WriteString("Pools");
8665  json.BeginArray();
8666  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8667  {
8668  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8669  }
8670  json.EndArray();
8671  }
8672  }
8673 }
8674 
8675 #endif // #if VMA_STATS_STRING_ENABLED
8676 
8677 static VkResult AllocateMemoryForImage(
8678  VmaAllocator allocator,
8679  VkImage image,
8680  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8681  VmaSuballocationType suballocType,
8682  VmaAllocation* pAllocation)
8683 {
8684  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8685 
8686  VkMemoryRequirements vkMemReq = {};
8687  bool requiresDedicatedAllocation = false;
8688  bool prefersDedicatedAllocation = false;
8689  allocator->GetImageMemoryRequirements(image, vkMemReq,
8690  requiresDedicatedAllocation, prefersDedicatedAllocation);
8691 
8692  return allocator->AllocateMemory(
8693  vkMemReq,
8694  requiresDedicatedAllocation,
8695  prefersDedicatedAllocation,
8696  VK_NULL_HANDLE, // dedicatedBuffer
8697  image, // dedicatedImage
8698  *pAllocationCreateInfo,
8699  suballocType,
8700  pAllocation);
8701 }
8702 
8703 
8704 // Public interface
8705 
8706 VkResult vmaCreateAllocator(
8707  const VmaAllocatorCreateInfo* pCreateInfo,
8708  VmaAllocator* pAllocator)
8709 {
8710  VMA_ASSERT(pCreateInfo && pAllocator);
8711  VMA_DEBUG_LOG("vmaCreateAllocator");
8712  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8713  return VK_SUCCESS;
8714 }
8715 
8716 void vmaDestroyAllocator(
8717  VmaAllocator allocator)
8718 {
8719  if(allocator != VK_NULL_HANDLE)
8720  {
8721  VMA_DEBUG_LOG("vmaDestroyAllocator");
8722  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8723  vma_delete(&allocationCallbacks, allocator);
8724  }
8725 }
8726 
8727 void vmaGetPhysicalDeviceProperties(
8728  VmaAllocator allocator,
8729  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8730 {
8731  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8732  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8733 }
8734 
8735 void vmaGetMemoryProperties(
8736  VmaAllocator allocator,
8737  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8738 {
8739  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8740  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8741 }
8742 
8743 void vmaGetMemoryTypeProperties(
8744  VmaAllocator allocator,
8745  uint32_t memoryTypeIndex,
8746  VkMemoryPropertyFlags* pFlags)
8747 {
8748  VMA_ASSERT(allocator && pFlags);
8749  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8750  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8751 }
8752 
8753 void vmaSetCurrentFrameIndex(
8754  VmaAllocator allocator,
8755  uint32_t frameIndex)
8756 {
8757  VMA_ASSERT(allocator);
8758  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8759 
8760  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8761 
8762  allocator->SetCurrentFrameIndex(frameIndex);
8763 }
8764 
8765 void vmaCalculateStats(
8766  VmaAllocator allocator,
8767  VmaStats* pStats)
8768 {
8769  VMA_ASSERT(allocator && pStats);
8770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8771  allocator->CalculateStats(pStats);
8772 }
8773 
8774 #if VMA_STATS_STRING_ENABLED
8775 
8776 void vmaBuildStatsString(
8777  VmaAllocator allocator,
8778  char** ppStatsString,
8779  VkBool32 detailedMap)
8780 {
8781  VMA_ASSERT(allocator && ppStatsString);
8782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8783 
8784  VmaStringBuilder sb(allocator);
8785  {
8786  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8787  json.BeginObject();
8788 
8789  VmaStats stats;
8790  allocator->CalculateStats(&stats);
8791 
8792  json.WriteString("Total");
8793  VmaPrintStatInfo(json, stats.total);
8794 
8795  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8796  {
8797  json.BeginString("Heap ");
8798  json.ContinueString(heapIndex);
8799  json.EndString();
8800  json.BeginObject();
8801 
8802  json.WriteString("Size");
8803  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8804 
8805  json.WriteString("Flags");
8806  json.BeginArray(true);
8807  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8808  {
8809  json.WriteString("DEVICE_LOCAL");
8810  }
8811  json.EndArray();
8812 
8813  if(stats.memoryHeap[heapIndex].blockCount > 0)
8814  {
8815  json.WriteString("Stats");
8816  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8817  }
8818 
8819  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8820  {
8821  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8822  {
8823  json.BeginString("Type ");
8824  json.ContinueString(typeIndex);
8825  json.EndString();
8826 
8827  json.BeginObject();
8828 
8829  json.WriteString("Flags");
8830  json.BeginArray(true);
8831  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8832  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8833  {
8834  json.WriteString("DEVICE_LOCAL");
8835  }
8836  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8837  {
8838  json.WriteString("HOST_VISIBLE");
8839  }
8840  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8841  {
8842  json.WriteString("HOST_COHERENT");
8843  }
8844  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8845  {
8846  json.WriteString("HOST_CACHED");
8847  }
8848  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8849  {
8850  json.WriteString("LAZILY_ALLOCATED");
8851  }
8852  json.EndArray();
8853 
8854  if(stats.memoryType[typeIndex].blockCount > 0)
8855  {
8856  json.WriteString("Stats");
8857  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8858  }
8859 
8860  json.EndObject();
8861  }
8862  }
8863 
8864  json.EndObject();
8865  }
8866  if(detailedMap == VK_TRUE)
8867  {
8868  allocator->PrintDetailedMap(json);
8869  }
8870 
8871  json.EndObject();
8872  }
8873 
8874  const size_t len = sb.GetLength();
8875  char* const pChars = vma_new_array(allocator, char, len + 1);
8876  if(len > 0)
8877  {
8878  memcpy(pChars, sb.GetData(), len);
8879  }
8880  pChars[len] = '\0';
8881  *ppStatsString = pChars;
8882 }
8883 
8884 void vmaFreeStatsString(
8885  VmaAllocator allocator,
8886  char* pStatsString)
8887 {
8888  if(pStatsString != VMA_NULL)
8889  {
8890  VMA_ASSERT(allocator);
8891  size_t len = strlen(pStatsString);
8892  vma_delete_array(allocator, pStatsString, len + 1);
8893  }
8894 }
8895 
8896 #endif // #if VMA_STATS_STRING_ENABLED
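
/* A usage sketch: the returned string is JSON and must be released with
   vmaFreeStatsString:

       char* statsString = nullptr;
       vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
       printf("%s\n", statsString);
       vmaFreeStatsString(allocator, statsString);
*/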
8897 
8898 /*
8899 This function is not protected by any mutex because it just reads immutable data.
8900 */
8901 VkResult vmaFindMemoryTypeIndex(
8902  VmaAllocator allocator,
8903  uint32_t memoryTypeBits,
8904  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8905  uint32_t* pMemoryTypeIndex)
8906 {
8907  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8908  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8909  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8910 
8911  if(pAllocationCreateInfo->memoryTypeBits != 0)
8912  {
8913  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8914  }
8915 
8916  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8917  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8918 
8919  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
8920  if(mapped)
8921  {
8922  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8923  }
8924 
8925  // Convert usage to requiredFlags and preferredFlags.
8926  switch(pAllocationCreateInfo->usage)
8927  {
8928  case VMA_MEMORY_USAGE_UNKNOWN:
8929  break;
8930  case VMA_MEMORY_USAGE_GPU_ONLY:
8931  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
8932  {
8933  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8934  }
8935  break;
8936  case VMA_MEMORY_USAGE_CPU_ONLY:
8937  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8938  break;
8939  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8940  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8941  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
8942  {
8943  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8944  }
8945  break;
8946  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8947  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8948  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8949  break;
8950  default:
8951  break;
8952  }
8953 
8954  *pMemoryTypeIndex = UINT32_MAX;
8955  uint32_t minCost = UINT32_MAX;
8956  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8957  memTypeIndex < allocator->GetMemoryTypeCount();
8958  ++memTypeIndex, memTypeBit <<= 1)
8959  {
8960  // This memory type is acceptable according to memoryTypeBits bitmask.
8961  if((memTypeBit & memoryTypeBits) != 0)
8962  {
8963  const VkMemoryPropertyFlags currFlags =
8964  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8965  // This memory type contains requiredFlags.
8966  if((requiredFlags & ~currFlags) == 0)
8967  {
8968  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8969  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8970  // Remember memory type with lowest cost.
8971  if(currCost < minCost)
8972  {
8973  *pMemoryTypeIndex = memTypeIndex;
8974  if(currCost == 0)
8975  {
8976  return VK_SUCCESS;
8977  }
8978  minCost = currCost;
8979  }
8980  }
8981  }
8982  }
8983  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8984 }
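
/* A usage sketch: finding a memory type for a staging buffer. Passing
   UINT32_MAX as memoryTypeBits means "no restriction from Vulkan memory
   requirements":

       VmaAllocationCreateInfo allocCreateInfo = {};
       allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

       uint32_t memTypeIndex;
       VkResult res = vmaFindMemoryTypeIndex(
           allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
       // res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.
*/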
8985 
8986 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8987  VmaAllocator allocator,
8988  const VkBufferCreateInfo* pBufferCreateInfo,
8989  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8990  uint32_t* pMemoryTypeIndex)
8991 {
8992  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8993  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8994  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8995  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8996 
8997  const VkDevice hDev = allocator->m_hDevice;
8998  VkBuffer hBuffer = VK_NULL_HANDLE;
8999  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
9000  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
9001  if(res == VK_SUCCESS)
9002  {
9003  VkMemoryRequirements memReq = {};
9004  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
9005  hDev, hBuffer, &memReq);
9006 
9007  res = vmaFindMemoryTypeIndex(
9008  allocator,
9009  memReq.memoryTypeBits,
9010  pAllocationCreateInfo,
9011  pMemoryTypeIndex);
9012 
9013  allocator->GetVulkanFunctions().vkDestroyBuffer(
9014  hDev, hBuffer, allocator->GetAllocationCallbacks());
9015  }
9016  return res;
9017 }
9018 
9019 VkResult vmaFindMemoryTypeIndexForImageInfo(
9020  VmaAllocator allocator,
9021  const VkImageCreateInfo* pImageCreateInfo,
9022  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9023  uint32_t* pMemoryTypeIndex)
9024 {
9025  VMA_ASSERT(allocator != VK_NULL_HANDLE);
9026  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
9027  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
9028  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
9029 
9030  const VkDevice hDev = allocator->m_hDevice;
9031  VkImage hImage = VK_NULL_HANDLE;
9032  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
9033  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
9034  if(res == VK_SUCCESS)
9035  {
9036  VkMemoryRequirements memReq = {};
9037  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
9038  hDev, hImage, &memReq);
9039 
9040  res = vmaFindMemoryTypeIndex(
9041  allocator,
9042  memReq.memoryTypeBits,
9043  pAllocationCreateInfo,
9044  pMemoryTypeIndex);
9045 
9046  allocator->GetVulkanFunctions().vkDestroyImage(
9047  hDev, hImage, allocator->GetAllocationCallbacks());
9048  }
9049  return res;
9050 }
9051 
9052 VkResult vmaCreatePool(
9053  VmaAllocator allocator,
9054  const VmaPoolCreateInfo* pCreateInfo,
9055  VmaPool* pPool)
9056 {
9057  VMA_ASSERT(allocator && pCreateInfo && pPool);
9058 
9059  VMA_DEBUG_LOG("vmaCreatePool");
9060 
9061  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9062 
9063  return allocator->CreatePool(pCreateInfo, pPool);
9064 }
9065 
9066 void vmaDestroyPool(
9067  VmaAllocator allocator,
9068  VmaPool pool)
9069 {
9070  VMA_ASSERT(allocator);
9071 
9072  if(pool == VK_NULL_HANDLE)
9073  {
9074  return;
9075  }
9076 
9077  VMA_DEBUG_LOG("vmaDestroyPool");
9078 
9079  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9080 
9081  allocator->DestroyPool(pool);
9082 }
9083 
9084 void vmaGetPoolStats(
9085  VmaAllocator allocator,
9086  VmaPool pool,
9087  VmaPoolStats* pPoolStats)
9088 {
9089  VMA_ASSERT(allocator && pool && pPoolStats);
9090 
9091  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9092 
9093  allocator->GetPoolStats(pool, pPoolStats);
9094 }
9095 
9096 void vmaMakePoolAllocationsLost(
9097  VmaAllocator allocator,
9098  VmaPool pool,
9099  size_t* pLostAllocationCount)
9100 {
9101  VMA_ASSERT(allocator && pool);
9102 
9103  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9104 
9105  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
9106 }
9107 
9108 VkResult vmaAllocateMemory(
9109  VmaAllocator allocator,
9110  const VkMemoryRequirements* pVkMemoryRequirements,
9111  const VmaAllocationCreateInfo* pCreateInfo,
9112  VmaAllocation* pAllocation,
9113  VmaAllocationInfo* pAllocationInfo)
9114 {
9115  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
9116 
9117  VMA_DEBUG_LOG("vmaAllocateMemory");
9118 
9119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9120 
9121  VkResult result = allocator->AllocateMemory(
9122  *pVkMemoryRequirements,
9123  false, // requiresDedicatedAllocation
9124  false, // prefersDedicatedAllocation
9125  VK_NULL_HANDLE, // dedicatedBuffer
9126  VK_NULL_HANDLE, // dedicatedImage
9127  *pCreateInfo,
9128  VMA_SUBALLOCATION_TYPE_UNKNOWN,
9129  pAllocation);
9130 
9131  if(pAllocationInfo && result == VK_SUCCESS)
9132  {
9133  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9134  }
9135 
9136  return result;
9137 }
9138 
9139 VkResult vmaAllocateMemoryForBuffer(
9140  VmaAllocator allocator,
9141  VkBuffer buffer,
9142  const VmaAllocationCreateInfo* pCreateInfo,
9143  VmaAllocation* pAllocation,
9144  VmaAllocationInfo* pAllocationInfo)
9145 {
9146  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9147 
9148  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
9149 
9150  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9151 
9152  VkMemoryRequirements vkMemReq = {};
9153  bool requiresDedicatedAllocation = false;
9154  bool prefersDedicatedAllocation = false;
9155  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9156  requiresDedicatedAllocation,
9157  prefersDedicatedAllocation);
9158 
9159  VkResult result = allocator->AllocateMemory(
9160  vkMemReq,
9161  requiresDedicatedAllocation,
9162  prefersDedicatedAllocation,
9163  buffer, // dedicatedBuffer
9164  VK_NULL_HANDLE, // dedicatedImage
9165  *pCreateInfo,
9166  VMA_SUBALLOCATION_TYPE_BUFFER,
9167  pAllocation);
9168 
9169  if(pAllocationInfo && result == VK_SUCCESS)
9170  {
9171  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9172  }
9173 
9174  return result;
9175 }
9176 
9177 VkResult vmaAllocateMemoryForImage(
9178  VmaAllocator allocator,
9179  VkImage image,
9180  const VmaAllocationCreateInfo* pCreateInfo,
9181  VmaAllocation* pAllocation,
9182  VmaAllocationInfo* pAllocationInfo)
9183 {
9184  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9185 
9186  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9187 
9188  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9189 
9190  VkResult result = AllocateMemoryForImage(
9191  allocator,
9192  image,
9193  pCreateInfo,
9194  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9195  pAllocation);
9196 
9197  if(pAllocationInfo && result == VK_SUCCESS)
9198  {
9199  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9200  }
9201 
9202  return result;
9203 }
9204 
9205 void vmaFreeMemory(
9206  VmaAllocator allocator,
9207  VmaAllocation allocation)
9208 {
9209  VMA_ASSERT(allocator);
9210  VMA_DEBUG_LOG("vmaFreeMemory");
9211  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9212  if(allocation != VK_NULL_HANDLE)
9213  {
9214  allocator->FreeMemory(allocation);
9215  }
9216 }
9217 
9218 void vmaGetAllocationInfo(
9219  VmaAllocator allocator,
9220  VmaAllocation allocation,
9221  VmaAllocationInfo* pAllocationInfo)
9222 {
9223  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9224 
9225  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9226 
9227  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9228 }
9229 
9230 VkBool32 vmaTouchAllocation(
9231  VmaAllocator allocator,
9232  VmaAllocation allocation)
9233 {
9234  VMA_ASSERT(allocator && allocation);
9235 
9236  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9237 
9238  return allocator->TouchAllocation(allocation);
9239 }
9240 
9241 void vmaSetAllocationUserData(
9242  VmaAllocator allocator,
9243  VmaAllocation allocation,
9244  void* pUserData)
9245 {
9246  VMA_ASSERT(allocator && allocation);
9247 
9248  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9249 
9250  allocation->SetUserData(allocator, pUserData);
9251 }
9252 
9253 void vmaCreateLostAllocation(
9254  VmaAllocator allocator,
9255  VmaAllocation* pAllocation)
9256 {
9257  VMA_ASSERT(allocator && pAllocation);
9258 
9259  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9260 
9261  allocator->CreateLostAllocation(pAllocation);
9262 }
9263 
9264 VkResult vmaMapMemory(
9265  VmaAllocator allocator,
9266  VmaAllocation allocation,
9267  void** ppData)
9268 {
9269  VMA_ASSERT(allocator && allocation && ppData);
9270 
9271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9272 
9273  return allocator->Map(allocation, ppData);
9274 }
9275 
9276 void vmaUnmapMemory(
9277  VmaAllocator allocator,
9278  VmaAllocation allocation)
9279 {
9280  VMA_ASSERT(allocator && allocation);
9281 
9282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9283 
9284  allocator->Unmap(allocation);
9285 }
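
/* A usage sketch (valid only for allocations in HOST_VISIBLE memory; Map and
   Unmap calls are reference-counted per block, as implemented above):

       struct UniformData { float matrix[16]; } data = {};

       void* pMapped = nullptr;
       if(vmaMapMemory(allocator, allocation, &pMapped) == VK_SUCCESS)
       {
           memcpy(pMapped, &data, sizeof(data));
           vmaUnmapMemory(allocator, allocation);
       }
*/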
9286 
9287 VkResult vmaDefragment(
9288  VmaAllocator allocator,
9289  VmaAllocation* pAllocations,
9290  size_t allocationCount,
9291  VkBool32* pAllocationsChanged,
9292  const VmaDefragmentationInfo *pDefragmentationInfo,
9293  VmaDefragmentationStats* pDefragmentationStats)
9294 {
9295  VMA_ASSERT(allocator && pAllocations);
9296 
9297  VMA_DEBUG_LOG("vmaDefragment");
9298 
9299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9300 
9301  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9302 }
9303 
9304 VkResult vmaBindBufferMemory(
9305  VmaAllocator allocator,
9306  VmaAllocation allocation,
9307  VkBuffer buffer)
9308 {
9309  VMA_ASSERT(allocator && allocation && buffer);
9310 
9311  VMA_DEBUG_LOG("vmaBindBufferMemory");
9312 
9313  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9314 
9315  return allocator->BindBufferMemory(allocation, buffer);
9316 }
9317 
9318 VkResult vmaBindImageMemory(
9319  VmaAllocator allocator,
9320  VmaAllocation allocation,
9321  VkImage image)
9322 {
9323  VMA_ASSERT(allocator && allocation && image);
9324 
9325  VMA_DEBUG_LOG("vmaBindImageMemory");
9326 
9327  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9328 
9329  return allocator->BindImageMemory(allocation, image);
9330 }

VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);
        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
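
/*
A minimal usage sketch, not part of the library source: creates a
device-local vertex buffer together with its memory in one call. Only
`allocator` is assumed to have been created earlier.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, VMA_NULL);
    // ...use the buffer, then destroy it together with its memory using
    // vmaDestroyBuffer(), defined below.
*/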

void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);
    VMA_DEBUG_LOG("vmaDestroyBuffer");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
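
/*
A minimal usage sketch, not part of the library source: creates a 2D
optimal-tiling sampled image together with its memory. Only `allocator` is
assumed to have been created earlier; all parameter values are illustrative.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 1024, 1024, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &allocation, VMA_NULL);
*/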

void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);
    VMA_DEBUG_LOG("vmaDestroyImage");
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}

#endif // #ifdef VMA_IMPLEMENTATION