diff --git a/docs/html/choosing_memory_type.html b/docs/html/choosing_memory_type.html
index 53177d3..dbee1f9 100644
--- a/docs/html/choosing_memory_type.html
+++ b/docs/html/choosing_memory_type.html
@@ -68,7 +68,7 @@ $(function() {

Physical devices in Vulkan support various combinations of memory heaps and types. Help with choosing the correct and optimal memory type for your specific resource is one of the key features of this library. You can use it by filling appropriate members of the VmaAllocationCreateInfo structure, as described below. You can also combine multiple methods.

-  1. If you just want to find memory type index that meets your requirements, you can use function vmaFindMemoryTypeIndex().
+  1. If you just want to find memory type index that meets your requirements, you can use function vmaFindMemoryTypeIndex().
  2. If you want to allocate a region of device memory without association with any specific image or buffer, you can use function vmaAllocateMemory(). Usage of this function is not recommended and usually not needed.
  3. If you already have a buffer or an image created and you want to allocate memory for it and then bind it yourself, you can use functions vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
  4. If you want to create a buffer or an image, allocate memory for it and bind them together, all in one call, you can use functions vmaCreateBuffer(), vmaCreateImage(). This is the recommended way to use this library; a minimal sketch follows this list.
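A minimal sketch of the recommended method (4.), assuming an already created VmaAllocator named allocator (this example is illustrative, not part of the original page):

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
// Creates the buffer, allocates memory of a suitable type and binds them together.
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);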
diff --git a/docs/html/globals.html b/docs/html/globals.html
index a5f53ea..7f943ab 100644
--- a/docs/html/globals.html
+++ b/docs/html/globals.html
@@ -206,6 +206,12 @@ $(function() {
  vmaFindMemoryTypeIndex() : vk_mem_alloc.h
+ vmaFindMemoryTypeIndexForBufferInfo() : vk_mem_alloc.h
+ vmaFindMemoryTypeIndexForImageInfo() : vk_mem_alloc.h
  vmaFreeMemory() : vk_mem_alloc.h
@@ -260,6 +266,9 @@ $(function() {
  VmaStats : vk_mem_alloc.h
+ vmaTouchAllocation() : vk_mem_alloc.h
  vmaUnmapMemory() : vk_mem_alloc.h
diff --git a/docs/html/globals_func.html b/docs/html/globals_func.html
index 4e529e3..b97dc86 100644
--- a/docs/html/globals_func.html
+++ b/docs/html/globals_func.html
@@ -58,7 +58,9 @@ $(function() {
-Version 2.0.0-alpha.7 (2018-02-09)
+Version 2.0.0-alpha.8 (2018-03-05)
Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
License: MIT

Documentation of all members: vk_mem_alloc.h

diff --git a/docs/html/menudata.js b/docs/html/menudata.js
index ef6a32a..3c413e5 100644
--- a/docs/html/menudata.js
+++ b/docs/html/menudata.js
@@ -37,7 +37,8 @@ var menudata={children:[
 {text:"All",url:"globals.html",children:[
 {text:"p",url:"globals.html#index_p"},
 {text:"v",url:"globals.html#index_v"}]},
-{text:"Functions",url:"globals_func.html"},
+{text:"Functions",url:"globals_func.html",children:[
+{text:"v",url:"globals_func.html#index_v"}]},
 {text:"Typedefs",url:"globals_type.html"},
 {text:"Enumerations",url:"globals_enum.html"},
 {text:"Enumerator",url:"globals_eval.html"},
diff --git a/docs/html/search/all_e.js b/docs/html/search/all_e.js
index 2063f3d..98c5060 100644
--- a/docs/html/search/all_e.js
+++ b/docs/html/search/all_e.js
@@ -64,6 +64,8 @@ var searchData=
   ['vmadestroypool',['vmaDestroyPool',['../vk__mem__alloc_8h.html#a5485779c8f1948238fc4e92232fa65e1',1,'vk_mem_alloc.h']]],
   ['vmadevicememorycallbacks',['VmaDeviceMemoryCallbacks',['../struct_vma_device_memory_callbacks.html',1,'VmaDeviceMemoryCallbacks'],['../vk__mem__alloc_8h.html#a5e2eb68d727cfd4df25702b027b7aa31',1,'VmaDeviceMemoryCallbacks(): vk_mem_alloc.h']]],
   ['vmafindmemorytypeindex',['vmaFindMemoryTypeIndex',['../vk__mem__alloc_8h.html#aef15a94b58fbcb0fe706d5720e84a74a',1,'vk_mem_alloc.h']]],
+  ['vmafindmemorytypeindexforbufferinfo',['vmaFindMemoryTypeIndexForBufferInfo',['../vk__mem__alloc_8h.html#ae790ab9ffaf7667fb8f62523e6897888',1,'vk_mem_alloc.h']]],
+  ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]],
   ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]],
   ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]],
   ['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]],
@@ -82,6 +84,7 @@ var searchData=
   ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]],
   ['vmastatinfo',['VmaStatInfo',['../struct_vma_stat_info.html',1,'VmaStatInfo'],['../vk__mem__alloc_8h.html#a810b009a788ee8aac72a25b42ffbe31c',1,'VmaStatInfo(): vk_mem_alloc.h']]],
   ['vmastats',['VmaStats',['../struct_vma_stats.html',1,'VmaStats'],['../vk__mem__alloc_8h.html#a732be855fb4a7c248e6853d928a729af',1,'VmaStats(): vk_mem_alloc.h']]],
+  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a108cb6dcb9ad32b81f0d61c08d1b4323',1,'vk_mem_alloc.h']]],
   ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]],
   ['vmavulkanfunctions',['VmaVulkanFunctions',['../struct_vma_vulkan_functions.html',1,'VmaVulkanFunctions'],['../vk__mem__alloc_8h.html#a97064a1a271b0061ebfc3a079862d0c5',1,'VmaVulkanFunctions(): vk_mem_alloc.h']]]
 ];
diff --git a/docs/html/search/functions_0.js b/docs/html/search/functions_0.js
index 9c722df..cce33d7 100644
--- a/docs/html/search/functions_0.js
+++ b/docs/html/search/functions_0.js
@@ -16,6 +16,8 @@ var searchData=
   ['vmadestroyimage',['vmaDestroyImage',['../vk__mem__alloc_8h.html#ae50d2cb3b4a3bfd4dd40987234e50e7e',1,'vk_mem_alloc.h']]],
   ['vmadestroypool',['vmaDestroyPool',['../vk__mem__alloc_8h.html#a5485779c8f1948238fc4e92232fa65e1',1,'vk_mem_alloc.h']]],
  ['vmafindmemorytypeindex',['vmaFindMemoryTypeIndex',['../vk__mem__alloc_8h.html#aef15a94b58fbcb0fe706d5720e84a74a',1,'vk_mem_alloc.h']]],
+  ['vmafindmemorytypeindexforbufferinfo',['vmaFindMemoryTypeIndexForBufferInfo',['../vk__mem__alloc_8h.html#ae790ab9ffaf7667fb8f62523e6897888',1,'vk_mem_alloc.h']]],
+  ['vmafindmemorytypeindexforimageinfo',['vmaFindMemoryTypeIndexForImageInfo',['../vk__mem__alloc_8h.html#a088da83d8eaf3ce9056d9ea0b981d472',1,'vk_mem_alloc.h']]],
  ['vmafreememory',['vmaFreeMemory',['../vk__mem__alloc_8h.html#a11f0fbc034fa81a4efedd73d61ce7568',1,'vk_mem_alloc.h']]],
  ['vmafreestatsstring',['vmaFreeStatsString',['../vk__mem__alloc_8h.html#a3104eb30d8122c84dd8541063f145288',1,'vk_mem_alloc.h']]],
  ['vmagetallocationinfo',['vmaGetAllocationInfo',['../vk__mem__alloc_8h.html#a86dd08aba8633bfa4ad0df2e76481d8b',1,'vk_mem_alloc.h']]],
@@ -27,5 +29,6 @@ var searchData=
  ['vmamapmemory',['vmaMapMemory',['../vk__mem__alloc_8h.html#ad5bd1243512d099706de88168992f069',1,'vk_mem_alloc.h']]],
  ['vmasetallocationuserdata',['vmaSetAllocationUserData',['../vk__mem__alloc_8h.html#af9147d31ffc11d62fc187bde283ed14f',1,'vk_mem_alloc.h']]],
  ['vmasetcurrentframeindex',['vmaSetCurrentFrameIndex',['../vk__mem__alloc_8h.html#ade56bf8dc9f5a5eaddf5f119ed525236',1,'vk_mem_alloc.h']]],
+  ['vmatouchallocation',['vmaTouchAllocation',['../vk__mem__alloc_8h.html#a108cb6dcb9ad32b81f0d61c08d1b4323',1,'vk_mem_alloc.h']]],
  ['vmaunmapmemory',['vmaUnmapMemory',['../vk__mem__alloc_8h.html#a9bc268595cb33f6ec4d519cfce81ff45',1,'vk_mem_alloc.h']]]
];
diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html
index f8b4d11..ce34209 100644
--- a/docs/html/vk__mem__alloc_8h.html
+++ b/docs/html/vk__mem__alloc_8h.html
@@ -235,7 +235,14 @@ Functions
 void vmaFreeStatsString (VmaAllocator allocator, char *pStatsString)
 
 VkResult vmaFindMemoryTypeIndex (VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
+ Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. More...
+VkResult vmaFindMemoryTypeIndexForBufferInfo (VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
+ Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. More...
+
+VkResult vmaFindMemoryTypeIndexForImageInfo (VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
+ Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. More...
+
 VkResult vmaCreatePool (VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
  Allocates Vulkan device memory and creates VmaPool object. More...
 
@@ -262,6 +269,9 @@ Functions
 void vmaGetAllocationInfo (VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
  Returns current information about specified allocation. More...
 
+bool vmaTouchAllocation (VmaAllocator allocator, VmaAllocation allocation)
+ TODO finish documentation... More...
+
 void vmaSetAllocationUserData (VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
  Sets pUserData in given allocation to new value. More...
 
@@ -1464,6 +1474,8 @@ Functions

Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.

This algorithm tries to find a memory type that:

  • Is allowed by memoryTypeBits.
  • Contains all the flags from pAllocationCreateInfo->requiredFlags.
  • Matches intended usage.
  • Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
Returns
Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such a result from this function or any other allocating function probably means that your device doesn't support any memory type with requested features for the specific type of resource you want to use it for. Please check parameters of your resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
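As an illustration (not part of the original documentation; allocator is an existing VmaAllocator and memRequirements is assumed to come from vkGetBufferMemoryRequirements):

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

uint32_t memoryTypeIndex;
VkResult res = vmaFindMemoryTypeIndex(allocator, memRequirements.memoryTypeBits, &allocCreateInfo, &memoryTypeIndex);
// On VK_SUCCESS, memoryTypeIndex identifies a memory type satisfying the rules listed above.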
@@ -1473,6 +1485,104 @@ Functions

◆ vmaFindMemoryTypeIndexForBufferInfo()

VkResult vmaFindMemoryTypeIndexForBufferInfo (VmaAllocator allocator,
                                              const VkBufferCreateInfo * pBufferCreateInfo,
                                              const VmaAllocationCreateInfo * pAllocationCreateInfo,
                                              uint32_t * pMemoryTypeIndex)

Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.


It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy buffer that never has memory bound. It is just a convenience function, equivalent to calling:

  • vkCreateBuffer
  • vkGetBufferMemoryRequirements
  • vmaFindMemoryTypeIndex
  • vkDestroyBuffer
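For example, a sketch (illustrative, not from the original page; allocator is an existing VmaAllocator):

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
// memTypeIndex can now be stored e.g. in VmaPoolCreateInfo::memoryTypeIndex.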

◆ vmaFindMemoryTypeIndexForImageInfo()

VkResult vmaFindMemoryTypeIndexForImageInfo (VmaAllocator allocator,
                                             const VkImageCreateInfo * pImageCreateInfo,
                                             const VmaAllocationCreateInfo * pAllocationCreateInfo,
                                             uint32_t * pMemoryTypeIndex)

Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.


It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex. It internally creates a temporary, dummy image that never has memory bound. It is just a convenience function, equivalent to calling:

  • vkCreateImage
  • vkGetImageMemoryRequirements
  • vmaFindMemoryTypeIndex
  • vkDestroyImage
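A sketch of the typical follow-up (illustrative, not from the original page): passing the found index to a custom pool.

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
// ... fill imageType, format, extent, usage and the other members ...

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForImageInfo(allocator, &imgCreateInfo, &allocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);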
@@ -1566,6 +1676,7 @@ Functions

Returns current information about specified allocation.

+It also "touches" allocation... TODO finish documentation.

@@ -1862,6 +1973,36 @@ Functions

Sets index of the current frame.

This function must be used if you make allocations with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot become lost in the current frame.

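For illustration (a sketch, not part of the original documentation; applicationRunning is a hypothetical loop condition):

uint32_t frameIndex = 0;
while(applicationRunning)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // ... record and submit work for this frame; allocations queried here
    // with vmaGetAllocationInfo() cannot become lost in this frame ...
    ++frameIndex;
}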

◆ vmaTouchAllocation()

bool vmaTouchAllocation (VmaAllocator allocator,
                         VmaAllocation allocation)

TODO finish documentation...

diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html
index 5a9e8b1..dc19ca4 100644
--- a/docs/html/vk__mem__alloc_8h_source.html
+++ b/docs/html/vk__mem__alloc_8h_source.html
@@ -62,7 +62,7 @@ $(function() {
vk_mem_alloc.h
-Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
826 #include <vulkan/vulkan.h>
827 
828 VK_DEFINE_HANDLE(VmaAllocator)
829 
830 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
832  VmaAllocator allocator,
833  uint32_t memoryType,
834  VkDeviceMemory memory,
835  VkDeviceSize size);
837 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
838  VmaAllocator allocator,
839  uint32_t memoryType,
840  VkDeviceMemory memory,
841  VkDeviceSize size);
842 
850 typedef struct VmaDeviceMemoryCallbacks {
856 
886 
889 typedef VkFlags VmaAllocatorCreateFlags;
890 
895 typedef struct VmaVulkanFunctions {
896  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
897  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
898  PFN_vkAllocateMemory vkAllocateMemory;
899  PFN_vkFreeMemory vkFreeMemory;
900  PFN_vkMapMemory vkMapMemory;
901  PFN_vkUnmapMemory vkUnmapMemory;
902  PFN_vkBindBufferMemory vkBindBufferMemory;
903  PFN_vkBindImageMemory vkBindImageMemory;
904  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
905  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
906  PFN_vkCreateBuffer vkCreateBuffer;
907  PFN_vkDestroyBuffer vkDestroyBuffer;
908  PFN_vkCreateImage vkCreateImage;
909  PFN_vkDestroyImage vkDestroyImage;
910  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
911  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
913 
916 {
918  VmaAllocatorCreateFlags flags;
920 
921  VkPhysicalDevice physicalDevice;
923 
924  VkDevice device;
926 
929 
930  const VkAllocationCallbacks* pAllocationCallbacks;
932 
947  uint32_t frameInUseCount;
971  const VkDeviceSize* pHeapSizeLimit;
985 
987 VkResult vmaCreateAllocator(
988  const VmaAllocatorCreateInfo* pCreateInfo,
989  VmaAllocator* pAllocator);
990 
993  VmaAllocator allocator);
994 
1000  VmaAllocator allocator,
1001  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1002 
1008  VmaAllocator allocator,
1009  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1010 
1018  VmaAllocator allocator,
1019  uint32_t memoryTypeIndex,
1020  VkMemoryPropertyFlags* pFlags);
1021 
1031  VmaAllocator allocator,
1032  uint32_t frameIndex);
1033 
1036 typedef struct VmaStatInfo
1037 {
1039  uint32_t blockCount;
1045  VkDeviceSize usedBytes;
1047  VkDeviceSize unusedBytes;
1048  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1049  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1050 } VmaStatInfo;
1051 
1053 typedef struct VmaStats
1054 {
1055  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1056  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1058 } VmaStats;
1059 
1061 void vmaCalculateStats(
1062  VmaAllocator allocator,
1063  VmaStats* pStats);
1064 
1065 #define VMA_STATS_STRING_ENABLED 1
1066 
1067 #if VMA_STATS_STRING_ENABLED
1068 
1070 
1072 void vmaBuildStatsString(
1073  VmaAllocator allocator,
1074  char** ppStatsString,
1075  VkBool32 detailedMap);
1076 
1077 void vmaFreeStatsString(
1078  VmaAllocator allocator,
1079  char* pStatsString);
1080 
1081 #endif // #if VMA_STATS_STRING_ENABLED
1082 
1083 VK_DEFINE_HANDLE(VmaPool)
1084 
1085 typedef enum VmaMemoryUsage
1086 {
1135 } VmaMemoryUsage;
1136 
1151 
1201 
1205 
1207 {
1209  VmaAllocationCreateFlags flags;
1220  VkMemoryPropertyFlags requiredFlags;
1225  VkMemoryPropertyFlags preferredFlags;
1233  uint32_t memoryTypeBits;
1239  VmaPool pool;
1246  void* pUserData;
1248 
1263 VkResult vmaFindMemoryTypeIndex(
1264  VmaAllocator allocator,
1265  uint32_t memoryTypeBits,
1266  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1267  uint32_t* pMemoryTypeIndex);
1268 
1289 
1292 typedef VkFlags VmaPoolCreateFlags;
1293 
1296 typedef struct VmaPoolCreateInfo {
1302  VmaPoolCreateFlags flags;
1307  VkDeviceSize blockSize;
1336 
1339 typedef struct VmaPoolStats {
1342  VkDeviceSize size;
1345  VkDeviceSize unusedSize;
1358  VkDeviceSize unusedRangeSizeMax;
1359 } VmaPoolStats;
1360 
1367 VkResult vmaCreatePool(
1368  VmaAllocator allocator,
1369  const VmaPoolCreateInfo* pCreateInfo,
1370  VmaPool* pPool);
1371 
1374 void vmaDestroyPool(
1375  VmaAllocator allocator,
1376  VmaPool pool);
1377 
1384 void vmaGetPoolStats(
1385  VmaAllocator allocator,
1386  VmaPool pool,
1387  VmaPoolStats* pPoolStats);
1388 
1396  VmaAllocator allocator,
1397  VmaPool pool,
1398  size_t* pLostAllocationCount);
1399 
1400 VK_DEFINE_HANDLE(VmaAllocation)
1401 
1402 
1404 typedef struct VmaAllocationInfo {
1409  uint32_t memoryType;
1418  VkDeviceMemory deviceMemory;
1423  VkDeviceSize offset;
1428  VkDeviceSize size;
1442  void* pUserData;
1444 
1455 VkResult vmaAllocateMemory(
1456  VmaAllocator allocator,
1457  const VkMemoryRequirements* pVkMemoryRequirements,
1458  const VmaAllocationCreateInfo* pCreateInfo,
1459  VmaAllocation* pAllocation,
1460  VmaAllocationInfo* pAllocationInfo);
1461 
1469  VmaAllocator allocator,
1470  VkBuffer buffer,
1471  const VmaAllocationCreateInfo* pCreateInfo,
1472  VmaAllocation* pAllocation,
1473  VmaAllocationInfo* pAllocationInfo);
1474 
1476 VkResult vmaAllocateMemoryForImage(
1477  VmaAllocator allocator,
1478  VkImage image,
1479  const VmaAllocationCreateInfo* pCreateInfo,
1480  VmaAllocation* pAllocation,
1481  VmaAllocationInfo* pAllocationInfo);
1482 
1484 void vmaFreeMemory(
1485  VmaAllocator allocator,
1486  VmaAllocation allocation);
1487 
1490  VmaAllocator allocator,
1491  VmaAllocation allocation,
1492  VmaAllocationInfo* pAllocationInfo);
1493 
1508  VmaAllocator allocator,
1509  VmaAllocation allocation,
1510  void* pUserData);
1511 
1523  VmaAllocator allocator,
1524  VmaAllocation* pAllocation);
1525 
1560 VkResult vmaMapMemory(
1561  VmaAllocator allocator,
1562  VmaAllocation allocation,
1563  void** ppData);
1564 
1569 void vmaUnmapMemory(
1570  VmaAllocator allocator,
1571  VmaAllocation allocation);
1572 
1574 typedef struct VmaDefragmentationInfo {
1579  VkDeviceSize maxBytesToMove;
1586 
1588 typedef struct VmaDefragmentationStats {
1590  VkDeviceSize bytesMoved;
1592  VkDeviceSize bytesFreed;
1598 
1681 VkResult vmaDefragment(
1682  VmaAllocator allocator,
1683  VmaAllocation* pAllocations,
1684  size_t allocationCount,
1685  VkBool32* pAllocationsChanged,
1686  const VmaDefragmentationInfo *pDefragmentationInfo,
1687  VmaDefragmentationStats* pDefragmentationStats);
1688 
1715 VkResult vmaCreateBuffer(
1716  VmaAllocator allocator,
1717  const VkBufferCreateInfo* pBufferCreateInfo,
1718  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1719  VkBuffer* pBuffer,
1720  VmaAllocation* pAllocation,
1721  VmaAllocationInfo* pAllocationInfo);
1722 
1734 void vmaDestroyBuffer(
1735  VmaAllocator allocator,
1736  VkBuffer buffer,
1737  VmaAllocation allocation);
1738 
1740 VkResult vmaCreateImage(
1741  VmaAllocator allocator,
1742  const VkImageCreateInfo* pImageCreateInfo,
1743  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1744  VkImage* pImage,
1745  VmaAllocation* pAllocation,
1746  VmaAllocationInfo* pAllocationInfo);
1747 
1759 void vmaDestroyImage(
1760  VmaAllocator allocator,
1761  VkImage image,
1762  VmaAllocation allocation);
1763 
1764 #ifdef __cplusplus
1765 }
1766 #endif
1767 
1768 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1769 
1770 // For Visual Studio IntelliSense.
1771 #ifdef __INTELLISENSE__
1772 #define VMA_IMPLEMENTATION
1773 #endif
1774 
1775 #ifdef VMA_IMPLEMENTATION
1776 #undef VMA_IMPLEMENTATION
1777 
1778 #include <cstdint>
1779 #include <cstdlib>
1780 #include <cstring>
1781 
1782 /*******************************************************************************
1783 CONFIGURATION SECTION
1784 
1785 Define some of these macros before each #include of this header or change them
1786 here if you need other than the default behavior depending on your environment.
1787 */
1788 
1789 /*
1790 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1791 internally, like:
1792 
1793  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1794 
1795 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1796 VmaAllocatorCreateInfo::pVulkanFunctions.
1797 */
1798 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1799 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1800 #endif
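// (Illustrative sketch, not part of the original header: with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers are supplied
// through VmaAllocatorCreateInfo::pVulkanFunctions instead, e.g.
//
//     VmaVulkanFunctions vulkanFunctions = {};
//     vulkanFunctions.vkAllocateMemory = myVkAllocateMemory; // hypothetical pointer, fetched by the application
//     // ... fill all remaining members of VmaVulkanFunctions the same way ...
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//     VmaAllocator allocator;
//     vmaCreateAllocator(&allocatorInfo, &allocator);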
1801 
1802 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1803 //#define VMA_USE_STL_CONTAINERS 1
1804 
1805 /* Set this macro to 1 to make the library include and use STL containers:
1806 std::pair, std::vector, std::list, std::unordered_map.
1807 
1808 Set it to 0 or undefined to make the library use its own implementation of
1809 the containers.
1810 */
1811 #if VMA_USE_STL_CONTAINERS
1812  #define VMA_USE_STL_VECTOR 1
1813  #define VMA_USE_STL_UNORDERED_MAP 1
1814  #define VMA_USE_STL_LIST 1
1815 #endif
1816 
1817 #if VMA_USE_STL_VECTOR
1818  #include <vector>
1819 #endif
1820 
1821 #if VMA_USE_STL_UNORDERED_MAP
1822  #include <unordered_map>
1823 #endif
1824 
1825 #if VMA_USE_STL_LIST
1826  #include <list>
1827 #endif
1828 
1829 /*
1830 Following headers are used in this CONFIGURATION section only, so feel free to
1831 remove them if not needed.
1832 */
1833 #include <cassert> // for assert
1834 #include <algorithm> // for min, max
1835 #include <mutex> // for std::mutex
1836 #include <atomic> // for std::atomic
1837 
1838 #if !defined(_WIN32) && !defined(__APPLE__)
1839  #include <malloc.h> // for aligned_alloc()
1840 #endif
1841 
1842 #if defined(__APPLE__)
1843 #include <cstdlib>
1844 void *aligned_alloc(size_t alignment, size_t size)
1845 {
1846  // alignment must be >= sizeof(void*)
1847  if(alignment < sizeof(void*))
1848  {
1849  alignment = sizeof(void*);
1850  }
1851 
1852  void *pointer;
1853  if(posix_memalign(&pointer, alignment, size) == 0)
1854  return pointer;
1855  return VMA_NULL;
1856 }
1857 #endif
1858 
1859 // Normal assert to check for programmer's errors, especially in Debug configuration.
1860 #ifndef VMA_ASSERT
1861  #ifdef _DEBUG
1862  #define VMA_ASSERT(expr) assert(expr)
1863  #else
1864  #define VMA_ASSERT(expr)
1865  #endif
1866 #endif
1867 
1868 // Assert that will be called very often, like inside data structures e.g. operator[].
1869 // Making it non-empty can make program slow.
1870 #ifndef VMA_HEAVY_ASSERT
1871  #ifdef _DEBUG
1872  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1873  #else
1874  #define VMA_HEAVY_ASSERT(expr)
1875  #endif
1876 #endif
1877 
1878 #ifndef VMA_NULL
1879  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1880  #define VMA_NULL nullptr
1881 #endif
1882 
1883 #ifndef VMA_ALIGN_OF
1884  #define VMA_ALIGN_OF(type) (__alignof(type))
1885 #endif
1886 
1887 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1888  #if defined(_WIN32)
1889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1890  #else
1891  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1892  #endif
1893 #endif
1894 
1895 #ifndef VMA_SYSTEM_FREE
1896  #if defined(_WIN32)
1897  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1898  #else
1899  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1900  #endif
1901 #endif
1902 
1903 #ifndef VMA_MIN
1904  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1905 #endif
1906 
1907 #ifndef VMA_MAX
1908  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1909 #endif
1910 
1911 #ifndef VMA_SWAP
1912  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1913 #endif
1914 
1915 #ifndef VMA_SORT
1916  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1917 #endif
1918 
1919 #ifndef VMA_DEBUG_LOG
1920  #define VMA_DEBUG_LOG(format, ...)
1921  /*
1922  #define VMA_DEBUG_LOG(format, ...) do { \
1923  printf(format, __VA_ARGS__); \
1924  printf("\n"); \
1925  } while(false)
1926  */
1927 #endif
1928 
1929 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1930 #if VMA_STATS_STRING_ENABLED
1931  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1932  {
1933  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1934  }
1935  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1936  {
1937  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1938  }
1939  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1940  {
1941  snprintf(outStr, strLen, "%p", ptr);
1942  }
1943 #endif
1944 
1945 #ifndef VMA_MUTEX
1946  class VmaMutex
1947  {
1948  public:
1949  VmaMutex() { }
1950  ~VmaMutex() { }
1951  void Lock() { m_Mutex.lock(); }
1952  void Unlock() { m_Mutex.unlock(); }
1953  private:
1954  std::mutex m_Mutex;
1955  };
1956  #define VMA_MUTEX VmaMutex
1957 #endif
1958 
1959 /*
1960 If providing your own implementation, you need to implement a subset of std::atomic:
1961 
1962 - Constructor(uint32_t desired)
1963 - uint32_t load() const
1964 - void store(uint32_t desired)
1965 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
1966 */
1967 #ifndef VMA_ATOMIC_UINT32
1968  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
1969 #endif
1970 
1971 #ifndef VMA_BEST_FIT
1972 
1984  #define VMA_BEST_FIT (1)
1985 #endif
1986 
1987 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
1988 
1992  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
1993 #endif
1994 
1995 #ifndef VMA_DEBUG_ALIGNMENT
1996 
2000  #define VMA_DEBUG_ALIGNMENT (1)
2001 #endif
2002 
2003 #ifndef VMA_DEBUG_MARGIN
2004 
2008  #define VMA_DEBUG_MARGIN (0)
2009 #endif
2010 
2011 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2012 
2016  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2017 #endif
2018 
2019 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2020 
2024  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2025 #endif
2026 
2027 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2028  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2030 #endif
2031 
2032 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2033  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2035 #endif
2036 
2037 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2038 
2039 /*******************************************************************************
2040 END OF CONFIGURATION
2041 */
2042 
2043 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2044  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2045 
2046 // Returns number of bits set to 1 in (v).
2047 static inline uint32_t VmaCountBitsSet(uint32_t v)
2048 {
2049  uint32_t c = v - ((v >> 1) & 0x55555555);
2050  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2051  c = ((c >> 4) + c) & 0x0F0F0F0F;
2052  c = ((c >> 8) + c) & 0x00FF00FF;
2053  c = ((c >> 16) + c) & 0x0000FFFF;
2054  return c;
2055 }
2056 
2057 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
2058 // Use types like uint32_t, uint64_t as T.
2059 template <typename T>
2060 static inline T VmaAlignUp(T val, T align)
2061 {
2062  return (val + align - 1) / align * align;
2063 }
2064 
2065 // Division with mathematical rounding to nearest number.
2066 template <typename T>
2067 inline T VmaRoundDiv(T x, T y)
2068 {
2069  return (x + (y / (T)2)) / y;
2070 }
2071 
2072 #ifndef VMA_SORT
2073 
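// Fallback used when VMA_SORT is not overridden: a simple recursive quicksort.
// VmaQuickSortPartition implements the Lomuto partition scheme, using the last
// element as the pivot and returning an iterator to the pivot's final position.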
2074 template<typename Iterator, typename Compare>
2075 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2076 {
2077  Iterator centerValue = end; --centerValue;
2078  Iterator insertIndex = beg;
2079  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2080  {
2081  if(cmp(*memTypeIndex, *centerValue))
2082  {
2083  if(insertIndex != memTypeIndex)
2084  {
2085  VMA_SWAP(*memTypeIndex, *insertIndex);
2086  }
2087  ++insertIndex;
2088  }
2089  }
2090  if(insertIndex != centerValue)
2091  {
2092  VMA_SWAP(*insertIndex, *centerValue);
2093  }
2094  return insertIndex;
2095 }
2096 
2097 template<typename Iterator, typename Compare>
2098 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2099 {
2100  if(beg < end)
2101  {
2102  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2103  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2104  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2105  }
2106 }
2107 
2108 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2109 
2110 #endif // #ifndef VMA_SORT
2111 
2112 /*
2113 Returns true if two memory blocks occupy overlapping pages.
2114 ResourceA must be at a smaller memory offset than ResourceB.
2115 
2116 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2117 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2118 */
2119 static inline bool VmaBlocksOnSamePage(
2120  VkDeviceSize resourceAOffset,
2121  VkDeviceSize resourceASize,
2122  VkDeviceSize resourceBOffset,
2123  VkDeviceSize pageSize)
2124 {
2125  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2126  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2127  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2128  VkDeviceSize resourceBStart = resourceBOffset;
2129  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2130  return resourceAEndPage == resourceBStartPage;
2131 }
2132 
2133 enum VmaSuballocationType
2134 {
2135  VMA_SUBALLOCATION_TYPE_FREE = 0,
2136  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2137  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2138  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2139  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2140  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2141  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2142 };
2143 
2144 /*
2145 Returns true if given suballocation types could conflict and must respect
2146 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
2147 or linear image and another one is optimal image. If type is unknown, behave
2148 conservatively.
2149 */
2150 static inline bool VmaIsBufferImageGranularityConflict(
2151  VmaSuballocationType suballocType1,
2152  VmaSuballocationType suballocType2)
2153 {
2154  if(suballocType1 > suballocType2)
2155  {
2156  VMA_SWAP(suballocType1, suballocType2);
2157  }
2158 
2159  switch(suballocType1)
2160  {
2161  case VMA_SUBALLOCATION_TYPE_FREE:
2162  return false;
2163  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2164  return true;
2165  case VMA_SUBALLOCATION_TYPE_BUFFER:
2166  return
2167  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2168  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2169  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2170  return
2171  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2172  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2173  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2174  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2175  return
2176  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2177  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2178  return false;
2179  default:
2180  VMA_ASSERT(0);
2181  return true;
2182  }
2183 }
2184 
2185 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2186 struct VmaMutexLock
2187 {
2188 public:
2189  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2190  m_pMutex(useMutex ? &mutex : VMA_NULL)
2191  {
2192  if(m_pMutex)
2193  {
2194  m_pMutex->Lock();
2195  }
2196  }
2197 
2198  ~VmaMutexLock()
2199  {
2200  if(m_pMutex)
2201  {
2202  m_pMutex->Unlock();
2203  }
2204  }
2205 
2206 private:
2207  VMA_MUTEX* m_pMutex;
2208 };
2209 
2210 #if VMA_DEBUG_GLOBAL_MUTEX
2211  static VMA_MUTEX gDebugGlobalMutex;
2212  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2213 #else
2214  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2215 #endif
2216 
2217 // Minimum size of a free suballocation to register it in the free suballocation collection.
2218 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2219 
2220 /*
2221 Performs binary search and returns iterator to first element that is greater or
2222 equal to (key), according to comparison (cmp).
2223 
2224 Cmp should return true if first argument is less than second argument.
2225 
2226 Returned value is the found element, if present in the collection, or the place where
2227 a new element with value (key) should be inserted.
2228 */
2229 template <typename IterT, typename KeyT, typename CmpT>
2230 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2231 {
2232  size_t down = 0, up = (end - beg);
2233  while(down < up)
2234  {
2235  const size_t mid = (down + up) / 2;
2236  if(cmp(*(beg+mid), key))
2237  {
2238  down = mid + 1;
2239  }
2240  else
2241  {
2242  up = mid;
2243  }
2244  }
2245  return beg + down;
2246 }
2247 
2249 // Memory allocation
2250 
2251 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2252 {
2253  if((pAllocationCallbacks != VMA_NULL) &&
2254  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2255  {
2256  return (*pAllocationCallbacks->pfnAllocation)(
2257  pAllocationCallbacks->pUserData,
2258  size,
2259  alignment,
2260  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2261  }
2262  else
2263  {
2264  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2265  }
2266 }
2267 
2268 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2269 {
2270  if((pAllocationCallbacks != VMA_NULL) &&
2271  (pAllocationCallbacks->pfnFree != VMA_NULL))
2272  {
2273  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2274  }
2275  else
2276  {
2277  VMA_SYSTEM_FREE(ptr);
2278  }
2279 }
2280 
2281 template<typename T>
2282 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2283 {
2284  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2285 }
2286 
2287 template<typename T>
2288 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2289 {
2290  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2291 }
2292 
2293 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2294 
2295 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2296 
2297 template<typename T>
2298 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2299 {
2300  ptr->~T();
2301  VmaFree(pAllocationCallbacks, ptr);
2302 }
2303 
2304 template<typename T>
2305 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2306 {
2307  if(ptr != VMA_NULL)
2308  {
2309  for(size_t i = count; i--; )
2310  {
2311  ptr[i].~T();
2312  }
2313  VmaFree(pAllocationCallbacks, ptr);
2314  }
2315 }
2316 
2317 // STL-compatible allocator.
2318 template<typename T>
2319 class VmaStlAllocator
2320 {
2321 public:
2322  const VkAllocationCallbacks* const m_pCallbacks;
2323  typedef T value_type;
2324 
2325  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2326  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2327 
2328  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2329  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2330 
2331  template<typename U>
2332  bool operator==(const VmaStlAllocator<U>& rhs) const
2333  {
2334  return m_pCallbacks == rhs.m_pCallbacks;
2335  }
2336  template<typename U>
2337  bool operator!=(const VmaStlAllocator<U>& rhs) const
2338  {
2339  return m_pCallbacks != rhs.m_pCallbacks;
2340  }
2341 
2342  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2343 };
2344 
2345 #if VMA_USE_STL_VECTOR
2346 
2347 #define VmaVector std::vector
2348 
2349 template<typename T, typename allocatorT>
2350 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2351 {
2352  vec.insert(vec.begin() + index, item);
2353 }
2354 
2355 template<typename T, typename allocatorT>
2356 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2357 {
2358  vec.erase(vec.begin() + index);
2359 }
2360 
2361 #else // #if VMA_USE_STL_VECTOR
2362 
2363 /* Class with interface compatible with subset of std::vector.
2364 T must be POD because constructors and destructors are not called and memcpy is
2365 used for these objects. */
2366 template<typename T, typename AllocatorT>
2367 class VmaVector
2368 {
2369 public:
2370  typedef T value_type;
2371 
2372  VmaVector(const AllocatorT& allocator) :
2373  m_Allocator(allocator),
2374  m_pArray(VMA_NULL),
2375  m_Count(0),
2376  m_Capacity(0)
2377  {
2378  }
2379 
2380  VmaVector(size_t count, const AllocatorT& allocator) :
2381  m_Allocator(allocator),
2382  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2383  m_Count(count),
2384  m_Capacity(count)
2385  {
2386  }
2387 
2388  VmaVector(const VmaVector<T, AllocatorT>& src) :
2389  m_Allocator(src.m_Allocator),
2390  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2391  m_Count(src.m_Count),
2392  m_Capacity(src.m_Count)
2393  {
2394  if(m_Count != 0)
2395  {
2396  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2397  }
2398  }
2399 
2400  ~VmaVector()
2401  {
2402  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2403  }
2404 
2405  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2406  {
2407  if(&rhs != this)
2408  {
2409  resize(rhs.m_Count);
2410  if(m_Count != 0)
2411  {
2412  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2413  }
2414  }
2415  return *this;
2416  }
2417 
2418  bool empty() const { return m_Count == 0; }
2419  size_t size() const { return m_Count; }
2420  T* data() { return m_pArray; }
2421  const T* data() const { return m_pArray; }
2422 
2423  T& operator[](size_t index)
2424  {
2425  VMA_HEAVY_ASSERT(index < m_Count);
2426  return m_pArray[index];
2427  }
2428  const T& operator[](size_t index) const
2429  {
2430  VMA_HEAVY_ASSERT(index < m_Count);
2431  return m_pArray[index];
2432  }
2433 
2434  T& front()
2435  {
2436  VMA_HEAVY_ASSERT(m_Count > 0);
2437  return m_pArray[0];
2438  }
2439  const T& front() const
2440  {
2441  VMA_HEAVY_ASSERT(m_Count > 0);
2442  return m_pArray[0];
2443  }
2444  T& back()
2445  {
2446  VMA_HEAVY_ASSERT(m_Count > 0);
2447  return m_pArray[m_Count - 1];
2448  }
2449  const T& back() const
2450  {
2451  VMA_HEAVY_ASSERT(m_Count > 0);
2452  return m_pArray[m_Count - 1];
2453  }
2454 
2455  void reserve(size_t newCapacity, bool freeMemory = false)
2456  {
2457  newCapacity = VMA_MAX(newCapacity, m_Count);
2458 
2459  if((newCapacity < m_Capacity) && !freeMemory)
2460  {
2461  newCapacity = m_Capacity;
2462  }
2463 
2464  if(newCapacity != m_Capacity)
2465  {
2466  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2467  if(m_Count != 0)
2468  {
2469  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2470  }
2471  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2472  m_Capacity = newCapacity;
2473  m_pArray = newArray;
2474  }
2475  }
2476 
2477  void resize(size_t newCount, bool freeMemory = false)
2478  {
2479  size_t newCapacity = m_Capacity;
2480  if(newCount > m_Capacity)
2481  {
2482  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2483  }
2484  else if(freeMemory)
2485  {
2486  newCapacity = newCount;
2487  }
2488 
2489  if(newCapacity != m_Capacity)
2490  {
2491  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2492  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2493  if(elementsToCopy != 0)
2494  {
2495  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2496  }
2497  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2498  m_Capacity = newCapacity;
2499  m_pArray = newArray;
2500  }
2501 
2502  m_Count = newCount;
2503  }
2504 
2505  void clear(bool freeMemory = false)
2506  {
2507  resize(0, freeMemory);
2508  }
2509 
2510  void insert(size_t index, const T& src)
2511  {
2512  VMA_HEAVY_ASSERT(index <= m_Count);
2513  const size_t oldCount = size();
2514  resize(oldCount + 1);
2515  if(index < oldCount)
2516  {
2517  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2518  }
2519  m_pArray[index] = src;
2520  }
2521 
2522  void remove(size_t index)
2523  {
2524  VMA_HEAVY_ASSERT(index < m_Count);
2525  const size_t oldCount = size();
2526  if(index < oldCount - 1)
2527  {
2528  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2529  }
2530  resize(oldCount - 1);
2531  }
2532 
2533  void push_back(const T& src)
2534  {
2535  const size_t newIndex = size();
2536  resize(newIndex + 1);
2537  m_pArray[newIndex] = src;
2538  }
2539 
2540  void pop_back()
2541  {
2542  VMA_HEAVY_ASSERT(m_Count > 0);
2543  resize(size() - 1);
2544  }
2545 
2546  void push_front(const T& src)
2547  {
2548  insert(0, src);
2549  }
2550 
2551  void pop_front()
2552  {
2553  VMA_HEAVY_ASSERT(m_Count > 0);
2554  remove(0);
2555  }
2556 
2557  typedef T* iterator;
2558 
2559  iterator begin() { return m_pArray; }
2560  iterator end() { return m_pArray + m_Count; }
2561 
2562 private:
2563  AllocatorT m_Allocator;
2564  T* m_pArray;
2565  size_t m_Count;
2566  size_t m_Capacity;
2567 };
2568 
2569 template<typename T, typename allocatorT>
2570 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2571 {
2572  vec.insert(index, item);
2573 }
2574 
2575 template<typename T, typename allocatorT>
2576 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2577 {
2578  vec.remove(index);
2579 }
2580 
2581 #endif // #if VMA_USE_STL_VECTOR
2582 
2583 template<typename CmpLess, typename VectorT>
2584 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2585 {
2586  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2587  vector.data(),
2588  vector.data() + vector.size(),
2589  value,
2590  CmpLess()) - vector.data();
2591  VmaVectorInsert(vector, indexToInsert, value);
2592  return indexToInsert;
2593 }
2594 
2595 template<typename CmpLess, typename VectorT>
2596 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2597 {
2598  CmpLess comparator;
2599  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2600  vector.begin(),
2601  vector.end(),
2602  value,
2603  comparator);
2604  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2605  {
2606  size_t indexToRemove = it - vector.begin();
2607  VmaVectorRemove(vector, indexToRemove);
2608  return true;
2609  }
2610  return false;
2611 }
2612 
2613 template<typename CmpLess, typename VectorT>
2614 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2615 {
2616  CmpLess comparator;
2617  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2618  vector.data(),
2619  vector.data() + vector.size(),
2620  value,
2621  comparator);
2622  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
2623  {
2624  return it - vector.data();
2625  }
2626  else
2627  {
2628  return vector.size();
2629  }
2630 }
2631 
2633 // class VmaPoolAllocator
2634 
2635 /*
2636 Allocator for objects of type T using a list of arrays (pools) to speed up
2637 allocation. Number of elements that can be allocated is not bounded because
2638 allocator can create multiple blocks.
2639 */
2640 template<typename T>
2641 class VmaPoolAllocator
2642 {
2643 public:
2644  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2645  ~VmaPoolAllocator();
2646  void Clear();
2647  T* Alloc();
2648  void Free(T* ptr);
2649 
2650 private:
2651  union Item
2652  {
2653  uint32_t NextFreeIndex;
2654  T Value;
2655  };
2656 
2657  struct ItemBlock
2658  {
2659  Item* pItems;
2660  uint32_t FirstFreeIndex;
2661  };
2662 
2663  const VkAllocationCallbacks* m_pAllocationCallbacks;
2664  size_t m_ItemsPerBlock;
2665  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2666 
2667  ItemBlock& CreateNewBlock();
2668 };
2669 
2670 template<typename T>
2671 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2672  m_pAllocationCallbacks(pAllocationCallbacks),
2673  m_ItemsPerBlock(itemsPerBlock),
2674  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2675 {
2676  VMA_ASSERT(itemsPerBlock > 0);
2677 }
2678 
2679 template<typename T>
2680 VmaPoolAllocator<T>::~VmaPoolAllocator()
2681 {
2682  Clear();
2683 }
2684 
2685 template<typename T>
2686 void VmaPoolAllocator<T>::Clear()
2687 {
2688  for(size_t i = m_ItemBlocks.size(); i--; )
2689  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2690  m_ItemBlocks.clear();
2691 }
2692 
2693 template<typename T>
2694 T* VmaPoolAllocator<T>::Alloc()
2695 {
2696  for(size_t i = m_ItemBlocks.size(); i--; )
2697  {
2698  ItemBlock& block = m_ItemBlocks[i];
2699  // This block has some free items: Use first one.
2700  if(block.FirstFreeIndex != UINT32_MAX)
2701  {
2702  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2703  block.FirstFreeIndex = pItem->NextFreeIndex;
2704  return &pItem->Value;
2705  }
2706  }
2707 
2708  // No block has free item: Create new one and use it.
2709  ItemBlock& newBlock = CreateNewBlock();
2710  Item* const pItem = &newBlock.pItems[0];
2711  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2712  return &pItem->Value;
2713 }
2714 
2715 template<typename T>
2716 void VmaPoolAllocator<T>::Free(T* ptr)
2717 {
2718  // Search all memory blocks to find ptr.
2719  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2720  {
2721  ItemBlock& block = m_ItemBlocks[i];
2722 
2723  // Casting to union.
2724  Item* pItemPtr;
2725  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2726 
2727  // Check if pItemPtr is in address range of this block.
2728  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2729  {
2730  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2731  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2732  block.FirstFreeIndex = index;
2733  return;
2734  }
2735  }
2736  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2737 }
2738 
2739 template<typename T>
2740 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2741 {
2742  ItemBlock newBlock = {
2743  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2744 
2745  m_ItemBlocks.push_back(newBlock);
2746 
2747  // Set up singly-linked list of all free items in this block.
2748  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2749  newBlock.pItems[i].NextFreeIndex = i + 1;
2750  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2751  return m_ItemBlocks.back();
2752 }
2753 
2755 // class VmaRawList, VmaList
2756 
2757 #if VMA_USE_STL_LIST
2758 
2759 #define VmaList std::list
2760 
2761 #else // #if VMA_USE_STL_LIST
2762 
2763 template<typename T>
2764 struct VmaListItem
2765 {
2766  VmaListItem* pPrev;
2767  VmaListItem* pNext;
2768  T Value;
2769 };
2770 
2771 // Doubly linked list.
2772 template<typename T>
2773 class VmaRawList
2774 {
2775 public:
2776  typedef VmaListItem<T> ItemType;
2777 
2778  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2779  ~VmaRawList();
2780  void Clear();
2781 
2782  size_t GetCount() const { return m_Count; }
2783  bool IsEmpty() const { return m_Count == 0; }
2784 
2785  ItemType* Front() { return m_pFront; }
2786  const ItemType* Front() const { return m_pFront; }
2787  ItemType* Back() { return m_pBack; }
2788  const ItemType* Back() const { return m_pBack; }
2789 
2790  ItemType* PushBack();
2791  ItemType* PushFront();
2792  ItemType* PushBack(const T& value);
2793  ItemType* PushFront(const T& value);
2794  void PopBack();
2795  void PopFront();
2796 
2797  // Item can be null - it means PushBack.
2798  ItemType* InsertBefore(ItemType* pItem);
2799  // Item can be null - it means PushFront.
2800  ItemType* InsertAfter(ItemType* pItem);
2801 
2802  ItemType* InsertBefore(ItemType* pItem, const T& value);
2803  ItemType* InsertAfter(ItemType* pItem, const T& value);
2804 
2805  void Remove(ItemType* pItem);
2806 
2807 private:
2808  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2809  VmaPoolAllocator<ItemType> m_ItemAllocator;
2810  ItemType* m_pFront;
2811  ItemType* m_pBack;
2812  size_t m_Count;
2813 
2814  // Declared but not defined, to block copy constructor and assignment operator.
2815  VmaRawList(const VmaRawList<T>& src);
2816  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2817 };
2818 
2819 template<typename T>
2820 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2821  m_pAllocationCallbacks(pAllocationCallbacks),
2822  m_ItemAllocator(pAllocationCallbacks, 128),
2823  m_pFront(VMA_NULL),
2824  m_pBack(VMA_NULL),
2825  m_Count(0)
2826 {
2827 }
2828 
2829 template<typename T>
2830 VmaRawList<T>::~VmaRawList()
2831 {
2832  // Intentionally not calling Clear, because that would be unnecessary
2833  // computation to return all items to m_ItemAllocator as free.
2834 }
2835 
2836 template<typename T>
2837 void VmaRawList<T>::Clear()
2838 {
2839  if(IsEmpty() == false)
2840  {
2841  ItemType* pItem = m_pBack;
2842  while(pItem != VMA_NULL)
2843  {
2844  ItemType* const pPrevItem = pItem->pPrev;
2845  m_ItemAllocator.Free(pItem);
2846  pItem = pPrevItem;
2847  }
2848  m_pFront = VMA_NULL;
2849  m_pBack = VMA_NULL;
2850  m_Count = 0;
2851  }
2852 }
2853 
2854 template<typename T>
2855 VmaListItem<T>* VmaRawList<T>::PushBack()
2856 {
2857  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2858  pNewItem->pNext = VMA_NULL;
2859  if(IsEmpty())
2860  {
2861  pNewItem->pPrev = VMA_NULL;
2862  m_pFront = pNewItem;
2863  m_pBack = pNewItem;
2864  m_Count = 1;
2865  }
2866  else
2867  {
2868  pNewItem->pPrev = m_pBack;
2869  m_pBack->pNext = pNewItem;
2870  m_pBack = pNewItem;
2871  ++m_Count;
2872  }
2873  return pNewItem;
2874 }
2875 
2876 template<typename T>
2877 VmaListItem<T>* VmaRawList<T>::PushFront()
2878 {
2879  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2880  pNewItem->pPrev = VMA_NULL;
2881  if(IsEmpty())
2882  {
2883  pNewItem->pNext = VMA_NULL;
2884  m_pFront = pNewItem;
2885  m_pBack = pNewItem;
2886  m_Count = 1;
2887  }
2888  else
2889  {
2890  pNewItem->pNext = m_pFront;
2891  m_pFront->pPrev = pNewItem;
2892  m_pFront = pNewItem;
2893  ++m_Count;
2894  }
2895  return pNewItem;
2896 }
2897 
2898 template<typename T>
2899 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2900 {
2901  ItemType* const pNewItem = PushBack();
2902  pNewItem->Value = value;
2903  return pNewItem;
2904 }
2905 
2906 template<typename T>
2907 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2908 {
2909  ItemType* const pNewItem = PushFront();
2910  pNewItem->Value = value;
2911  return pNewItem;
2912 }
2913 
2914 template<typename T>
2915 void VmaRawList<T>::PopBack()
2916 {
2917  VMA_HEAVY_ASSERT(m_Count > 0);
2918  ItemType* const pBackItem = m_pBack;
2919  ItemType* const pPrevItem = pBackItem->pPrev;
2920  if(pPrevItem != VMA_NULL)
2921  {
2922  pPrevItem->pNext = VMA_NULL;
2923  }
2924  m_pBack = pPrevItem;
2925  m_ItemAllocator.Free(pBackItem);
2926  --m_Count;
2927 }
2928 
2929 template<typename T>
2930 void VmaRawList<T>::PopFront()
2931 {
2932  VMA_HEAVY_ASSERT(m_Count > 0);
2933  ItemType* const pFrontItem = m_pFront;
2934  ItemType* const pNextItem = pFrontItem->pNext;
2935  if(pNextItem != VMA_NULL)
2936  {
2937  pNextItem->pPrev = VMA_NULL;
2938  }
2939  m_pFront = pNextItem;
2940  m_ItemAllocator.Free(pFrontItem);
2941  --m_Count;
2942 }
2943 
2944 template<typename T>
2945 void VmaRawList<T>::Remove(ItemType* pItem)
2946 {
2947  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2948  VMA_HEAVY_ASSERT(m_Count > 0);
2949 
2950  if(pItem->pPrev != VMA_NULL)
2951  {
2952  pItem->pPrev->pNext = pItem->pNext;
2953  }
2954  else
2955  {
2956  VMA_HEAVY_ASSERT(m_pFront == pItem);
2957  m_pFront = pItem->pNext;
2958  }
2959 
2960  if(pItem->pNext != VMA_NULL)
2961  {
2962  pItem->pNext->pPrev = pItem->pPrev;
2963  }
2964  else
2965  {
2966  VMA_HEAVY_ASSERT(m_pBack == pItem);
2967  m_pBack = pItem->pPrev;
2968  }
2969 
2970  m_ItemAllocator.Free(pItem);
2971  --m_Count;
2972 }
2973 
2974 template<typename T>
2975 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
2976 {
2977  if(pItem != VMA_NULL)
2978  {
2979  ItemType* const prevItem = pItem->pPrev;
2980  ItemType* const newItem = m_ItemAllocator.Alloc();
2981  newItem->pPrev = prevItem;
2982  newItem->pNext = pItem;
2983  pItem->pPrev = newItem;
2984  if(prevItem != VMA_NULL)
2985  {
2986  prevItem->pNext = newItem;
2987  }
2988  else
2989  {
2990  VMA_HEAVY_ASSERT(m_pFront == pItem);
2991  m_pFront = newItem;
2992  }
2993  ++m_Count;
2994  return newItem;
2995  }
2996  else
2997  return PushBack();
2998 }
2999 
3000 template<typename T>
3001 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3002 {
3003  if(pItem != VMA_NULL)
3004  {
3005  ItemType* const nextItem = pItem->pNext;
3006  ItemType* const newItem = m_ItemAllocator.Alloc();
3007  newItem->pNext = nextItem;
3008  newItem->pPrev = pItem;
3009  pItem->pNext = newItem;
3010  if(nextItem != VMA_NULL)
3011  {
3012  nextItem->pPrev = newItem;
3013  }
3014  else
3015  {
3016  VMA_HEAVY_ASSERT(m_pBack == pItem);
3017  m_pBack = newItem;
3018  }
3019  ++m_Count;
3020  return newItem;
3021  }
3022  else
3023  return PushFront();
3024 }
3025 
3026 template<typename T>
3027 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3028 {
3029  ItemType* const newItem = InsertBefore(pItem);
3030  newItem->Value = value;
3031  return newItem;
3032 }
3033 
3034 template<typename T>
3035 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3036 {
3037  ItemType* const newItem = InsertAfter(pItem);
3038  newItem->Value = value;
3039  return newItem;
3040 }
3041 
3042 template<typename T, typename AllocatorT>
3043 class VmaList
3044 {
3045 public:
3046  class iterator
3047  {
3048  public:
3049  iterator() :
3050  m_pList(VMA_NULL),
3051  m_pItem(VMA_NULL)
3052  {
3053  }
3054 
3055  T& operator*() const
3056  {
3057  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3058  return m_pItem->Value;
3059  }
3060  T* operator->() const
3061  {
3062  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3063  return &m_pItem->Value;
3064  }
3065 
3066  iterator& operator++()
3067  {
3068  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3069  m_pItem = m_pItem->pNext;
3070  return *this;
3071  }
3072  iterator& operator--()
3073  {
3074  if(m_pItem != VMA_NULL)
3075  {
3076  m_pItem = m_pItem->pPrev;
3077  }
3078  else
3079  {
3080  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3081  m_pItem = m_pList->Back();
3082  }
3083  return *this;
3084  }
3085 
3086  iterator operator++(int)
3087  {
3088  iterator result = *this;
3089  ++*this;
3090  return result;
3091  }
3092  iterator operator--(int)
3093  {
3094  iterator result = *this;
3095  --*this;
3096  return result;
3097  }
3098 
3099  bool operator==(const iterator& rhs) const
3100  {
3101  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3102  return m_pItem == rhs.m_pItem;
3103  }
3104  bool operator!=(const iterator& rhs) const
3105  {
3106  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3107  return m_pItem != rhs.m_pItem;
3108  }
3109 
3110  private:
3111  VmaRawList<T>* m_pList;
3112  VmaListItem<T>* m_pItem;
3113 
3114  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3115  m_pList(pList),
3116  m_pItem(pItem)
3117  {
3118  }
3119 
3120  friend class VmaList<T, AllocatorT>;
3121  };
3122 
3123  class const_iterator
3124  {
3125  public:
3126  const_iterator() :
3127  m_pList(VMA_NULL),
3128  m_pItem(VMA_NULL)
3129  {
3130  }
3131 
3132  const_iterator(const iterator& src) :
3133  m_pList(src.m_pList),
3134  m_pItem(src.m_pItem)
3135  {
3136  }
3137 
3138  const T& operator*() const
3139  {
3140  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3141  return m_pItem->Value;
3142  }
3143  const T* operator->() const
3144  {
3145  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3146  return &m_pItem->Value;
3147  }
3148 
3149  const_iterator& operator++()
3150  {
3151  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3152  m_pItem = m_pItem->pNext;
3153  return *this;
3154  }
3155  const_iterator& operator--()
3156  {
3157  if(m_pItem != VMA_NULL)
3158  {
3159  m_pItem = m_pItem->pPrev;
3160  }
3161  else
3162  {
3163  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3164  m_pItem = m_pList->Back();
3165  }
3166  return *this;
3167  }
3168 
3169  const_iterator operator++(int)
3170  {
3171  const_iterator result = *this;
3172  ++*this;
3173  return result;
3174  }
3175  const_iterator operator--(int)
3176  {
3177  const_iterator result = *this;
3178  --*this;
3179  return result;
3180  }
3181 
3182  bool operator==(const const_iterator& rhs) const
3183  {
3184  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3185  return m_pItem == rhs.m_pItem;
3186  }
3187  bool operator!=(const const_iterator& rhs) const
3188  {
3189  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3190  return m_pItem != rhs.m_pItem;
3191  }
3192 
3193  private:
3194  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3195  m_pList(pList),
3196  m_pItem(pItem)
3197  {
3198  }
3199 
3200  const VmaRawList<T>* m_pList;
3201  const VmaListItem<T>* m_pItem;
3202 
3203  friend class VmaList<T, AllocatorT>;
3204  };
3205 
3206  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3207 
3208  bool empty() const { return m_RawList.IsEmpty(); }
3209  size_t size() const { return m_RawList.GetCount(); }
3210 
3211  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3212  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3213 
3214  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3215  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3216 
3217  void clear() { m_RawList.Clear(); }
3218  void push_back(const T& value) { m_RawList.PushBack(value); }
3219  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3220  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3221 
3222 private:
3223  VmaRawList<T> m_RawList;
3224 };
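// A minimal usage sketch (illustrative only, not part of the library's public
// interface): VmaList replaces std::list when VMA_USE_STL_LIST is 0, routing
// all node allocations through the library's allocation callbacks via
// VmaStlAllocator:
//
//   VmaStlAllocator<int> alloc(pAllocationCallbacks); // callbacks assumed available
//   VmaList< int, VmaStlAllocator<int> > list(alloc);
//   list.push_back(42);
//   for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin();
//       it != list.end(); ++it)
//   {
//       // *it == 42
//   }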
3225 
3226 #endif // #if VMA_USE_STL_LIST
3227 
3228 ////////////////////////////////////////////////////////////////////////////////
3229 // class VmaMap
3230 
3231 // Unused in this version.
3232 #if 0
3233 
3234 #if VMA_USE_STL_UNORDERED_MAP
3235 
3236 #define VmaPair std::pair
3237 
3238 #define VMA_MAP_TYPE(KeyT, ValueT) \
3239  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3240 
3241 #else // #if VMA_USE_STL_UNORDERED_MAP
3242 
3243 template<typename T1, typename T2>
3244 struct VmaPair
3245 {
3246  T1 first;
3247  T2 second;
3248 
3249  VmaPair() : first(), second() { }
3250  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3251 };
3252 
3253 /* Class compatible with a subset of the interface of std::unordered_map.
3254 KeyT, ValueT must be POD because they will be stored in VmaVector.
3255 */
3256 template<typename KeyT, typename ValueT>
3257 class VmaMap
3258 {
3259 public:
3260  typedef VmaPair<KeyT, ValueT> PairType;
3261  typedef PairType* iterator;
3262 
3263  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3264 
3265  iterator begin() { return m_Vector.begin(); }
3266  iterator end() { return m_Vector.end(); }
3267 
3268  void insert(const PairType& pair);
3269  iterator find(const KeyT& key);
3270  void erase(iterator it);
3271 
3272 private:
3273  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3274 };
3275 
3276 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3277 
3278 template<typename FirstT, typename SecondT>
3279 struct VmaPairFirstLess
3280 {
3281  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3282  {
3283  return lhs.first < rhs.first;
3284  }
3285  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3286  {
3287  return lhs.first < rhsFirst;
3288  }
3289 };
3290 
3291 template<typename KeyT, typename ValueT>
3292 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3293 {
3294  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3295  m_Vector.data(),
3296  m_Vector.data() + m_Vector.size(),
3297  pair,
3298  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3299  VmaVectorInsert(m_Vector, indexToInsert, pair);
3300 }
3301 
3302 template<typename KeyT, typename ValueT>
3303 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3304 {
3305  PairType* it = VmaBinaryFindFirstNotLess(
3306  m_Vector.data(),
3307  m_Vector.data() + m_Vector.size(),
3308  key,
3309  VmaPairFirstLess<KeyT, ValueT>());
3310  if((it != m_Vector.end()) && (it->first == key))
3311  {
3312  return it;
3313  }
3314  else
3315  {
3316  return m_Vector.end();
3317  }
3318 }
3319 
3320 template<typename KeyT, typename ValueT>
3321 void VmaMap<KeyT, ValueT>::erase(iterator it)
3322 {
3323  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3324 }
3325 
3326 #endif // #if VMA_USE_STL_UNORDERED_MAP
3327 
3328 #endif // #if 0
3329 
3330 ////////////////////////////////////////////////////////////////////////////////
3331 
3332 class VmaDeviceMemoryBlock;
3333 
3334 struct VmaAllocation_T
3335 {
3336 private:
3337  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3338 
3339  enum FLAGS
3340  {
3341  FLAG_USER_DATA_STRING = 0x01,
3342  };
3343 
3344 public:
3345  enum ALLOCATION_TYPE
3346  {
3347  ALLOCATION_TYPE_NONE,
3348  ALLOCATION_TYPE_BLOCK,
3349  ALLOCATION_TYPE_DEDICATED,
3350  };
3351 
3352  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3353  m_Alignment(1),
3354  m_Size(0),
3355  m_pUserData(VMA_NULL),
3356  m_LastUseFrameIndex(currentFrameIndex),
3357  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3358  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3359  m_MapCount(0),
3360  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3361  {
3362  }
3363 
3364  ~VmaAllocation_T()
3365  {
3366  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3367 
3368  // Check if owned string was freed.
3369  VMA_ASSERT(m_pUserData == VMA_NULL);
3370  }
3371 
3372  void InitBlockAllocation(
3373  VmaPool hPool,
3374  VmaDeviceMemoryBlock* block,
3375  VkDeviceSize offset,
3376  VkDeviceSize alignment,
3377  VkDeviceSize size,
3378  VmaSuballocationType suballocationType,
3379  bool mapped,
3380  bool canBecomeLost)
3381  {
3382  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3383  VMA_ASSERT(block != VMA_NULL);
3384  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3385  m_Alignment = alignment;
3386  m_Size = size;
3387  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3388  m_SuballocationType = (uint8_t)suballocationType;
3389  m_BlockAllocation.m_hPool = hPool;
3390  m_BlockAllocation.m_Block = block;
3391  m_BlockAllocation.m_Offset = offset;
3392  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3393  }
3394 
3395  void InitLost()
3396  {
3397  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3398  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3399  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3400  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3401  m_BlockAllocation.m_Block = VMA_NULL;
3402  m_BlockAllocation.m_Offset = 0;
3403  m_BlockAllocation.m_CanBecomeLost = true;
3404  }
3405 
3406  void ChangeBlockAllocation(
3407  VmaAllocator hAllocator,
3408  VmaDeviceMemoryBlock* block,
3409  VkDeviceSize offset);
3410 
3411  // Non-null pMappedData means the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3412  void InitDedicatedAllocation(
3413  uint32_t memoryTypeIndex,
3414  VkDeviceMemory hMemory,
3415  VmaSuballocationType suballocationType,
3416  void* pMappedData,
3417  VkDeviceSize size)
3418  {
3419  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3420  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3421  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3422  m_Alignment = 0;
3423  m_Size = size;
3424  m_SuballocationType = (uint8_t)suballocationType;
3425  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3426  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3427  m_DedicatedAllocation.m_hMemory = hMemory;
3428  m_DedicatedAllocation.m_pMappedData = pMappedData;
3429  }
3430 
3431  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3432  VkDeviceSize GetAlignment() const { return m_Alignment; }
3433  VkDeviceSize GetSize() const { return m_Size; }
3434  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3435  void* GetUserData() const { return m_pUserData; }
3436  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3437  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3438 
3439  VmaDeviceMemoryBlock* GetBlock() const
3440  {
3441  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3442  return m_BlockAllocation.m_Block;
3443  }
3444  VkDeviceSize GetOffset() const;
3445  VkDeviceMemory GetMemory() const;
3446  uint32_t GetMemoryTypeIndex() const;
3447  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3448  void* GetMappedData() const;
3449  bool CanBecomeLost() const;
3450  VmaPool GetPool() const;
3451 
3452  uint32_t GetLastUseFrameIndex() const
3453  {
3454  return m_LastUseFrameIndex.load();
3455  }
3456  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3457  {
3458  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3459  }
3460  /*
3461  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3462  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3463  - Else, returns false.
3464 
3465  If hAllocation is already lost, assert - you should not call it then.
3466  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3467  */
3468  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3469 
3470  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3471  {
3472  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3473  outInfo.blockCount = 1;
3474  outInfo.allocationCount = 1;
3475  outInfo.unusedRangeCount = 0;
3476  outInfo.usedBytes = m_Size;
3477  outInfo.unusedBytes = 0;
3478  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3479  outInfo.unusedRangeSizeMin = UINT64_MAX;
3480  outInfo.unusedRangeSizeMax = 0;
3481  }
3482 
3483  void BlockAllocMap();
3484  void BlockAllocUnmap();
3485  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3486  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3487 
3488 private:
3489  VkDeviceSize m_Alignment;
3490  VkDeviceSize m_Size;
3491  void* m_pUserData;
3492  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3493  uint8_t m_Type; // ALLOCATION_TYPE
3494  uint8_t m_SuballocationType; // VmaSuballocationType
3495  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3496  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3497  uint8_t m_MapCount;
3498  uint8_t m_Flags; // enum FLAGS
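 // Illustration of the m_MapCount encoding described above: an allocation
 // created with VMA_ALLOCATION_CREATE_MAPPED_BIT and then mapped twice more
 // via vmaMapMemory() stores m_MapCount = 0x80 | 2 = 0x82. The plain
 // reference count is (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2 and
 // may grow up to 0x7F; see BlockAllocMap() / DedicatedAllocMap() below.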
3499 
3500  // Allocation out of VmaDeviceMemoryBlock.
3501  struct BlockAllocation
3502  {
3503  VmaPool m_hPool; // Null if belongs to general memory.
3504  VmaDeviceMemoryBlock* m_Block;
3505  VkDeviceSize m_Offset;
3506  bool m_CanBecomeLost;
3507  };
3508 
3509  // Allocation for an object that has its own private VkDeviceMemory.
3510  struct DedicatedAllocation
3511  {
3512  uint32_t m_MemoryTypeIndex;
3513  VkDeviceMemory m_hMemory;
3514  void* m_pMappedData; // Not null means memory is mapped.
3515  };
3516 
3517  union
3518  {
3519  // Allocation out of VmaDeviceMemoryBlock.
3520  BlockAllocation m_BlockAllocation;
3521  // Allocation for an object that has its own private VkDeviceMemory.
3522  DedicatedAllocation m_DedicatedAllocation;
3523  };
3524 
3525  void FreeUserDataString(VmaAllocator hAllocator);
3526 };
3527 
3528 /*
3529 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
3530 allocation (and returned to the user as allocated memory) or free.
3531 */
3532 struct VmaSuballocation
3533 {
3534  VkDeviceSize offset;
3535  VkDeviceSize size;
3536  VmaAllocation hAllocation;
3537  VmaSuballocationType type;
3538 };
3539 
3540 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3541 
3542 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
3543 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3544 
3545 /*
3546 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3547 
3548 If canMakeOtherLost was false:
3549 - item points to a FREE suballocation.
3550 - itemsToMakeLostCount is 0.
3551 
3552 If canMakeOtherLost was true:
3553 - item points to first of sequence of suballocations, which are either FREE,
3554  or point to VmaAllocations that can become lost.
3555 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3556  the requested allocation to succeed.
3557 */
3558 struct VmaAllocationRequest
3559 {
3560  VkDeviceSize offset;
3561  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3562  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3563  VmaSuballocationList::iterator item;
3564  size_t itemsToMakeLostCount;
3565 
3566  VkDeviceSize CalcCost() const
3567  {
3568  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3569  }
3570 };
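// Worked example with hypothetical numbers: a request that would make 2
// allocations lost, whose overlapping sizes sum to sumItemSize = 3 MiB,
// costs CalcCost() = 3 MiB + 2 * VMA_LOST_ALLOCATION_COST (1 MiB) = 5 MiB.
// Expressing lost allocations in byte-equivalents this way presumably lets
// the allocator prefer requests that sacrifice fewer live allocations even
// when they free slightly less memory.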
3571 
3572 /*
3573 Data structure used for bookkeeping of allocations and unused ranges of memory
3574 in a single VkDeviceMemory block.
3575 */
3576 class VmaBlockMetadata
3577 {
3578 public:
3579  VmaBlockMetadata(VmaAllocator hAllocator);
3580  ~VmaBlockMetadata();
3581  void Init(VkDeviceSize size);
3582 
3583  // Validates all data structures inside this object. If not valid, returns false.
3584  bool Validate() const;
3585  VkDeviceSize GetSize() const { return m_Size; }
3586  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3587  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3588  VkDeviceSize GetUnusedRangeSizeMax() const;
3589  // Returns true if this block is empty - contains only a single free suballocation.
3590  bool IsEmpty() const;
3591 
3592  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3593  void AddPoolStats(VmaPoolStats& inoutStats) const;
3594 
3595 #if VMA_STATS_STRING_ENABLED
3596  void PrintDetailedMap(class VmaJsonWriter& json) const;
3597 #endif
3598 
3599  // Creates a trivial request for the case when the block is empty.
3600  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3601 
3602  // Tries to find a place for suballocation with given parameters inside this block.
3603  // If succeeded, fills pAllocationRequest and returns true.
3604  // If failed, returns false.
3605  bool CreateAllocationRequest(
3606  uint32_t currentFrameIndex,
3607  uint32_t frameInUseCount,
3608  VkDeviceSize bufferImageGranularity,
3609  VkDeviceSize allocSize,
3610  VkDeviceSize allocAlignment,
3611  VmaSuballocationType allocType,
3612  bool canMakeOtherLost,
3613  VmaAllocationRequest* pAllocationRequest);
3614 
3615  bool MakeRequestedAllocationsLost(
3616  uint32_t currentFrameIndex,
3617  uint32_t frameInUseCount,
3618  VmaAllocationRequest* pAllocationRequest);
3619 
3620  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3621 
3622  // Makes actual allocation based on request. Request must already be checked and valid.
3623  void Alloc(
3624  const VmaAllocationRequest& request,
3625  VmaSuballocationType type,
3626  VkDeviceSize allocSize,
3627  VmaAllocation hAllocation);
3628 
3629  // Frees suballocation assigned to given memory region.
3630  void Free(const VmaAllocation allocation);
3631  void FreeAtOffset(VkDeviceSize offset);
3632 
3633 private:
3634  VkDeviceSize m_Size;
3635  uint32_t m_FreeCount;
3636  VkDeviceSize m_SumFreeSize;
3637  VmaSuballocationList m_Suballocations;
3638  // Suballocations that are free and have size greater than certain threshold.
3639  // Sorted by size, ascending.
3640  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3641 
3642  bool ValidateFreeSuballocationList() const;
3643 
3644  // Checks if a requested suballocation with given parameters can be placed at given suballocItem.
3645  // If yes, fills pOffset and returns true. If no, returns false.
3646  bool CheckAllocation(
3647  uint32_t currentFrameIndex,
3648  uint32_t frameInUseCount,
3649  VkDeviceSize bufferImageGranularity,
3650  VkDeviceSize allocSize,
3651  VkDeviceSize allocAlignment,
3652  VmaSuballocationType allocType,
3653  VmaSuballocationList::const_iterator suballocItem,
3654  bool canMakeOtherLost,
3655  VkDeviceSize* pOffset,
3656  size_t* itemsToMakeLostCount,
3657  VkDeviceSize* pSumFreeSize,
3658  VkDeviceSize* pSumItemSize) const;
3659  // Merges given free suballocation with the following one, which must also be free.
3660  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3661  // Releases given suballocation, making it free.
3662  // Merges it with adjacent free suballocations if applicable.
3663  // Returns iterator to new free suballocation at this place.
3664  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3665  // Inserts given free suballocation into the sorted list
3666  // m_FreeSuballocationsBySize if it is large enough to be registered there.
3667  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3668  // Removes given free suballocation from the sorted list
3669  // m_FreeSuballocationsBySize if it was registered there.
3670  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3671 };
3672 
3673 // Helper class that represents mapped memory. Synchronized internally.
3674 class VmaDeviceMemoryMapping
3675 {
3676 public:
3677  VmaDeviceMemoryMapping();
3678  ~VmaDeviceMemoryMapping();
3679 
3680  void* GetMappedData() const { return m_pMappedData; }
3681 
3682  // ppData can be null.
3683  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3684  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3685 
3686 private:
3687  VMA_MUTEX m_Mutex;
3688  uint32_t m_MapCount;
3689  void* m_pMappedData;
3690 };
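// Semantics implied by the interface above (informal sketch): Map()/Unmap()
// are reference-counted, so vkMapMemory() should happen only on the 0 -> 1
// transition and vkUnmapMemory() only on the 1 -> 0 transition; intermediate
// calls just reuse the cached m_pMappedData. The count parameter lets a
// caller add or release several references at once, as done e.g. in
// VmaAllocation_T::ChangeBlockAllocation() when moving a mapped allocation
// between blocks.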
3691 
3692 /*
3693 Represents a single block of device memory (`VkDeviceMemory`) with all the
3694 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3695 
3696 Thread-safety: This class must be externally synchronized.
3697 */
3698 class VmaDeviceMemoryBlock
3699 {
3700 public:
3701  uint32_t m_MemoryTypeIndex;
3702  VkDeviceMemory m_hMemory;
3703  VmaDeviceMemoryMapping m_Mapping;
3704  VmaBlockMetadata m_Metadata;
3705 
3706  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3707 
3708  ~VmaDeviceMemoryBlock()
3709  {
3710  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3711  }
3712 
3713  // Always call after construction.
3714  void Init(
3715  uint32_t newMemoryTypeIndex,
3716  VkDeviceMemory newMemory,
3717  VkDeviceSize newSize);
3718  // Always call before destruction.
3719  void Destroy(VmaAllocator allocator);
3720 
3721  // Validates all data structures inside this object. If not valid, returns false.
3722  bool Validate() const;
3723 
3724  // ppData can be null.
3725  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3726  void Unmap(VmaAllocator hAllocator, uint32_t count);
3727 };
3728 
3729 struct VmaPointerLess
3730 {
3731  bool operator()(const void* lhs, const void* rhs) const
3732  {
3733  return lhs < rhs;
3734  }
3735 };
3736 
3737 class VmaDefragmentator;
3738 
3739 /*
3740 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3741 Vulkan memory type.
3742 
3743 Synchronized internally with a mutex.
3744 */
3745 struct VmaBlockVector
3746 {
3747  VmaBlockVector(
3748  VmaAllocator hAllocator,
3749  uint32_t memoryTypeIndex,
3750  VkDeviceSize preferredBlockSize,
3751  size_t minBlockCount,
3752  size_t maxBlockCount,
3753  VkDeviceSize bufferImageGranularity,
3754  uint32_t frameInUseCount,
3755  bool isCustomPool);
3756  ~VmaBlockVector();
3757 
3758  VkResult CreateMinBlocks();
3759 
3760  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3761  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3762  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3763  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3764 
3765  void GetPoolStats(VmaPoolStats* pStats);
3766 
3767  bool IsEmpty() const { return m_Blocks.empty(); }
3768 
3769  VkResult Allocate(
3770  VmaPool hCurrentPool,
3771  uint32_t currentFrameIndex,
3772  const VkMemoryRequirements& vkMemReq,
3773  const VmaAllocationCreateInfo& createInfo,
3774  VmaSuballocationType suballocType,
3775  VmaAllocation* pAllocation);
3776 
3777  void Free(
3778  VmaAllocation hAllocation);
3779 
3780  // Adds statistics of this BlockVector to pStats.
3781  void AddStats(VmaStats* pStats);
3782 
3783 #if VMA_STATS_STRING_ENABLED
3784  void PrintDetailedMap(class VmaJsonWriter& json);
3785 #endif
3786 
3787  void MakePoolAllocationsLost(
3788  uint32_t currentFrameIndex,
3789  size_t* pLostAllocationCount);
3790 
3791  VmaDefragmentator* EnsureDefragmentator(
3792  VmaAllocator hAllocator,
3793  uint32_t currentFrameIndex);
3794 
3795  VkResult Defragment(
3796  VmaDefragmentationStats* pDefragmentationStats,
3797  VkDeviceSize& maxBytesToMove,
3798  uint32_t& maxAllocationsToMove);
3799 
3800  void DestroyDefragmentator();
3801 
3802 private:
3803  friend class VmaDefragmentator;
3804 
3805  const VmaAllocator m_hAllocator;
3806  const uint32_t m_MemoryTypeIndex;
3807  const VkDeviceSize m_PreferredBlockSize;
3808  const size_t m_MinBlockCount;
3809  const size_t m_MaxBlockCount;
3810  const VkDeviceSize m_BufferImageGranularity;
3811  const uint32_t m_FrameInUseCount;
3812  const bool m_IsCustomPool;
3813  VMA_MUTEX m_Mutex;
3814  // Incrementally sorted by sumFreeSize, ascending.
3815  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3816  /* There can be at most one block that is completely empty - a
3817  hysteresis to avoid the pessimistic case of alternating creation and
3818  destruction of a VkDeviceMemory. */
3819  bool m_HasEmptyBlock;
3820  VmaDefragmentator* m_pDefragmentator;
3821 
3822  size_t CalcMaxBlockSize() const;
3823 
3824  // Finds and removes given block from vector.
3825  void Remove(VmaDeviceMemoryBlock* pBlock);
3826 
3827  // Performs single step in sorting m_Blocks. They may not be fully sorted
3828  // after this call.
3829  void IncrementallySortBlocks();
3830 
3831  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3832 };
3833 
3834 struct VmaPool_T
3835 {
3836 public:
3837  VmaBlockVector m_BlockVector;
3838 
3839  // Takes ownership.
3840  VmaPool_T(
3841  VmaAllocator hAllocator,
3842  const VmaPoolCreateInfo& createInfo);
3843  ~VmaPool_T();
3844 
3845  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3846 
3847 #if VMA_STATS_STRING_ENABLED
3848  //void PrintDetailedMap(class VmaStringBuilder& sb);
3849 #endif
3850 };
3851 
3852 class VmaDefragmentator
3853 {
3854  const VmaAllocator m_hAllocator;
3855  VmaBlockVector* const m_pBlockVector;
3856  uint32_t m_CurrentFrameIndex;
3857  VkDeviceSize m_BytesMoved;
3858  uint32_t m_AllocationsMoved;
3859 
3860  struct AllocationInfo
3861  {
3862  VmaAllocation m_hAllocation;
3863  VkBool32* m_pChanged;
3864 
3865  AllocationInfo() :
3866  m_hAllocation(VK_NULL_HANDLE),
3867  m_pChanged(VMA_NULL)
3868  {
3869  }
3870  };
3871 
3872  struct AllocationInfoSizeGreater
3873  {
3874  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3875  {
3876  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3877  }
3878  };
3879 
3880  // Used between AddAllocation and Defragment.
3881  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3882 
3883  struct BlockInfo
3884  {
3885  VmaDeviceMemoryBlock* m_pBlock;
3886  bool m_HasNonMovableAllocations;
3887  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3888 
3889  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3890  m_pBlock(VMA_NULL),
3891  m_HasNonMovableAllocations(true),
3892  m_Allocations(pAllocationCallbacks),
3893  m_pMappedDataForDefragmentation(VMA_NULL)
3894  {
3895  }
3896 
3897  void CalcHasNonMovableAllocations()
3898  {
3899  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3900  const size_t defragmentAllocCount = m_Allocations.size();
3901  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3902  }
3903 
3904  void SortAllocationsBySizeDescecnding()
3905  {
3906  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3907  }
3908 
3909  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3910  void Unmap(VmaAllocator hAllocator);
3911 
3912  private:
3913  // Not null if this block was mapped only for defragmentation, i.e. it was not originally mapped.
3914  void* m_pMappedDataForDefragmentation;
3915  };
3916 
3917  struct BlockPointerLess
3918  {
3919  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3920  {
3921  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3922  }
3923  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3924  {
3925  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3926  }
3927  };
3928 
3929  // 1. Blocks with some non-movable allocations go first.
3930  // 2. Blocks with smaller sumFreeSize go first.
3931  struct BlockInfoCompareMoveDestination
3932  {
3933  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3934  {
3935  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3936  {
3937  return true;
3938  }
3939  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3940  {
3941  return false;
3942  }
3943  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3944  {
3945  return true;
3946  }
3947  return false;
3948  }
3949  };
3950 
3951  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3952  BlockInfoVector m_Blocks;
3953 
3954  VkResult DefragmentRound(
3955  VkDeviceSize maxBytesToMove,
3956  uint32_t maxAllocationsToMove);
3957 
3958  static bool MoveMakesSense(
3959  size_t dstBlockIndex, VkDeviceSize dstOffset,
3960  size_t srcBlockIndex, VkDeviceSize srcOffset);
3961 
3962 public:
3963  VmaDefragmentator(
3964  VmaAllocator hAllocator,
3965  VmaBlockVector* pBlockVector,
3966  uint32_t currentFrameIndex);
3967 
3968  ~VmaDefragmentator();
3969 
3970  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
3971  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
3972 
3973  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
3974 
3975  VkResult Defragment(
3976  VkDeviceSize maxBytesToMove,
3977  uint32_t maxAllocationsToMove);
3978 };
3979 
3980 // Main allocator object.
3981 struct VmaAllocator_T
3982 {
3983  bool m_UseMutex;
3984  bool m_UseKhrDedicatedAllocation;
3985  VkDevice m_hDevice;
3986  bool m_AllocationCallbacksSpecified;
3987  VkAllocationCallbacks m_AllocationCallbacks;
3988  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
3989 
3990  // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
3991  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
3992  VMA_MUTEX m_HeapSizeLimitMutex;
3993 
3994  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
3995  VkPhysicalDeviceMemoryProperties m_MemProps;
3996 
3997  // Default pools.
3998  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
3999 
4000  // Each vector is sorted by memory (handle value).
4001  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4002  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4003  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4004 
4005  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4006  ~VmaAllocator_T();
4007 
4008  const VkAllocationCallbacks* GetAllocationCallbacks() const
4009  {
4010  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4011  }
4012  const VmaVulkanFunctions& GetVulkanFunctions() const
4013  {
4014  return m_VulkanFunctions;
4015  }
4016 
4017  VkDeviceSize GetBufferImageGranularity() const
4018  {
4019  return VMA_MAX(
4020  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4021  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4022  }
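 // Example: if the device reports limits.bufferImageGranularity = 1024 and
 // VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY is left at its default (assumed to
 // be 1), this returns 1024. Defining the macro above the device limit forces
 // an artificially larger granularity, which can be useful for debugging.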
4023 
4024  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4025  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4026 
4027  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4028  {
4029  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4030  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4031  }
4032 
4033  void GetBufferMemoryRequirements(
4034  VkBuffer hBuffer,
4035  VkMemoryRequirements& memReq,
4036  bool& requiresDedicatedAllocation,
4037  bool& prefersDedicatedAllocation) const;
4038  void GetImageMemoryRequirements(
4039  VkImage hImage,
4040  VkMemoryRequirements& memReq,
4041  bool& requiresDedicatedAllocation,
4042  bool& prefersDedicatedAllocation) const;
4043 
4044  // Main allocation function.
4045  VkResult AllocateMemory(
4046  const VkMemoryRequirements& vkMemReq,
4047  bool requiresDedicatedAllocation,
4048  bool prefersDedicatedAllocation,
4049  VkBuffer dedicatedBuffer,
4050  VkImage dedicatedImage,
4051  const VmaAllocationCreateInfo& createInfo,
4052  VmaSuballocationType suballocType,
4053  VmaAllocation* pAllocation);
4054 
4055  // Main deallocation function.
4056  void FreeMemory(const VmaAllocation allocation);
4057 
4058  void CalculateStats(VmaStats* pStats);
4059 
4060 #if VMA_STATS_STRING_ENABLED
4061  void PrintDetailedMap(class VmaJsonWriter& json);
4062 #endif
4063 
4064  VkResult Defragment(
4065  VmaAllocation* pAllocations,
4066  size_t allocationCount,
4067  VkBool32* pAllocationsChanged,
4068  const VmaDefragmentationInfo* pDefragmentationInfo,
4069  VmaDefragmentationStats* pDefragmentationStats);
4070 
4071  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4072 
4073  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4074  void DestroyPool(VmaPool pool);
4075  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4076 
4077  void SetCurrentFrameIndex(uint32_t frameIndex);
4078 
4079  void MakePoolAllocationsLost(
4080  VmaPool hPool,
4081  size_t* pLostAllocationCount);
4082 
4083  void CreateLostAllocation(VmaAllocation* pAllocation);
4084 
4085  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4086  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4087 
4088  VkResult Map(VmaAllocation hAllocation, void** ppData);
4089  void Unmap(VmaAllocation hAllocation);
4090 
4091 private:
4092  VkDeviceSize m_PreferredLargeHeapBlockSize;
4093 
4094  VkPhysicalDevice m_PhysicalDevice;
4095  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4096 
4097  VMA_MUTEX m_PoolsMutex;
4098  // Protected by m_PoolsMutex. Sorted by pointer value.
4099  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4100 
4101  VmaVulkanFunctions m_VulkanFunctions;
4102 
4103  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4104 
4105  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4106 
4107  VkResult AllocateMemoryOfType(
4108  const VkMemoryRequirements& vkMemReq,
4109  bool dedicatedAllocation,
4110  VkBuffer dedicatedBuffer,
4111  VkImage dedicatedImage,
4112  const VmaAllocationCreateInfo& createInfo,
4113  uint32_t memTypeIndex,
4114  VmaSuballocationType suballocType,
4115  VmaAllocation* pAllocation);
4116 
4117  // Allocates and registers new VkDeviceMemory specifically for single allocation.
4118  VkResult AllocateDedicatedMemory(
4119  VkDeviceSize size,
4120  VmaSuballocationType suballocType,
4121  uint32_t memTypeIndex,
4122  bool map,
4123  bool isUserDataString,
4124  void* pUserData,
4125  VkBuffer dedicatedBuffer,
4126  VkImage dedicatedImage,
4127  VmaAllocation* pAllocation);
4128 
4129  // Frees device memory of given dedicated allocation and unregisters it.
4130  void FreeDedicatedMemory(VmaAllocation allocation);
4131 };
4132 
4133 ////////////////////////////////////////////////////////////////////////////////
4134 // Memory allocation #2 after VmaAllocator_T definition
4135 
4136 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4137 {
4138  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4139 }
4140 
4141 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4142 {
4143  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4144 }
4145 
4146 template<typename T>
4147 static T* VmaAllocate(VmaAllocator hAllocator)
4148 {
4149  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4150 }
4151 
4152 template<typename T>
4153 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4154 {
4155  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4156 }
4157 
4158 template<typename T>
4159 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4160 {
4161  if(ptr != VMA_NULL)
4162  {
4163  ptr->~T();
4164  VmaFree(hAllocator, ptr);
4165  }
4166 }
4167 
4168 template<typename T>
4169 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4170 {
4171  if(ptr != VMA_NULL)
4172  {
4173  for(size_t i = count; i--; )
4174  ptr[i].~T();
4175  VmaFree(hAllocator, ptr);
4176  }
4177 }
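// Usage sketch: vma_new_array() (a companion macro defined earlier in this
// file) pairs with vma_delete_array() - allocation and construction are
// split, so deletion must run destructors before returning the memory:
//
//   char* s = vma_new_array(hAllocator, char, 16);
//   // ... use s ...
//   vma_delete_array(hAllocator, s, 16); // destroys elements, then VmaFree()
//
// See FreeUserDataString() below for a real call site.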
4178 
4179 ////////////////////////////////////////////////////////////////////////////////
4180 // VmaStringBuilder
4181 
4182 #if VMA_STATS_STRING_ENABLED
4183 
4184 class VmaStringBuilder
4185 {
4186 public:
4187  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4188  size_t GetLength() const { return m_Data.size(); }
4189  const char* GetData() const { return m_Data.data(); }
4190 
4191  void Add(char ch) { m_Data.push_back(ch); }
4192  void Add(const char* pStr);
4193  void AddNewLine() { Add('\n'); }
4194  void AddNumber(uint32_t num);
4195  void AddNumber(uint64_t num);
4196  void AddPointer(const void* ptr);
4197 
4198 private:
4199  VmaVector< char, VmaStlAllocator<char> > m_Data;
4200 };
4201 
4202 void VmaStringBuilder::Add(const char* pStr)
4203 {
4204  const size_t strLen = strlen(pStr);
4205  if(strLen > 0)
4206  {
4207  const size_t oldCount = m_Data.size();
4208  m_Data.resize(oldCount + strLen);
4209  memcpy(m_Data.data() + oldCount, pStr, strLen);
4210  }
4211 }
4212 
4213 void VmaStringBuilder::AddNumber(uint32_t num)
4214 {
4215  char buf[11];
4216  VmaUint32ToStr(buf, sizeof(buf), num);
4217  Add(buf);
4218 }
4219 
4220 void VmaStringBuilder::AddNumber(uint64_t num)
4221 {
4222  char buf[21];
4223  VmaUint64ToStr(buf, sizeof(buf), num);
4224  Add(buf);
4225 }
4226 
4227 void VmaStringBuilder::AddPointer(const void* ptr)
4228 {
4229  char buf[21];
4230  VmaPtrToStr(buf, sizeof(buf), ptr);
4231  Add(buf);
4232 }
4233 
4234 #endif // #if VMA_STATS_STRING_ENABLED
4235 
4236 ////////////////////////////////////////////////////////////////////////////////
4237 // VmaJsonWriter
4238 
4239 #if VMA_STATS_STRING_ENABLED
4240 
4241 class VmaJsonWriter
4242 {
4243 public:
4244  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4245  ~VmaJsonWriter();
4246 
4247  void BeginObject(bool singleLine = false);
4248  void EndObject();
4249 
4250  void BeginArray(bool singleLine = false);
4251  void EndArray();
4252 
4253  void WriteString(const char* pStr);
4254  void BeginString(const char* pStr = VMA_NULL);
4255  void ContinueString(const char* pStr);
4256  void ContinueString(uint32_t n);
4257  void ContinueString(uint64_t n);
4258  void ContinueString_Pointer(const void* ptr);
4259  void EndString(const char* pStr = VMA_NULL);
4260 
4261  void WriteNumber(uint32_t n);
4262  void WriteNumber(uint64_t n);
4263  void WriteBool(bool b);
4264  void WriteNull();
4265 
4266 private:
4267  static const char* const INDENT;
4268 
4269  enum COLLECTION_TYPE
4270  {
4271  COLLECTION_TYPE_OBJECT,
4272  COLLECTION_TYPE_ARRAY,
4273  };
4274  struct StackItem
4275  {
4276  COLLECTION_TYPE type;
4277  uint32_t valueCount;
4278  bool singleLineMode;
4279  };
4280 
4281  VmaStringBuilder& m_SB;
4282  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4283  bool m_InsideString;
4284 
4285  void BeginValue(bool isString);
4286  void WriteIndent(bool oneLess = false);
4287 };
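// Usage sketch (illustrative): inside an object, keys and values must
// alternate and keys must be strings - BeginValue() below asserts this.
// Assuming a valid VmaStringBuilder sb:
//
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Size"); // key
//   json.WriteNumber(42u);    // value
//   json.EndObject();
//   // sb now holds roughly: {\n "Size": 42\n}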
4288 
4289 const char* const VmaJsonWriter::INDENT = " ";
4290 
4291 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4292  m_SB(sb),
4293  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4294  m_InsideString(false)
4295 {
4296 }
4297 
4298 VmaJsonWriter::~VmaJsonWriter()
4299 {
4300  VMA_ASSERT(!m_InsideString);
4301  VMA_ASSERT(m_Stack.empty());
4302 }
4303 
4304 void VmaJsonWriter::BeginObject(bool singleLine)
4305 {
4306  VMA_ASSERT(!m_InsideString);
4307 
4308  BeginValue(false);
4309  m_SB.Add('{');
4310 
4311  StackItem item;
4312  item.type = COLLECTION_TYPE_OBJECT;
4313  item.valueCount = 0;
4314  item.singleLineMode = singleLine;
4315  m_Stack.push_back(item);
4316 }
4317 
4318 void VmaJsonWriter::EndObject()
4319 {
4320  VMA_ASSERT(!m_InsideString);
4321 
4322  WriteIndent(true);
4323  m_SB.Add('}');
4324 
4325  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4326  m_Stack.pop_back();
4327 }
4328 
4329 void VmaJsonWriter::BeginArray(bool singleLine)
4330 {
4331  VMA_ASSERT(!m_InsideString);
4332 
4333  BeginValue(false);
4334  m_SB.Add('[');
4335 
4336  StackItem item;
4337  item.type = COLLECTION_TYPE_ARRAY;
4338  item.valueCount = 0;
4339  item.singleLineMode = singleLine;
4340  m_Stack.push_back(item);
4341 }
4342 
4343 void VmaJsonWriter::EndArray()
4344 {
4345  VMA_ASSERT(!m_InsideString);
4346 
4347  WriteIndent(true);
4348  m_SB.Add(']');
4349 
4350  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4351  m_Stack.pop_back();
4352 }
4353 
4354 void VmaJsonWriter::WriteString(const char* pStr)
4355 {
4356  BeginString(pStr);
4357  EndString();
4358 }
4359 
4360 void VmaJsonWriter::BeginString(const char* pStr)
4361 {
4362  VMA_ASSERT(!m_InsideString);
4363 
4364  BeginValue(true);
4365  m_SB.Add('"');
4366  m_InsideString = true;
4367  if(pStr != VMA_NULL && pStr[0] != '\0')
4368  {
4369  ContinueString(pStr);
4370  }
4371 }
4372 
4373 void VmaJsonWriter::ContinueString(const char* pStr)
4374 {
4375  VMA_ASSERT(m_InsideString);
4376 
4377  const size_t strLen = strlen(pStr);
4378  for(size_t i = 0; i < strLen; ++i)
4379  {
4380  char ch = pStr[i];
4381  if(ch == '\\') // Backslash must itself be escaped in JSON.
4382  {
4383  m_SB.Add("\\\\");
4384  }
4385  else if(ch == '"')
4386  {
4387  m_SB.Add("\\\"");
4388  }
4389  else if(ch >= 32)
4390  {
4391  m_SB.Add(ch);
4392  }
4393  else switch(ch)
4394  {
4395  case '\b':
4396  m_SB.Add("\\b");
4397  break;
4398  case '\f':
4399  m_SB.Add("\\f");
4400  break;
4401  case '\n':
4402  m_SB.Add("\\n");
4403  break;
4404  case '\r':
4405  m_SB.Add("\\r");
4406  break;
4407  case '\t':
4408  m_SB.Add("\\t");
4409  break;
4410  default:
4411  VMA_ASSERT(0 && "Character not currently supported.");
4412  break;
4413  }
4414  }
4415 }
4416 
4417 void VmaJsonWriter::ContinueString(uint32_t n)
4418 {
4419  VMA_ASSERT(m_InsideString);
4420  m_SB.AddNumber(n);
4421 }
4422 
4423 void VmaJsonWriter::ContinueString(uint64_t n)
4424 {
4425  VMA_ASSERT(m_InsideString);
4426  m_SB.AddNumber(n);
4427 }
4428 
4429 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4430 {
4431  VMA_ASSERT(m_InsideString);
4432  m_SB.AddPointer(ptr);
4433 }
4434 
4435 void VmaJsonWriter::EndString(const char* pStr)
4436 {
4437  VMA_ASSERT(m_InsideString);
4438  if(pStr != VMA_NULL && pStr[0] != '\0')
4439  {
4440  ContinueString(pStr);
4441  }
4442  m_SB.Add('"');
4443  m_InsideString = false;
4444 }
4445 
4446 void VmaJsonWriter::WriteNumber(uint32_t n)
4447 {
4448  VMA_ASSERT(!m_InsideString);
4449  BeginValue(false);
4450  m_SB.AddNumber(n);
4451 }
4452 
4453 void VmaJsonWriter::WriteNumber(uint64_t n)
4454 {
4455  VMA_ASSERT(!m_InsideString);
4456  BeginValue(false);
4457  m_SB.AddNumber(n);
4458 }
4459 
4460 void VmaJsonWriter::WriteBool(bool b)
4461 {
4462  VMA_ASSERT(!m_InsideString);
4463  BeginValue(false);
4464  m_SB.Add(b ? "true" : "false");
4465 }
4466 
4467 void VmaJsonWriter::WriteNull()
4468 {
4469  VMA_ASSERT(!m_InsideString);
4470  BeginValue(false);
4471  m_SB.Add("null");
4472 }
4473 
4474 void VmaJsonWriter::BeginValue(bool isString)
4475 {
4476  if(!m_Stack.empty())
4477  {
4478  StackItem& currItem = m_Stack.back();
4479  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4480  currItem.valueCount % 2 == 0)
4481  {
4482  VMA_ASSERT(isString);
4483  }
4484 
4485  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4486  currItem.valueCount % 2 != 0)
4487  {
4488  m_SB.Add(": ");
4489  }
4490  else if(currItem.valueCount > 0)
4491  {
4492  m_SB.Add(", ");
4493  WriteIndent();
4494  }
4495  else
4496  {
4497  WriteIndent();
4498  }
4499  ++currItem.valueCount;
4500  }
4501 }
4502 
4503 void VmaJsonWriter::WriteIndent(bool oneLess)
4504 {
4505  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4506  {
4507  m_SB.AddNewLine();
4508 
4509  size_t count = m_Stack.size();
4510  if(count > 0 && oneLess)
4511  {
4512  --count;
4513  }
4514  for(size_t i = 0; i < count; ++i)
4515  {
4516  m_SB.Add(INDENT);
4517  }
4518  }
4519 }
4520 
4521 #endif // #if VMA_STATS_STRING_ENABLED
4522 
4523 ////////////////////////////////////////////////////////////////////////////////
4524 
4525 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4526 {
4527  if(IsUserDataString())
4528  {
4529  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4530 
4531  FreeUserDataString(hAllocator);
4532 
4533  if(pUserData != VMA_NULL)
4534  {
4535  const char* const newStrSrc = (char*)pUserData;
4536  const size_t newStrLen = strlen(newStrSrc);
4537  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4538  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4539  m_pUserData = newStrDst;
4540  }
4541  }
4542  else
4543  {
4544  m_pUserData = pUserData;
4545  }
4546 }
4547 
4548 void VmaAllocation_T::ChangeBlockAllocation(
4549  VmaAllocator hAllocator,
4550  VmaDeviceMemoryBlock* block,
4551  VkDeviceSize offset)
4552 {
4553  VMA_ASSERT(block != VMA_NULL);
4554  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4555 
4556  // Move mapping reference counter from old block to new block.
4557  if(block != m_BlockAllocation.m_Block)
4558  {
4559  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4560  if(IsPersistentMap())
4561  ++mapRefCount;
4562  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4563  block->Map(hAllocator, mapRefCount, VMA_NULL);
4564  }
4565 
4566  m_BlockAllocation.m_Block = block;
4567  m_BlockAllocation.m_Offset = offset;
4568 }
4569 
4570 VkDeviceSize VmaAllocation_T::GetOffset() const
4571 {
4572  switch(m_Type)
4573  {
4574  case ALLOCATION_TYPE_BLOCK:
4575  return m_BlockAllocation.m_Offset;
4576  case ALLOCATION_TYPE_DEDICATED:
4577  return 0;
4578  default:
4579  VMA_ASSERT(0);
4580  return 0;
4581  }
4582 }
4583 
4584 VkDeviceMemory VmaAllocation_T::GetMemory() const
4585 {
4586  switch(m_Type)
4587  {
4588  case ALLOCATION_TYPE_BLOCK:
4589  return m_BlockAllocation.m_Block->m_hMemory;
4590  case ALLOCATION_TYPE_DEDICATED:
4591  return m_DedicatedAllocation.m_hMemory;
4592  default:
4593  VMA_ASSERT(0);
4594  return VK_NULL_HANDLE;
4595  }
4596 }
4597 
4598 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4599 {
4600  switch(m_Type)
4601  {
4602  case ALLOCATION_TYPE_BLOCK:
4603  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4604  case ALLOCATION_TYPE_DEDICATED:
4605  return m_DedicatedAllocation.m_MemoryTypeIndex;
4606  default:
4607  VMA_ASSERT(0);
4608  return UINT32_MAX;
4609  }
4610 }
4611 
4612 void* VmaAllocation_T::GetMappedData() const
4613 {
4614  switch(m_Type)
4615  {
4616  case ALLOCATION_TYPE_BLOCK:
4617  if(m_MapCount != 0)
4618  {
4619  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4620  VMA_ASSERT(pBlockData != VMA_NULL);
4621  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4622  }
4623  else
4624  {
4625  return VMA_NULL;
4626  }
4627  break;
4628  case ALLOCATION_TYPE_DEDICATED:
4629  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4630  return m_DedicatedAllocation.m_pMappedData;
4631  default:
4632  VMA_ASSERT(0);
4633  return VMA_NULL;
4634  }
4635 }
4636 
4637 bool VmaAllocation_T::CanBecomeLost() const
4638 {
4639  switch(m_Type)
4640  {
4641  case ALLOCATION_TYPE_BLOCK:
4642  return m_BlockAllocation.m_CanBecomeLost;
4643  case ALLOCATION_TYPE_DEDICATED:
4644  return false;
4645  default:
4646  VMA_ASSERT(0);
4647  return false;
4648  }
4649 }
4650 
4651 VmaPool VmaAllocation_T::GetPool() const
4652 {
4653  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4654  return m_BlockAllocation.m_hPool;
4655 }
4656 
4657 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4658 {
4659  VMA_ASSERT(CanBecomeLost());
4660 
4661  /*
4662  Warning: This is a carefully designed algorithm.
4663  Do not modify unless you really know what you're doing :)
4664  */
4665  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4666  for(;;)
4667  {
4668  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4669  {
4670  VMA_ASSERT(0);
4671  return false;
4672  }
4673  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4674  {
4675  return false;
4676  }
4677  else // Last use time earlier than current time.
4678  {
4679  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4680  {
4681  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4682  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4683  return true;
4684  }
4685  }
4686  }
4687 }
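// Concurrency note (informal): if two threads race in MakeLost() for the
// same allocation, compare_exchange lets exactly one of them write
// VMA_FRAME_INDEX_LOST and return true. The loser's expected value is
// refreshed by the failed exchange, so its next loop iteration takes the
// VMA_FRAME_INDEX_LOST branch and returns false (with a debug assert, since
// callers are not supposed to call this on an already-lost allocation).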
4688 
4689 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4690 {
4691  VMA_ASSERT(IsUserDataString());
4692  if(m_pUserData != VMA_NULL)
4693  {
4694  char* const oldStr = (char*)m_pUserData;
4695  const size_t oldStrLen = strlen(oldStr);
4696  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4697  m_pUserData = VMA_NULL;
4698  }
4699 }
4700 
4701 void VmaAllocation_T::BlockAllocMap()
4702 {
4703  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4704 
4705  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4706  {
4707  ++m_MapCount;
4708  }
4709  else
4710  {
4711  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4712  }
4713 }
4714 
4715 void VmaAllocation_T::BlockAllocUnmap()
4716 {
4717  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4718 
4719  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4720  {
4721  --m_MapCount;
4722  }
4723  else
4724  {
4725  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4726  }
4727 }
4728 
4729 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4730 {
4731  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4732 
4733  if(m_MapCount != 0)
4734  {
4735  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4736  {
4737  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4738  *ppData = m_DedicatedAllocation.m_pMappedData;
4739  ++m_MapCount;
4740  return VK_SUCCESS;
4741  }
4742  else
4743  {
4744  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4745  return VK_ERROR_MEMORY_MAP_FAILED;
4746  }
4747  }
4748  else
4749  {
4750  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4751  hAllocator->m_hDevice,
4752  m_DedicatedAllocation.m_hMemory,
4753  0, // offset
4754  VK_WHOLE_SIZE,
4755  0, // flags
4756  ppData);
4757  if(result == VK_SUCCESS)
4758  {
4759  m_DedicatedAllocation.m_pMappedData = *ppData;
4760  m_MapCount = 1;
4761  }
4762  return result;
4763  }
4764 }
4765 
4766 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4767 {
4768  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4769 
4770  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4771  {
4772  --m_MapCount;
4773  if(m_MapCount == 0)
4774  {
4775  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4776  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4777  hAllocator->m_hDevice,
4778  m_DedicatedAllocation.m_hMemory);
4779  }
4780  }
4781  else
4782  {
4783  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4784  }
4785 }
4786 
4787 #if VMA_STATS_STRING_ENABLED
4788 
4789 // Names corresponding to values of enum VmaSuballocationType.
4790 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4791  "FREE",
4792  "UNKNOWN",
4793  "BUFFER",
4794  "IMAGE_UNKNOWN",
4795  "IMAGE_LINEAR",
4796  "IMAGE_OPTIMAL",
4797 };
4798 
4799 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4800 {
4801  json.BeginObject();
4802 
4803  json.WriteString("Blocks");
4804  json.WriteNumber(stat.blockCount);
4805 
4806  json.WriteString("Allocations");
4807  json.WriteNumber(stat.allocationCount);
4808 
4809  json.WriteString("UnusedRanges");
4810  json.WriteNumber(stat.unusedRangeCount);
4811 
4812  json.WriteString("UsedBytes");
4813  json.WriteNumber(stat.usedBytes);
4814 
4815  json.WriteString("UnusedBytes");
4816  json.WriteNumber(stat.unusedBytes);
4817 
4818  if(stat.allocationCount > 1)
4819  {
4820  json.WriteString("AllocationSize");
4821  json.BeginObject(true);
4822  json.WriteString("Min");
4823  json.WriteNumber(stat.allocationSizeMin);
4824  json.WriteString("Avg");
4825  json.WriteNumber(stat.allocationSizeAvg);
4826  json.WriteString("Max");
4827  json.WriteNumber(stat.allocationSizeMax);
4828  json.EndObject();
4829  }
4830 
4831  if(stat.unusedRangeCount > 1)
4832  {
4833  json.WriteString("UnusedRangeSize");
4834  json.BeginObject(true);
4835  json.WriteString("Min");
4836  json.WriteNumber(stat.unusedRangeSizeMin);
4837  json.WriteString("Avg");
4838  json.WriteNumber(stat.unusedRangeSizeAvg);
4839  json.WriteString("Max");
4840  json.WriteNumber(stat.unusedRangeSizeMax);
4841  json.EndObject();
4842  }
4843 
4844  json.EndObject();
4845 }
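// Example of the resulting JSON fragment (illustrative values):
//
//   {
//    "Blocks": 1,
//    "Allocations": 2,
//    "UnusedRanges": 1,
//    "UsedBytes": 1024,
//    "UnusedBytes": 512,
//    "AllocationSize": { "Min": 256, "Avg": 512, "Max": 768 }
//   }
//
// The "AllocationSize" and "UnusedRangeSize" summaries are emitted only when
// there is more than one allocation or unused range, respectively.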
4846 
4847 #endif // #if VMA_STATS_STRING_ENABLED
4848 
4849 struct VmaSuballocationItemSizeLess
4850 {
4851  bool operator()(
4852  const VmaSuballocationList::iterator lhs,
4853  const VmaSuballocationList::iterator rhs) const
4854  {
4855  return lhs->size < rhs->size;
4856  }
4857  bool operator()(
4858  const VmaSuballocationList::iterator lhs,
4859  VkDeviceSize rhsSize) const
4860  {
4861  return lhs->size < rhsSize;
4862  }
4863 };
4864 
4865 ////////////////////////////////////////////////////////////////////////////////
4866 // class VmaBlockMetadata
4867 
4868 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4869  m_Size(0),
4870  m_FreeCount(0),
4871  m_SumFreeSize(0),
4872  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4873  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4874 {
4875 }
4876 
4877 VmaBlockMetadata::~VmaBlockMetadata()
4878 {
4879 }
4880 
4881 void VmaBlockMetadata::Init(VkDeviceSize size)
4882 {
4883  m_Size = size;
4884  m_FreeCount = 1;
4885  m_SumFreeSize = size;
4886 
4887  VmaSuballocation suballoc = {};
4888  suballoc.offset = 0;
4889  suballoc.size = size;
4890  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4891  suballoc.hAllocation = VK_NULL_HANDLE;
4892 
4893  m_Suballocations.push_back(suballoc);
4894  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4895  --suballocItem;
4896  m_FreeSuballocationsBySize.push_back(suballocItem);
4897 }
4898 
4899 bool VmaBlockMetadata::Validate() const
4900 {
4901  if(m_Suballocations.empty())
4902  {
4903  return false;
4904  }
4905 
4906  // Expected offset of the next suballocation, as calculated from previous ones.
4907  VkDeviceSize calculatedOffset = 0;
4908  // Expected number of free suballocations as calculated from traversing their list.
4909  uint32_t calculatedFreeCount = 0;
4910  // Expected sum size of free suballocations as calculated from traversing their list.
4911  VkDeviceSize calculatedSumFreeSize = 0;
4912  // Expected number of free suballocations that should be registered in
4913  // m_FreeSuballocationsBySize calculated from traversing their list.
4914  size_t freeSuballocationsToRegister = 0;
4915  // True if previously visited suballocation was free.
4916  bool prevFree = false;
4917 
4918  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4919  suballocItem != m_Suballocations.cend();
4920  ++suballocItem)
4921  {
4922  const VmaSuballocation& subAlloc = *suballocItem;
4923 
4924  // Actual offset of this suballocation doesn't match the expected one.
4925  if(subAlloc.offset != calculatedOffset)
4926  {
4927  return false;
4928  }
4929 
4930  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4931  // Two adjacent free suballocations are invalid. They should be merged.
4932  if(prevFree && currFree)
4933  {
4934  return false;
4935  }
4936 
4937  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4938  {
4939  return false;
4940  }
4941 
4942  if(currFree)
4943  {
4944  calculatedSumFreeSize += subAlloc.size;
4945  ++calculatedFreeCount;
4946  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4947  {
4948  ++freeSuballocationsToRegister;
4949  }
4950  }
4951  else
4952  {
4953  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
4954  {
4955  return false;
4956  }
4957  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
4958  {
4959  return false;
4960  }
4961  }
4962 
4963  calculatedOffset += subAlloc.size;
4964  prevFree = currFree;
4965  }
4966 
4967  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
4968  // match the expected one.
4969  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
4970  {
4971  return false;
4972  }
4973 
4974  VkDeviceSize lastSize = 0;
4975  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
4976  {
4977  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
4978 
4979  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
4980  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
4981  {
4982  return false;
4983  }
4984  // They must be sorted by size ascending.
4985  if(suballocItem->size < lastSize)
4986  {
4987  return false;
4988  }
4989 
4990  lastSize = suballocItem->size;
4991  }
4992 
4993  // Check if totals match calculated values.
4994  if(!ValidateFreeSuballocationList() ||
4995  (calculatedOffset != m_Size) ||
4996  (calculatedSumFreeSize != m_SumFreeSize) ||
4997  (calculatedFreeCount != m_FreeCount))
4998  {
4999  return false;
5000  }
5001 
5002  return true;
5003 }
5004 
5005 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5006 {
5007  if(!m_FreeSuballocationsBySize.empty())
5008  {
5009  return m_FreeSuballocationsBySize.back()->size;
5010  }
5011  else
5012  {
5013  return 0;
5014  }
5015 }
5016 
5017 bool VmaBlockMetadata::IsEmpty() const
5018 {
5019  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5020 }
5021 
5022 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5023 {
5024  outInfo.blockCount = 1;
5025 
5026  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5027  outInfo.allocationCount = rangeCount - m_FreeCount;
5028  outInfo.unusedRangeCount = m_FreeCount;
5029 
5030  outInfo.unusedBytes = m_SumFreeSize;
5031  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5032 
5033  outInfo.allocationSizeMin = UINT64_MAX;
5034  outInfo.allocationSizeMax = 0;
5035  outInfo.unusedRangeSizeMin = UINT64_MAX;
5036  outInfo.unusedRangeSizeMax = 0;
5037 
5038  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5039  suballocItem != m_Suballocations.cend();
5040  ++suballocItem)
5041  {
5042  const VmaSuballocation& suballoc = *suballocItem;
5043  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5044  {
5045  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5046  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5047  }
5048  else
5049  {
5050  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5051  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5052  }
5053  }
5054 }
5055 
5056 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5057 {
5058  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5059 
5060  inoutStats.size += m_Size;
5061  inoutStats.unusedSize += m_SumFreeSize;
5062  inoutStats.allocationCount += rangeCount - m_FreeCount;
5063  inoutStats.unusedRangeCount += m_FreeCount;
5064  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5065 }
5066 
5067 #if VMA_STATS_STRING_ENABLED
5068 
5069 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5070 {
5071  json.BeginObject();
5072 
5073  json.WriteString("TotalBytes");
5074  json.WriteNumber(m_Size);
5075 
5076  json.WriteString("UnusedBytes");
5077  json.WriteNumber(m_SumFreeSize);
5078 
5079  json.WriteString("Allocations");
5080  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5081 
5082  json.WriteString("UnusedRanges");
5083  json.WriteNumber(m_FreeCount);
5084 
5085  json.WriteString("Suballocations");
5086  json.BeginArray();
5087  size_t i = 0;
5088  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5089  suballocItem != m_Suballocations.cend();
5090  ++suballocItem, ++i)
5091  {
5092  json.BeginObject(true);
5093 
5094  json.WriteString("Type");
5095  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5096 
5097  json.WriteString("Size");
5098  json.WriteNumber(suballocItem->size);
5099 
5100  json.WriteString("Offset");
5101  json.WriteNumber(suballocItem->offset);
5102 
5103  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5104  {
5105  const void* pUserData = suballocItem->hAllocation->GetUserData();
5106  if(pUserData != VMA_NULL)
5107  {
5108  json.WriteString("UserData");
5109  if(suballocItem->hAllocation->IsUserDataString())
5110  {
5111  json.WriteString((const char*)pUserData);
5112  }
5113  else
5114  {
5115  json.BeginString();
5116  json.ContinueString_Pointer(pUserData);
5117  json.EndString();
5118  }
5119  }
5120  }
5121 
5122  json.EndObject();
5123  }
5124  json.EndArray();
5125 
5126  json.EndObject();
5127 }
5128 
5129 #endif // #if VMA_STATS_STRING_ENABLED
5130 
5131 /*
5132 How many suitable free suballocations to analyze before choosing the best one.
5133 - Set to 1 to use First-Fit algorithm - the first suitable free suballocation
5134  will be chosen.
5135 - Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
5136  suballocations will be analyzed and the best one will be chosen.
5137 - Any other value is also acceptable.
5138 */
5139 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
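// Editor's sketch: how the fit strategies described above play out on example
// data (the sizes below are made up for illustration). Given free
// suballocations sorted by size {64, 128, 512, 2048} and allocSize = 100:
//
//   First-Fit (check 1 candidate): VmaBinaryFindFirstNotLess() lands on 128
//       and it is accepted immediately.
//   Best-Fit (check all): every candidate >= 100 is tried in ascending size
//       order, so the smallest viable one (128) still wins, but the larger
//       ones serve as fallbacks if 128 fails CheckAllocation() (e.g. due to
//       alignment or bufferImageGranularity).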
5140 
5141 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5142 {
5143  VMA_ASSERT(IsEmpty());
5144  pAllocationRequest->offset = 0;
5145  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5146  pAllocationRequest->sumItemSize = 0;
5147  pAllocationRequest->item = m_Suballocations.begin();
5148  pAllocationRequest->itemsToMakeLostCount = 0;
5149 }
5150 
5151 bool VmaBlockMetadata::CreateAllocationRequest(
5152  uint32_t currentFrameIndex,
5153  uint32_t frameInUseCount,
5154  VkDeviceSize bufferImageGranularity,
5155  VkDeviceSize allocSize,
5156  VkDeviceSize allocAlignment,
5157  VmaSuballocationType allocType,
5158  bool canMakeOtherLost,
5159  VmaAllocationRequest* pAllocationRequest)
5160 {
5161  VMA_ASSERT(allocSize > 0);
5162  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5163  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5164  VMA_HEAVY_ASSERT(Validate());
5165 
5166  // There is not enough total free space in this block to fulfill the request: Early return.
5167  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5168  {
5169  return false;
5170  }
5171 
5172  // Efficient search in m_FreeSuballocationsBySize, which is sorted by size.
5173  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5174  if(freeSuballocCount > 0)
5175  {
5176  if(VMA_BEST_FIT)
5177  {
5178  // Find first free suballocation with size not less than allocSize.
5179  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5180  m_FreeSuballocationsBySize.data(),
5181  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5182  allocSize,
5183  VmaSuballocationItemSizeLess());
5184  size_t index = it - m_FreeSuballocationsBySize.data();
5185  for(; index < freeSuballocCount; ++index)
5186  {
5187  if(CheckAllocation(
5188  currentFrameIndex,
5189  frameInUseCount,
5190  bufferImageGranularity,
5191  allocSize,
5192  allocAlignment,
5193  allocType,
5194  m_FreeSuballocationsBySize[index],
5195  false, // canMakeOtherLost
5196  &pAllocationRequest->offset,
5197  &pAllocationRequest->itemsToMakeLostCount,
5198  &pAllocationRequest->sumFreeSize,
5199  &pAllocationRequest->sumItemSize))
5200  {
5201  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5202  return true;
5203  }
5204  }
5205  }
5206  else
5207  {
5208  // Search starting from the biggest suballocations.
5209  for(size_t index = freeSuballocCount; index--; )
5210  {
5211  if(CheckAllocation(
5212  currentFrameIndex,
5213  frameInUseCount,
5214  bufferImageGranularity,
5215  allocSize,
5216  allocAlignment,
5217  allocType,
5218  m_FreeSuballocationsBySize[index],
5219  false, // canMakeOtherLost
5220  &pAllocationRequest->offset,
5221  &pAllocationRequest->itemsToMakeLostCount,
5222  &pAllocationRequest->sumFreeSize,
5223  &pAllocationRequest->sumItemSize))
5224  {
5225  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5226  return true;
5227  }
5228  }
5229  }
5230  }
5231 
5232  if(canMakeOtherLost)
5233  {
5234  // Brute-force algorithm. TODO: Come up with something better.
5235 
5236  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5237  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5238 
5239  VmaAllocationRequest tmpAllocRequest = {};
5240  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5241  suballocIt != m_Suballocations.end();
5242  ++suballocIt)
5243  {
5244  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5245  suballocIt->hAllocation->CanBecomeLost())
5246  {
5247  if(CheckAllocation(
5248  currentFrameIndex,
5249  frameInUseCount,
5250  bufferImageGranularity,
5251  allocSize,
5252  allocAlignment,
5253  allocType,
5254  suballocIt,
5255  canMakeOtherLost,
5256  &tmpAllocRequest.offset,
5257  &tmpAllocRequest.itemsToMakeLostCount,
5258  &tmpAllocRequest.sumFreeSize,
5259  &tmpAllocRequest.sumItemSize))
5260  {
5261  tmpAllocRequest.item = suballocIt;
5262 
5263  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5264  {
5265  *pAllocationRequest = tmpAllocRequest;
5266  }
5267  }
5268  }
5269  }
5270 
5271  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5272  {
5273  return true;
5274  }
5275  }
5276 
5277  return false;
5278 }
5279 
5280 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5281  uint32_t currentFrameIndex,
5282  uint32_t frameInUseCount,
5283  VmaAllocationRequest* pAllocationRequest)
5284 {
5285  while(pAllocationRequest->itemsToMakeLostCount > 0)
5286  {
5287  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5288  {
5289  ++pAllocationRequest->item;
5290  }
5291  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5292  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5293  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5294  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5295  {
5296  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5297  --pAllocationRequest->itemsToMakeLostCount;
5298  }
5299  else
5300  {
5301  return false;
5302  }
5303  }
5304 
5305  VMA_HEAVY_ASSERT(Validate());
5306  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5307  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5308 
5309  return true;
5310 }
5311 
5312 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5313 {
5314  uint32_t lostAllocationCount = 0;
5315  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5316  it != m_Suballocations.end();
5317  ++it)
5318  {
5319  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5320  it->hAllocation->CanBecomeLost() &&
5321  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5322  {
5323  it = FreeSuballocation(it);
5324  ++lostAllocationCount;
5325  }
5326  }
5327  return lostAllocationCount;
5328 }
5329 
5330 void VmaBlockMetadata::Alloc(
5331  const VmaAllocationRequest& request,
5332  VmaSuballocationType type,
5333  VkDeviceSize allocSize,
5334  VmaAllocation hAllocation)
5335 {
5336  VMA_ASSERT(request.item != m_Suballocations.end());
5337  VmaSuballocation& suballoc = *request.item;
5338  // Given suballocation is a free block.
5339  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5340  // Given offset is inside this suballocation.
5341  VMA_ASSERT(request.offset >= suballoc.offset);
5342  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5343  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5344  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5345 
5346  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5347  // it to become used.
5348  UnregisterFreeSuballocation(request.item);
5349 
5350  suballoc.offset = request.offset;
5351  suballoc.size = allocSize;
5352  suballoc.type = type;
5353  suballoc.hAllocation = hAllocation;
5354 
5355  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5356  if(paddingEnd)
5357  {
5358  VmaSuballocation paddingSuballoc = {};
5359  paddingSuballoc.offset = request.offset + allocSize;
5360  paddingSuballoc.size = paddingEnd;
5361  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5362  VmaSuballocationList::iterator next = request.item;
5363  ++next;
5364  const VmaSuballocationList::iterator paddingEndItem =
5365  m_Suballocations.insert(next, paddingSuballoc);
5366  RegisterFreeSuballocation(paddingEndItem);
5367  }
5368 
5369  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5370  if(paddingBegin)
5371  {
5372  VmaSuballocation paddingSuballoc = {};
5373  paddingSuballoc.offset = request.offset - paddingBegin;
5374  paddingSuballoc.size = paddingBegin;
5375  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5376  const VmaSuballocationList::iterator paddingBeginItem =
5377  m_Suballocations.insert(request.item, paddingSuballoc);
5378  RegisterFreeSuballocation(paddingBeginItem);
5379  }
5380 
5381  // Update totals.
5382  m_FreeCount = m_FreeCount - 1;
5383  if(paddingBegin > 0)
5384  {
5385  ++m_FreeCount;
5386  }
5387  if(paddingEnd > 0)
5388  {
5389  ++m_FreeCount;
5390  }
5391  m_SumFreeSize -= allocSize;
5392 }
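// Editor's note: a worked example of the split performed above, with
// hypothetical numbers. Allocating allocSize = 200 at request.offset = 64
// from a FREE suballocation { offset = 0, size = 512 } produces:
//
//   { offset = 0,   size = 64  } FREE   <- paddingBegin, inserted before
//   { offset = 64,  size = 200 } used   <- the reused, now-allocated item
//   { offset = 264, size = 248 } FREE   <- paddingEnd, inserted after
//
// m_FreeCount changes by -1 for the consumed item and +1 per inserted
// padding (net +1 here), and m_SumFreeSize drops by exactly allocSize = 200.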
5393 
5394 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5395 {
5396  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5397  suballocItem != m_Suballocations.end();
5398  ++suballocItem)
5399  {
5400  VmaSuballocation& suballoc = *suballocItem;
5401  if(suballoc.hAllocation == allocation)
5402  {
5403  FreeSuballocation(suballocItem);
5404  VMA_HEAVY_ASSERT(Validate());
5405  return;
5406  }
5407  }
5408  VMA_ASSERT(0 && "Not found!");
5409 }
5410 
5411 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5412 {
5413  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5414  suballocItem != m_Suballocations.end();
5415  ++suballocItem)
5416  {
5417  VmaSuballocation& suballoc = *suballocItem;
5418  if(suballoc.offset == offset)
5419  {
5420  FreeSuballocation(suballocItem);
5421  return;
5422  }
5423  }
5424  VMA_ASSERT(0 && "Not found!");
5425 }
5426 
5427 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5428 {
5429  VkDeviceSize lastSize = 0;
5430  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5431  {
5432  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5433 
5434  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5435  {
5436  VMA_ASSERT(0);
5437  return false;
5438  }
5439  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5440  {
5441  VMA_ASSERT(0);
5442  return false;
5443  }
5444  if(it->size < lastSize)
5445  {
5446  VMA_ASSERT(0);
5447  return false;
5448  }
5449 
5450  lastSize = it->size;
5451  }
5452  return true;
5453 }
5454 
5455 bool VmaBlockMetadata::CheckAllocation(
5456  uint32_t currentFrameIndex,
5457  uint32_t frameInUseCount,
5458  VkDeviceSize bufferImageGranularity,
5459  VkDeviceSize allocSize,
5460  VkDeviceSize allocAlignment,
5461  VmaSuballocationType allocType,
5462  VmaSuballocationList::const_iterator suballocItem,
5463  bool canMakeOtherLost,
5464  VkDeviceSize* pOffset,
5465  size_t* itemsToMakeLostCount,
5466  VkDeviceSize* pSumFreeSize,
5467  VkDeviceSize* pSumItemSize) const
5468 {
5469  VMA_ASSERT(allocSize > 0);
5470  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5471  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5472  VMA_ASSERT(pOffset != VMA_NULL);
5473 
5474  *itemsToMakeLostCount = 0;
5475  *pSumFreeSize = 0;
5476  *pSumItemSize = 0;
5477 
5478  if(canMakeOtherLost)
5479  {
5480  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5481  {
5482  *pSumFreeSize = suballocItem->size;
5483  }
5484  else
5485  {
5486  if(suballocItem->hAllocation->CanBecomeLost() &&
5487  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5488  {
5489  ++*itemsToMakeLostCount;
5490  *pSumItemSize = suballocItem->size;
5491  }
5492  else
5493  {
5494  return false;
5495  }
5496  }
5497 
5498  // Remaining size is too small for this request: Early return.
5499  if(m_Size - suballocItem->offset < allocSize)
5500  {
5501  return false;
5502  }
5503 
5504  // Start from offset equal to beginning of this suballocation.
5505  *pOffset = suballocItem->offset;
5506 
5507  // Apply VMA_DEBUG_MARGIN at the beginning.
5508  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5509  {
5510  *pOffset += VMA_DEBUG_MARGIN;
5511  }
5512 
5513  // Apply alignment.
5514  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5515  *pOffset = VmaAlignUp(*pOffset, alignment);
5516 
5517  // Check previous suballocations for BufferImageGranularity conflicts.
5518  // Make bigger alignment if necessary.
5519  if(bufferImageGranularity > 1)
5520  {
5521  bool bufferImageGranularityConflict = false;
5522  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5523  while(prevSuballocItem != m_Suballocations.cbegin())
5524  {
5525  --prevSuballocItem;
5526  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5527  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5528  {
5529  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5530  {
5531  bufferImageGranularityConflict = true;
5532  break;
5533  }
5534  }
5535  else
5536  // Already on previous page.
5537  break;
5538  }
5539  if(bufferImageGranularityConflict)
5540  {
5541  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5542  }
5543  }
5544 
5545  // Now that we have final *pOffset, check if we are past suballocItem.
5546  // If yes, return false - this function should be called for another suballocItem as starting point.
5547  if(*pOffset >= suballocItem->offset + suballocItem->size)
5548  {
5549  return false;
5550  }
5551 
5552  // Calculate padding at the beginning based on current offset.
5553  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5554 
5555  // Calculate required margin at the end if this is not last suballocation.
5556  VmaSuballocationList::const_iterator next = suballocItem;
5557  ++next;
5558  const VkDeviceSize requiredEndMargin =
5559  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5560 
5561  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5562  // Another early return check.
5563  if(suballocItem->offset + totalSize > m_Size)
5564  {
5565  return false;
5566  }
5567 
5568  // Advance lastSuballocItem until desired size is reached.
5569  // Update itemsToMakeLostCount.
5570  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5571  if(totalSize > suballocItem->size)
5572  {
5573  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5574  while(remainingSize > 0)
5575  {
5576  ++lastSuballocItem;
5577  if(lastSuballocItem == m_Suballocations.cend())
5578  {
5579  return false;
5580  }
5581  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5582  {
5583  *pSumFreeSize += lastSuballocItem->size;
5584  }
5585  else
5586  {
5587  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5588  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5589  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5590  {
5591  ++*itemsToMakeLostCount;
5592  *pSumItemSize += lastSuballocItem->size;
5593  }
5594  else
5595  {
5596  return false;
5597  }
5598  }
5599  remainingSize = (lastSuballocItem->size < remainingSize) ?
5600  remainingSize - lastSuballocItem->size : 0;
5601  }
5602  }
5603 
5604  // Check next suballocations for BufferImageGranularity conflicts.
5605  // If conflict exists, we must mark more allocations lost or fail.
5606  if(bufferImageGranularity > 1)
5607  {
5608  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5609  ++nextSuballocItem;
5610  while(nextSuballocItem != m_Suballocations.cend())
5611  {
5612  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5613  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5614  {
5615  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5616  {
5617  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5618  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5619  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5620  {
5621  ++*itemsToMakeLostCount;
5622  }
5623  else
5624  {
5625  return false;
5626  }
5627  }
5628  }
5629  else
5630  {
5631  // Already on next page.
5632  break;
5633  }
5634  ++nextSuballocItem;
5635  }
5636  }
5637  }
5638  else
5639  {
5640  const VmaSuballocation& suballoc = *suballocItem;
5641  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5642 
5643  *pSumFreeSize = suballoc.size;
5644 
5645  // Size of this suballocation is too small for this request: Early return.
5646  if(suballoc.size < allocSize)
5647  {
5648  return false;
5649  }
5650 
5651  // Start from offset equal to beginning of this suballocation.
5652  *pOffset = suballoc.offset;
5653 
5654  // Apply VMA_DEBUG_MARGIN at the beginning.
5655  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5656  {
5657  *pOffset += VMA_DEBUG_MARGIN;
5658  }
5659 
5660  // Apply alignment.
5661  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5662  *pOffset = VmaAlignUp(*pOffset, alignment);
5663 
5664  // Check previous suballocations for BufferImageGranularity conflicts.
5665  // Make bigger alignment if necessary.
5666  if(bufferImageGranularity > 1)
5667  {
5668  bool bufferImageGranularityConflict = false;
5669  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5670  while(prevSuballocItem != m_Suballocations.cbegin())
5671  {
5672  --prevSuballocItem;
5673  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5674  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5675  {
5676  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5677  {
5678  bufferImageGranularityConflict = true;
5679  break;
5680  }
5681  }
5682  else
5683  // Already on previous page.
5684  break;
5685  }
5686  if(bufferImageGranularityConflict)
5687  {
5688  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5689  }
5690  }
5691 
5692  // Calculate padding at the beginning based on current offset.
5693  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5694 
5695  // Calculate required margin at the end if this is not last suballocation.
5696  VmaSuballocationList::const_iterator next = suballocItem;
5697  ++next;
5698  const VkDeviceSize requiredEndMargin =
5699  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5700 
5701  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5702  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5703  {
5704  return false;
5705  }
5706 
5707  // Check next suballocations for BufferImageGranularity conflicts.
5708  // If conflict exists, allocation cannot be made here.
5709  if(bufferImageGranularity > 1)
5710  {
5711  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5712  ++nextSuballocItem;
5713  while(nextSuballocItem != m_Suballocations.cend())
5714  {
5715  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5716  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5717  {
5718  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5719  {
5720  return false;
5721  }
5722  }
5723  else
5724  {
5725  // Already on next page.
5726  break;
5727  }
5728  ++nextSuballocItem;
5729  }
5730  }
5731  }
5732 
5733  // All tests passed: Success. pOffset is already filled.
5734  return true;
5735 }
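// Editor's note: the offset adjustment above relies on VmaAlignUp(). A hedged
// standalone sketch of the same rounding (equivalent in effect to the
// library's helper, shown here only for illustration):
//
//   static VkDeviceSize AlignUpExample(VkDeviceSize offset, VkDeviceSize alignment)
//   {
//       // Round offset up to the next multiple of alignment.
//       // e.g. AlignUpExample(70, 64) == 128, AlignUpExample(64, 64) == 64.
//       return (offset + alignment - 1) / alignment * alignment;
//   }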
5736 
5737 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5738 {
5739  VMA_ASSERT(item != m_Suballocations.end());
5740  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5741 
5742  VmaSuballocationList::iterator nextItem = item;
5743  ++nextItem;
5744  VMA_ASSERT(nextItem != m_Suballocations.end());
5745  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5746 
5747  item->size += nextItem->size;
5748  --m_FreeCount;
5749  m_Suballocations.erase(nextItem);
5750 }
5751 
5752 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5753 {
5754  // Change this suballocation to be marked as free.
5755  VmaSuballocation& suballoc = *suballocItem;
5756  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5757  suballoc.hAllocation = VK_NULL_HANDLE;
5758 
5759  // Update totals.
5760  ++m_FreeCount;
5761  m_SumFreeSize += suballoc.size;
5762 
5763  // Merge with previous and/or next suballocation if it's also free.
5764  bool mergeWithNext = false;
5765  bool mergeWithPrev = false;
5766 
5767  VmaSuballocationList::iterator nextItem = suballocItem;
5768  ++nextItem;
5769  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5770  {
5771  mergeWithNext = true;
5772  }
5773 
5774  VmaSuballocationList::iterator prevItem = suballocItem;
5775  if(suballocItem != m_Suballocations.begin())
5776  {
5777  --prevItem;
5778  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5779  {
5780  mergeWithPrev = true;
5781  }
5782  }
5783 
5784  if(mergeWithNext)
5785  {
5786  UnregisterFreeSuballocation(nextItem);
5787  MergeFreeWithNext(suballocItem);
5788  }
5789 
5790  if(mergeWithPrev)
5791  {
5792  UnregisterFreeSuballocation(prevItem);
5793  MergeFreeWithNext(prevItem);
5794  RegisterFreeSuballocation(prevItem);
5795  return prevItem;
5796  }
5797  else
5798  {
5799  RegisterFreeSuballocation(suballocItem);
5800  return suballocItem;
5801  }
5802 }
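// Editor's note: a small worked example of the merge logic above, using
// made-up offsets. Freeing the middle item of
//
//   { 0, 256 } FREE | { 256, 128 } used | { 384, 640 } FREE
//
// first marks { 256, 128 } FREE, then merges the next item into it (giving
// { 256, 768 }), then merges that into the previous one, yielding a single
// { 0, 1024 } FREE entry. Only the surviving iterator is (re)registered in
// m_FreeSuballocationsBySize, which is why the neighbors are unregistered
// before each merge.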
5803 
5804 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5805 {
5806  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5807  VMA_ASSERT(item->size > 0);
5808 
5809  // You may want to enable this validation at the beginning or at the end of
5810  // this function, depending on what you want to check.
5811  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5812 
5813  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5814  {
5815  if(m_FreeSuballocationsBySize.empty())
5816  {
5817  m_FreeSuballocationsBySize.push_back(item);
5818  }
5819  else
5820  {
5821  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5822  }
5823  }
5824 
5825  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5826 }
5827 
5828 
5829 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5830 {
5831  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5832  VMA_ASSERT(item->size > 0);
5833 
5834  // You may want to enable this validation at the beginning or at the end of
5835  // this function, depending on what you want to check.
5836  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5837 
5838  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5839  {
5840  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5841  m_FreeSuballocationsBySize.data(),
5842  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5843  item,
5844  VmaSuballocationItemSizeLess());
5845  for(size_t index = it - m_FreeSuballocationsBySize.data();
5846  index < m_FreeSuballocationsBySize.size();
5847  ++index)
5848  {
5849  if(m_FreeSuballocationsBySize[index] == item)
5850  {
5851  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5852  return;
5853  }
5854  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5855  }
5856  VMA_ASSERT(0 && "Not found.");
5857  }
5858 
5859  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5860 }
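// Editor's note: m_FreeSuballocationsBySize is sorted by size only, so
// several entries can share one size. The binary search above therefore
// finds just the first entry of equal size, and the loop scans forward
// through that run comparing stored iterators for identity. A minimal
// standalone analogue with std::lower_bound (hypothetical, for illustration):
//
//   std::vector<VkDeviceSize> sizes = {64, 128, 128, 512};
//   auto it = std::lower_bound(sizes.begin(), sizes.end(), VkDeviceSize(128));
//   // it points at the *first* 128; equal keys that follow must still be
//   // scanned linearly to find the exact element, just like the loop above.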
5861 
5862 ////////////////////////////////////////////////////////////////////////////////
5863 // class VmaDeviceMemoryMapping
5864 
5865 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5866  m_MapCount(0),
5867  m_pMappedData(VMA_NULL)
5868 {
5869 }
5870 
5871 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5872 {
5873  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5874 }
5875 
5876 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5877 {
5878  if(count == 0)
5879  {
5880  return VK_SUCCESS;
5881  }
5882 
5883  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5884  if(m_MapCount != 0)
5885  {
5886  m_MapCount += count;
5887  VMA_ASSERT(m_pMappedData != VMA_NULL);
5888  if(ppData != VMA_NULL)
5889  {
5890  *ppData = m_pMappedData;
5891  }
5892  return VK_SUCCESS;
5893  }
5894  else
5895  {
5896  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5897  hAllocator->m_hDevice,
5898  hMemory,
5899  0, // offset
5900  VK_WHOLE_SIZE,
5901  0, // flags
5902  &m_pMappedData);
5903  if(result == VK_SUCCESS)
5904  {
5905  if(ppData != VMA_NULL)
5906  {
5907  *ppData = m_pMappedData;
5908  }
5909  m_MapCount = count;
5910  }
5911  return result;
5912  }
5913 }
5914 
5915 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
5916 {
5917  if(count == 0)
5918  {
5919  return;
5920  }
5921 
5922  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5923  if(m_MapCount >= count)
5924  {
5925  m_MapCount -= count;
5926  if(m_MapCount == 0)
5927  {
5928  m_pMappedData = VMA_NULL;
5929  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5930  }
5931  }
5932  else
5933  {
5934  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5935  }
5936 }
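// Editor's sketch: how the reference count above behaves when driven through
// the public API (vmaMapMemory / vmaUnmapMemory are the real entry points;
// the allocator and allocation handles below are placeholders):
//
//   void* pData = VMA_NULL;
//   vmaMapMemory(allocator, alloc, &pData);   // m_MapCount: 0 -> 1, vkMapMemory called
//   vmaMapMemory(allocator, alloc, &pData);   // m_MapCount: 1 -> 2, cached pointer returned
//   vmaUnmapMemory(allocator, alloc);         // m_MapCount: 2 -> 1, memory stays mapped
//   vmaUnmapMemory(allocator, alloc);         // m_MapCount: 1 -> 0, vkUnmapMemory called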
5937 
5938 ////////////////////////////////////////////////////////////////////////////////
5939 // class VmaDeviceMemoryBlock
5940 
5941 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5942  m_MemoryTypeIndex(UINT32_MAX),
5943  m_hMemory(VK_NULL_HANDLE),
5944  m_Metadata(hAllocator)
5945 {
5946 }
5947 
5948 void VmaDeviceMemoryBlock::Init(
5949  uint32_t newMemoryTypeIndex,
5950  VkDeviceMemory newMemory,
5951  VkDeviceSize newSize)
5952 {
5953  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
5954 
5955  m_MemoryTypeIndex = newMemoryTypeIndex;
5956  m_hMemory = newMemory;
5957 
5958  m_Metadata.Init(newSize);
5959 }
5960 
5961 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
5962 {
5963  // This is the most important assert in the entire library.
5964  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
5965  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
5966 
5967  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
5968  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
5969  m_hMemory = VK_NULL_HANDLE;
5970 }
5971 
5972 bool VmaDeviceMemoryBlock::Validate() const
5973 {
5974  if((m_hMemory == VK_NULL_HANDLE) ||
5975  (m_Metadata.GetSize() == 0))
5976  {
5977  return false;
5978  }
5979 
5980  return m_Metadata.Validate();
5981 }
5982 
5983 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
5984 {
5985  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
5986 }
5987 
5988 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
5989 {
5990  m_Mapping.Unmap(hAllocator, m_hMemory, count);
5991 }
5992 
5993 static void InitStatInfo(VmaStatInfo& outInfo)
5994 {
5995  memset(&outInfo, 0, sizeof(outInfo));
5996  outInfo.allocationSizeMin = UINT64_MAX;
5997  outInfo.unusedRangeSizeMin = UINT64_MAX;
5998 }
5999 
6000 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6001 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6002 {
6003  inoutInfo.blockCount += srcInfo.blockCount;
6004  inoutInfo.allocationCount += srcInfo.allocationCount;
6005  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6006  inoutInfo.usedBytes += srcInfo.usedBytes;
6007  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6008  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6009  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6010  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6011  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6012 }
6013 
6014 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6015 {
6016  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6017  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6018  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6019  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6020 }
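// Editor's note: VmaRoundDiv() divides with rounding to nearest, so the
// averages above do not simply truncate. Worked example with made-up totals:
// usedBytes = 2000 over allocationCount = 3 yields (2000 + 3/2) / 3 =
// 2001 / 3 = 667, where plain integer division would truncate to 666.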
6021 
6022 VmaPool_T::VmaPool_T(
6023  VmaAllocator hAllocator,
6024  const VmaPoolCreateInfo& createInfo) :
6025  m_BlockVector(
6026  hAllocator,
6027  createInfo.memoryTypeIndex,
6028  createInfo.blockSize,
6029  createInfo.minBlockCount,
6030  createInfo.maxBlockCount,
6031  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6032  createInfo.frameInUseCount,
6033  true) // isCustomPool
6034 {
6035 }
6036 
6037 VmaPool_T::~VmaPool_T()
6038 {
6039 }
6040 
6041 #if VMA_STATS_STRING_ENABLED
6042 
6043 #endif // #if VMA_STATS_STRING_ENABLED
6044 
6045 VmaBlockVector::VmaBlockVector(
6046  VmaAllocator hAllocator,
6047  uint32_t memoryTypeIndex,
6048  VkDeviceSize preferredBlockSize,
6049  size_t minBlockCount,
6050  size_t maxBlockCount,
6051  VkDeviceSize bufferImageGranularity,
6052  uint32_t frameInUseCount,
6053  bool isCustomPool) :
6054  m_hAllocator(hAllocator),
6055  m_MemoryTypeIndex(memoryTypeIndex),
6056  m_PreferredBlockSize(preferredBlockSize),
6057  m_MinBlockCount(minBlockCount),
6058  m_MaxBlockCount(maxBlockCount),
6059  m_BufferImageGranularity(bufferImageGranularity),
6060  m_FrameInUseCount(frameInUseCount),
6061  m_IsCustomPool(isCustomPool),
6062  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6063  m_HasEmptyBlock(false),
6064  m_pDefragmentator(VMA_NULL)
6065 {
6066 }
6067 
6068 VmaBlockVector::~VmaBlockVector()
6069 {
6070  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6071 
6072  for(size_t i = m_Blocks.size(); i--; )
6073  {
6074  m_Blocks[i]->Destroy(m_hAllocator);
6075  vma_delete(m_hAllocator, m_Blocks[i]);
6076  }
6077 }
6078 
6079 VkResult VmaBlockVector::CreateMinBlocks()
6080 {
6081  for(size_t i = 0; i < m_MinBlockCount; ++i)
6082  {
6083  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6084  if(res != VK_SUCCESS)
6085  {
6086  return res;
6087  }
6088  }
6089  return VK_SUCCESS;
6090 }
6091 
6092 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6093 {
6094  pStats->size = 0;
6095  pStats->unusedSize = 0;
6096  pStats->allocationCount = 0;
6097  pStats->unusedRangeCount = 0;
6098  pStats->unusedRangeSizeMax = 0;
6099 
6100  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6101 
6102  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6103  {
6104  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6105  VMA_ASSERT(pBlock);
6106  VMA_HEAVY_ASSERT(pBlock->Validate());
6107  pBlock->m_Metadata.AddPoolStats(*pStats);
6108  }
6109 }
6110 
6111 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6112 
6113 VkResult VmaBlockVector::Allocate(
6114  VmaPool hCurrentPool,
6115  uint32_t currentFrameIndex,
6116  const VkMemoryRequirements& vkMemReq,
6117  const VmaAllocationCreateInfo& createInfo,
6118  VmaSuballocationType suballocType,
6119  VmaAllocation* pAllocation)
6120 {
6121  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6122  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6123 
6124  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6125 
6126  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6127  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6128  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6129  {
6130  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6131  VMA_ASSERT(pCurrBlock);
6132  VmaAllocationRequest currRequest = {};
6133  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6134  currentFrameIndex,
6135  m_FrameInUseCount,
6136  m_BufferImageGranularity,
6137  vkMemReq.size,
6138  vkMemReq.alignment,
6139  suballocType,
6140  false, // canMakeOtherLost
6141  &currRequest))
6142  {
6143  // Allocate from pCurrBlock.
6144  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6145 
6146  if(mapped)
6147  {
6148  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6149  if(res != VK_SUCCESS)
6150  {
6151  return res;
6152  }
6153  }
6154 
6155  // We no longer have an empty block.
6156  if(pCurrBlock->m_Metadata.IsEmpty())
6157  {
6158  m_HasEmptyBlock = false;
6159  }
6160 
6161  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6162  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6163  (*pAllocation)->InitBlockAllocation(
6164  hCurrentPool,
6165  pCurrBlock,
6166  currRequest.offset,
6167  vkMemReq.alignment,
6168  vkMemReq.size,
6169  suballocType,
6170  mapped,
6171  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6172  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6173  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6174  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6175  return VK_SUCCESS;
6176  }
6177  }
6178 
6179  const bool canCreateNewBlock =
6180  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6181  (m_Blocks.size() < m_MaxBlockCount);
6182 
6183  // 2. Try to create new block.
6184  if(canCreateNewBlock)
6185  {
6186  // Calculate optimal size for new block.
6187  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6188  uint32_t newBlockSizeShift = 0;
6189  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6190 
6191  // Allocating blocks of other sizes is allowed only in default pools.
6192  // In custom pools block size is fixed.
6193  if(m_IsCustomPool == false)
6194  {
6195  // Allocate 1/8, 1/4, 1/2 as first blocks.
6196  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6197  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6198  {
6199  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6200  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6201  {
6202  newBlockSize = smallerNewBlockSize;
6203  ++newBlockSizeShift;
6204  }
6205  else
6206  {
6207  break;
6208  }
6209  }
6210  }
6211 
6212  size_t newBlockIndex = 0;
6213  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6214  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6215  if(m_IsCustomPool == false)
6216  {
6217  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6218  {
6219  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6220  if(smallerNewBlockSize >= vkMemReq.size)
6221  {
6222  newBlockSize = smallerNewBlockSize;
6223  ++newBlockSizeShift;
6224  res = CreateBlock(newBlockSize, &newBlockIndex);
6225  }
6226  else
6227  {
6228  break;
6229  }
6230  }
6231  }
6232 
6233  if(res == VK_SUCCESS)
6234  {
6235  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6236  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6237 
6238  if(mapped)
6239  {
6240  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6241  if(res != VK_SUCCESS)
6242  {
6243  return res;
6244  }
6245  }
6246 
6247  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6248  VmaAllocationRequest allocRequest;
6249  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6250  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6251  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6252  (*pAllocation)->InitBlockAllocation(
6253  hCurrentPool,
6254  pBlock,
6255  allocRequest.offset,
6256  vkMemReq.alignment,
6257  vkMemReq.size,
6258  suballocType,
6259  mapped,
6260  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6261  VMA_HEAVY_ASSERT(pBlock->Validate());
6262  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6263  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6264  return VK_SUCCESS;
6265  }
6266  }
6267 
6268  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6269 
6270  // 3. Try to allocate from existing blocks with making other allocations lost.
6271  if(canMakeOtherLost)
6272  {
6273  uint32_t tryIndex = 0;
6274  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6275  {
6276  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6277  VmaAllocationRequest bestRequest = {};
6278  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6279 
6280  // 1. Search existing allocations.
6281  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6282  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6283  {
6284  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6285  VMA_ASSERT(pCurrBlock);
6286  VmaAllocationRequest currRequest = {};
6287  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6288  currentFrameIndex,
6289  m_FrameInUseCount,
6290  m_BufferImageGranularity,
6291  vkMemReq.size,
6292  vkMemReq.alignment,
6293  suballocType,
6294  canMakeOtherLost,
6295  &currRequest))
6296  {
6297  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6298  if(pBestRequestBlock == VMA_NULL ||
6299  currRequestCost < bestRequestCost)
6300  {
6301  pBestRequestBlock = pCurrBlock;
6302  bestRequest = currRequest;
6303  bestRequestCost = currRequestCost;
6304 
6305  if(bestRequestCost == 0)
6306  {
6307  break;
6308  }
6309  }
6310  }
6311  }
6312 
6313  if(pBestRequestBlock != VMA_NULL)
6314  {
6315  if(mapped)
6316  {
6317  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6318  if(res != VK_SUCCESS)
6319  {
6320  return res;
6321  }
6322  }
6323 
6324  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6325  currentFrameIndex,
6326  m_FrameInUseCount,
6327  &bestRequest))
6328  {
6329  // We no longer have an empty block.
6330  if(pBestRequestBlock->m_Metadata.IsEmpty())
6331  {
6332  m_HasEmptyBlock = false;
6333  }
6334  // Allocate from pBestRequestBlock.
6335  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6336  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6337  (*pAllocation)->InitBlockAllocation(
6338  hCurrentPool,
6339  pBestRequestBlock,
6340  bestRequest.offset,
6341  vkMemReq.alignment,
6342  vkMemReq.size,
6343  suballocType,
6344  mapped,
6345  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6346  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6347  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6348  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6349  return VK_SUCCESS;
6350  }
6351  // else: Some allocations must have been touched while we are here. Next try.
6352  }
6353  else
6354  {
6355  // Could not find place in any of the blocks - break outer loop.
6356  break;
6357  }
6358  }
6359  /* Maximum number of tries exceeded - a very unlikely event that happens when
6360  many other threads are simultaneously touching allocations, making it
6361  impossible to mark them lost while we try to allocate. */
6362  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6363  {
6364  return VK_ERROR_TOO_MANY_OBJECTS;
6365  }
6366  }
6367 
6368  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6369 }
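// Editor's note: a worked example of the "1/8, 1/4, 1/2" sizing heuristic in
// step 2 above, with hypothetical numbers. With m_PreferredBlockSize =
// 256 MiB, no existing blocks, and vkMemReq.size = 5 MiB, the pre-allocation
// loop halves 256 -> 128 -> 64 -> 32 MiB (NEW_BLOCK_SIZE_SHIFT_MAX = 3
// halvings), since each candidate still exceeds both the largest existing
// block (0) and 2 * 5 MiB. If vkAllocateMemory then fails at 32 MiB, the
// retry loop cannot shrink further because the shift budget is already
// spent. In a custom pool neither loop runs: the block size stays fixed.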
6370 
6371 void VmaBlockVector::Free(
6372  VmaAllocation hAllocation)
6373 {
6374  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6375 
6376  // Scope for lock.
6377  {
6378  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6379 
6380  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6381 
6382  if(hAllocation->IsPersistentMap())
6383  {
6384  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6385  }
6386 
6387  pBlock->m_Metadata.Free(hAllocation);
6388  VMA_HEAVY_ASSERT(pBlock->Validate());
6389 
6390  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6391 
6392  // pBlock became empty after this deallocation.
6393  if(pBlock->m_Metadata.IsEmpty())
6394  {
6395  // We already have an empty block - we don't want two, so delete this one.
6396  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6397  {
6398  pBlockToDelete = pBlock;
6399  Remove(pBlock);
6400  }
6401  // We now have our first empty block.
6402  else
6403  {
6404  m_HasEmptyBlock = true;
6405  }
6406  }
6407  // pBlock didn't become empty, but we have another empty block - find and free that one.
6408  // (This is optional, a heuristic.)
6409  else if(m_HasEmptyBlock)
6410  {
6411  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6412  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6413  {
6414  pBlockToDelete = pLastBlock;
6415  m_Blocks.pop_back();
6416  m_HasEmptyBlock = false;
6417  }
6418  }
6419 
6420  IncrementallySortBlocks();
6421  }
6422 
6423  // Destruction of a free block. Deferred until this point, outside of the mutex
6424  // lock, for performance reasons.
6425  if(pBlockToDelete != VMA_NULL)
6426  {
6427  VMA_DEBUG_LOG(" Deleted empty allocation");
6428  pBlockToDelete->Destroy(m_hAllocator);
6429  vma_delete(m_hAllocator, pBlockToDelete);
6430  }
6431 }
6432 
6433 size_t VmaBlockVector::CalcMaxBlockSize() const
6434 {
6435  size_t result = 0;
6436  for(size_t i = m_Blocks.size(); i--; )
6437  {
6438  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6439  if(result >= m_PreferredBlockSize)
6440  {
6441  break;
6442  }
6443  }
6444  return result;
6445 }
6446 
6447 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6448 {
6449  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6450  {
6451  if(m_Blocks[blockIndex] == pBlock)
6452  {
6453  VmaVectorRemove(m_Blocks, blockIndex);
6454  return;
6455  }
6456  }
6457  VMA_ASSERT(0);
6458 }
6459 
6460 void VmaBlockVector::IncrementallySortBlocks()
6461 {
6462  // Bubble sort only until first swap.
6463  for(size_t i = 1; i < m_Blocks.size(); ++i)
6464  {
6465  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6466  {
6467  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6468  return;
6469  }
6470  }
6471 }
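// Editor's note: one call performs at most a single swap, so sortedness is
// achieved incrementally across many calls. Example with made-up free sizes
// {300, 100, 200}: the first call swaps the first pair -> {100, 300, 200};
// the next call swaps {300, 200} -> {100, 200, 300}; subsequent calls are
// no-ops until free sizes change again. Amortized over frequent Allocate()
// and Free() calls, this keeps m_Blocks roughly sorted by free space
// ascending without ever paying for a full sort.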
6472 
6473 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6474 {
6475  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6476  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6477  allocInfo.allocationSize = blockSize;
6478  VkDeviceMemory mem = VK_NULL_HANDLE;
6479  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6480  if(res < 0)
6481  {
6482  return res;
6483  }
6484 
6485  // New VkDeviceMemory successfully created.
6486 
6487  // Create a new block object for it.
6488  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6489  pBlock->Init(
6490  m_MemoryTypeIndex,
6491  mem,
6492  allocInfo.allocationSize);
6493 
6494  m_Blocks.push_back(pBlock);
6495  if(pNewBlockIndex != VMA_NULL)
6496  {
6497  *pNewBlockIndex = m_Blocks.size() - 1;
6498  }
6499 
6500  return VK_SUCCESS;
6501 }
6502 
6503 #if VMA_STATS_STRING_ENABLED
6504 
6505 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6506 {
6507  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6508 
6509  json.BeginObject();
6510 
6511  if(m_IsCustomPool)
6512  {
6513  json.WriteString("MemoryTypeIndex");
6514  json.WriteNumber(m_MemoryTypeIndex);
6515 
6516  json.WriteString("BlockSize");
6517  json.WriteNumber(m_PreferredBlockSize);
6518 
6519  json.WriteString("BlockCount");
6520  json.BeginObject(true);
6521  if(m_MinBlockCount > 0)
6522  {
6523  json.WriteString("Min");
6524  json.WriteNumber((uint64_t)m_MinBlockCount);
6525  }
6526  if(m_MaxBlockCount < SIZE_MAX)
6527  {
6528  json.WriteString("Max");
6529  json.WriteNumber((uint64_t)m_MaxBlockCount);
6530  }
6531  json.WriteString("Cur");
6532  json.WriteNumber((uint64_t)m_Blocks.size());
6533  json.EndObject();
6534 
6535  if(m_FrameInUseCount > 0)
6536  {
6537  json.WriteString("FrameInUseCount");
6538  json.WriteNumber(m_FrameInUseCount);
6539  }
6540  }
6541  else
6542  {
6543  json.WriteString("PreferredBlockSize");
6544  json.WriteNumber(m_PreferredBlockSize);
6545  }
6546 
6547  json.WriteString("Blocks");
6548  json.BeginArray();
6549  for(size_t i = 0; i < m_Blocks.size(); ++i)
6550  {
6551  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6552  }
6553  json.EndArray();
6554 
6555  json.EndObject();
6556 }
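// Editor's note: an illustrative, hand-written (abridged) shape of the JSON
// emitted above for a custom pool - the field values are hypothetical:
//
//   { "MemoryTypeIndex": 2, "BlockSize": 1048576,
//     "BlockCount": { "Min": 1, "Cur": 2 },
//     "Blocks": [ { "TotalBytes": 1048576, "UnusedBytes": 524288,
//                   "Allocations": 3, "UnusedRanges": 2,
//                   "Suballocations": [ ... ] }, ... ] }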
6557 
6558 #endif // #if VMA_STATS_STRING_ENABLED
6559 
6560 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6561  VmaAllocator hAllocator,
6562  uint32_t currentFrameIndex)
6563 {
6564  if(m_pDefragmentator == VMA_NULL)
6565  {
6566  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6567  hAllocator,
6568  this,
6569  currentFrameIndex);
6570  }
6571 
6572  return m_pDefragmentator;
6573 }
6574 
6575 VkResult VmaBlockVector::Defragment(
6576  VmaDefragmentationStats* pDefragmentationStats,
6577  VkDeviceSize& maxBytesToMove,
6578  uint32_t& maxAllocationsToMove)
6579 {
6580  if(m_pDefragmentator == VMA_NULL)
6581  {
6582  return VK_SUCCESS;
6583  }
6584 
6585  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6586 
6587  // Defragment.
6588  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6589 
6590  // Accumulate statistics.
6591  if(pDefragmentationStats != VMA_NULL)
6592  {
6593  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6594  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6595  pDefragmentationStats->bytesMoved += bytesMoved;
6596  pDefragmentationStats->allocationsMoved += allocationsMoved;
6597  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6598  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6599  maxBytesToMove -= bytesMoved;
6600  maxAllocationsToMove -= allocationsMoved;
6601  }
6602 
6603  // Free empty blocks.
6604  m_HasEmptyBlock = false;
6605  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6606  {
6607  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6608  if(pBlock->m_Metadata.IsEmpty())
6609  {
6610  if(m_Blocks.size() > m_MinBlockCount)
6611  {
6612  if(pDefragmentationStats != VMA_NULL)
6613  {
6614  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6615  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6616  }
6617 
6618  VmaVectorRemove(m_Blocks, blockIndex);
6619  pBlock->Destroy(m_hAllocator);
6620  vma_delete(m_hAllocator, pBlock);
6621  }
6622  else
6623  {
6624  m_HasEmptyBlock = true;
6625  }
6626  }
6627  }
6628 
6629  return result;
6630 }
6631 
6632 void VmaBlockVector::DestroyDefragmentator()
6633 {
6634  if(m_pDefragmentator != VMA_NULL)
6635  {
6636  vma_delete(m_hAllocator, m_pDefragmentator);
6637  m_pDefragmentator = VMA_NULL;
6638  }
6639 }
6640 
6641 void VmaBlockVector::MakePoolAllocationsLost(
6642  uint32_t currentFrameIndex,
6643  size_t* pLostAllocationCount)
6644 {
6645  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6646  size_t lostAllocationCount = 0;
6647  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6648  {
6649  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6650  VMA_ASSERT(pBlock);
6651  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6652  }
6653  if(pLostAllocationCount != VMA_NULL)
6654  {
6655  *pLostAllocationCount = lostAllocationCount;
6656  }
6657 }
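// Editor's sketch: this method backs the public vmaMakePoolAllocationsLost()
// entry point (name assumed from this method's role; handles below are
// placeholders). Typical per-frame usage:
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   size_t lostCount = 0;
//   vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
//   // Allocations not used within frameInUseCount frames are now lost and
//   // their memory can be reused by subsequent allocations from the pool.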
6658 
6659 void VmaBlockVector::AddStats(VmaStats* pStats)
6660 {
6661  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6662  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6663 
6664  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6665 
6666  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6667  {
6668  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6669  VMA_ASSERT(pBlock);
6670  VMA_HEAVY_ASSERT(pBlock->Validate());
6671  VmaStatInfo allocationStatInfo;
6672  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6673  VmaAddStatInfo(pStats->total, allocationStatInfo);
6674  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6675  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6676  }
6677 }
6678 
6679 ////////////////////////////////////////////////////////////////////////////////
6680 // VmaDefragmentator members definition
6681 
6682 VmaDefragmentator::VmaDefragmentator(
6683  VmaAllocator hAllocator,
6684  VmaBlockVector* pBlockVector,
6685  uint32_t currentFrameIndex) :
6686  m_hAllocator(hAllocator),
6687  m_pBlockVector(pBlockVector),
6688  m_CurrentFrameIndex(currentFrameIndex),
6689  m_BytesMoved(0),
6690  m_AllocationsMoved(0),
6691  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6692  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6693 {
6694 }
6695 
6696 VmaDefragmentator::~VmaDefragmentator()
6697 {
6698  for(size_t i = m_Blocks.size(); i--; )
6699  {
6700  vma_delete(m_hAllocator, m_Blocks[i]);
6701  }
6702 }
6703 
6704 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6705 {
6706  AllocationInfo allocInfo;
6707  allocInfo.m_hAllocation = hAlloc;
6708  allocInfo.m_pChanged = pChanged;
6709  m_Allocations.push_back(allocInfo);
6710 }
6711 
6712 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6713 {
6714  // It has already been mapped for defragmentation.
6715  if(m_pMappedDataForDefragmentation)
6716  {
6717  *ppMappedData = m_pMappedDataForDefragmentation;
6718  return VK_SUCCESS;
6719  }
6720 
6721  // It is already mapped for other purposes (e.g. a persistently mapped allocation).
6722  if(m_pBlock->m_Mapping.GetMappedData())
6723  {
6724  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6725  return VK_SUCCESS;
6726  }
6727 
6728  // Map on first usage.
6729  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6730  *ppMappedData = m_pMappedDataForDefragmentation;
6731  return res;
6732 }
6733 
6734 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6735 {
6736  if(m_pMappedDataForDefragmentation != VMA_NULL)
6737  {
6738  m_pBlock->Unmap(hAllocator, 1);
6739  }
6740 }
6741 
6742 VkResult VmaDefragmentator::DefragmentRound(
6743  VkDeviceSize maxBytesToMove,
6744  uint32_t maxAllocationsToMove)
6745 {
6746  if(m_Blocks.empty())
6747  {
6748  return VK_SUCCESS;
6749  }
6750 
6751  size_t srcBlockIndex = m_Blocks.size() - 1;
6752  size_t srcAllocIndex = SIZE_MAX;
6753  for(;;)
6754  {
6755  // 1. Find next allocation to move.
6756  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6757  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6758  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6759  {
6760  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6761  {
6762  // Finished: no more allocations to process.
6763  if(srcBlockIndex == 0)
6764  {
6765  return VK_SUCCESS;
6766  }
6767  else
6768  {
6769  --srcBlockIndex;
6770  srcAllocIndex = SIZE_MAX;
6771  }
6772  }
6773  else
6774  {
6775  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6776  }
6777  }
6778 
6779  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6780  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6781 
6782  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6783  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6784  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6785  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6786 
6787  // 2. Try to find new place for this allocation in preceding or current block.
6788  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6789  {
6790  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6791  VmaAllocationRequest dstAllocRequest;
6792  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6793  m_CurrentFrameIndex,
6794  m_pBlockVector->GetFrameInUseCount(),
6795  m_pBlockVector->GetBufferImageGranularity(),
6796  size,
6797  alignment,
6798  suballocType,
6799  false, // canMakeOtherLost
6800  &dstAllocRequest) &&
6801  MoveMakesSense(
6802  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6803  {
6804  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6805 
6806  // Reached limit on number of allocations or bytes to move.
6807  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6808  (m_BytesMoved + size > maxBytesToMove))
6809  {
6810  return VK_INCOMPLETE;
6811  }
6812 
6813  void* pDstMappedData = VMA_NULL;
6814  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6815  if(res != VK_SUCCESS)
6816  {
6817  return res;
6818  }
6819 
6820  void* pSrcMappedData = VMA_NULL;
6821  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6822  if(res != VK_SUCCESS)
6823  {
6824  return res;
6825  }
6826 
6827  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6828  memcpy(
6829  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6830  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6831  static_cast<size_t>(size));
6832 
6833  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6834  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6835 
6836  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6837 
6838  if(allocInfo.m_pChanged != VMA_NULL)
6839  {
6840  *allocInfo.m_pChanged = VK_TRUE;
6841  }
6842 
6843  ++m_AllocationsMoved;
6844  m_BytesMoved += size;
6845 
6846  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6847 
6848  break;
6849  }
6850  }
6851 
6852  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6853 
6854  if(srcAllocIndex > 0)
6855  {
6856  --srcAllocIndex;
6857  }
6858  else
6859  {
6860  if(srcBlockIndex > 0)
6861  {
6862  --srcBlockIndex;
6863  srcAllocIndex = SIZE_MAX;
6864  }
6865  else
6866  {
6867  return VK_SUCCESS;
6868  }
6869  }
6870  }
6871 }
6872 
6873 VkResult VmaDefragmentator::Defragment(
6874  VkDeviceSize maxBytesToMove,
6875  uint32_t maxAllocationsToMove)
6876 {
6877  if(m_Allocations.empty())
6878  {
6879  return VK_SUCCESS;
6880  }
6881 
6882  // Create block info for each block.
6883  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6884  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6885  {
6886  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6887  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6888  m_Blocks.push_back(pBlockInfo);
6889  }
6890 
6891  // Sort them by m_pBlock pointer value.
6892  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6893 
6894  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
6895  for(size_t allocIndex = 0, allocCount = m_Allocations.size(); allocIndex < allocCount; ++allocIndex)
6896  {
6897  AllocationInfo& allocInfo = m_Allocations[allocIndex];
6898  // Now that we are inside VmaBlockVector::m_Mutex, we can do the final check whether this allocation was not lost.
6899  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6900  {
6901  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6902  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6903  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6904  {
6905  (*it)->m_Allocations.push_back(allocInfo);
6906  }
6907  else
6908  {
6909  VMA_ASSERT(0);
6910  }
6911  }
6912  }
6913  m_Allocations.clear();
6914 
6915  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6916  {
6917  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6918  pBlockInfo->CalcHasNonMovableAllocations();
6919  pBlockInfo->SortAllocationsBySizeDescecnding();
6920  }
6921 
6922  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6923  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6924 
6925  // Execute defragmentation rounds (the main part).
6926  VkResult result = VK_SUCCESS;
6927  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6928  {
6929  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6930  }
6931 
6932  // Unmap blocks that were mapped for defragmentation.
6933  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6934  {
6935  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6936  }
6937 
6938  return result;
6939 }
6940 
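// A move "makes sense" only if it compacts data toward the front: the destination
// (blockIndex, offset) must compare lexicographically less than the source pair.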
6941 bool VmaDefragmentator::MoveMakesSense(
6942  size_t dstBlockIndex, VkDeviceSize dstOffset,
6943  size_t srcBlockIndex, VkDeviceSize srcOffset)
6944 {
6945  if(dstBlockIndex < srcBlockIndex)
6946  {
6947  return true;
6948  }
6949  if(dstBlockIndex > srcBlockIndex)
6950  {
6951  return false;
6952  }
6953  if(dstOffset < srcOffset)
6954  {
6955  return true;
6956  }
6957  return false;
6958 }
6959 
6960 ////////////////////////////////////////////////////////////////////////////////
6961 // VmaAllocator_T
6962 
6963 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
6964  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
6965  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
6966  m_hDevice(pCreateInfo->device),
6967  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
6968  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
6969  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
6970  m_PreferredLargeHeapBlockSize(0),
6971  m_PhysicalDevice(pCreateInfo->physicalDevice),
6972  m_CurrentFrameIndex(0),
6973  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
6974 {
6975  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
6976 
6977  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
6978  memset(&m_MemProps, 0, sizeof(m_MemProps));
6979  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
6980 
6981  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
6982  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
6983 
6984  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
6985  {
6986  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
6987  }
6988 
6989  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
6990  {
6991  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
6992  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
6993  }
6994 
6995  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
6996 
6997  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
6998  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
6999 
7000  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7001  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7002 
7003  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7004  {
7005  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7006  {
7007  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7008  if(limit != VK_WHOLE_SIZE)
7009  {
7010  m_HeapSizeLimit[heapIndex] = limit;
7011  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7012  {
7013  m_MemProps.memoryHeaps[heapIndex].size = limit;
7014  }
7015  }
7016  }
7017  }
7018 
7019  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7020  {
7021  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7022 
7023  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7024  this,
7025  memTypeIndex,
7026  preferredBlockSize,
7027  0,
7028  SIZE_MAX,
7029  GetBufferImageGranularity(),
7030  pCreateInfo->frameInUseCount,
7031  false); // isCustomPool
7032  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7033  // because minBlockCount is 0.
7034  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7035  }
7036 }
7037 
7038 VmaAllocator_T::~VmaAllocator_T()
7039 {
7040  VMA_ASSERT(m_Pools.empty());
7041 
7042  for(size_t i = GetMemoryTypeCount(); i--; )
7043  {
7044  vma_delete(this, m_pDedicatedAllocations[i]);
7045  vma_delete(this, m_pBlockVectors[i]);
7046  }
7047 }
7048 
7049 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7050 {
7051 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7052  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7053  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7054  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7055  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7056  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7057  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7058  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7059  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7060  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7061  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7062  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7063  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7064  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7065  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7066  if(m_UseKhrDedicatedAllocation)
7067  {
7068  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7069  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7070  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7071  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7072  }
7073 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7074 
7075 #define VMA_COPY_IF_NOT_NULL(funcName) \
7076  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7077 
7078  if(pVulkanFunctions != VMA_NULL)
7079  {
7080  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7081  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7082  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7083  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7084  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7085  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7086  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7087  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7088  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7089  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7090  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7091  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7092  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7093  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7094  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7095  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7096  }
7097 
7098 #undef VMA_COPY_IF_NOT_NULL
7099 
7100  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7101  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7102  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7103  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7104  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7105  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7106  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7107  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7108  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7109  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7110  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7111  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7112  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7113  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7114  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7115  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7116  if(m_UseKhrDedicatedAllocation)
7117  {
7118  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7119  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7120  }
7121 }
7122 
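// Heuristic for the default block size of a memory type: heaps no larger than
// VMA_SMALL_HEAP_MAX_SIZE get blocks of 1/8 of the heap size, so a single block
// never monopolizes a small heap (e.g. a 256 MiB heap gets 32 MiB blocks);
// larger heaps use m_PreferredLargeHeapBlockSize, which defaults to
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE unless overridden via
// VmaAllocatorCreateInfo::preferredLargeHeapBlockSize.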
7123 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7124 {
7125  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7126  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7127  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7128  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7129 }
7130 
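// Strategy within the chosen memory type: prefer a dedicated VkDeviceMemory when
// explicitly requested, when the driver requires or prefers it, or when the
// requested size exceeds half of the preferred block size. Otherwise sub-allocate
// from the block vector, falling back to dedicated memory if that fails (unless
// VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids new allocations).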
7131 VkResult VmaAllocator_T::AllocateMemoryOfType(
7132  const VkMemoryRequirements& vkMemReq,
7133  bool dedicatedAllocation,
7134  VkBuffer dedicatedBuffer,
7135  VkImage dedicatedImage,
7136  const VmaAllocationCreateInfo& createInfo,
7137  uint32_t memTypeIndex,
7138  VmaSuballocationType suballocType,
7139  VmaAllocation* pAllocation)
7140 {
7141  VMA_ASSERT(pAllocation != VMA_NULL);
7142  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7143 
7144  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7145 
7146  // If memory type is not HOST_VISIBLE, disable MAPPED.
7147  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7148  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7149  {
7150  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7151  }
7152 
7153  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7154  VMA_ASSERT(blockVector);
7155 
7156  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7157  bool preferDedicatedMemory =
7158  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7159  dedicatedAllocation ||
7160  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
7161  vkMemReq.size > preferredBlockSize / 2;
7162 
7163  if(preferDedicatedMemory &&
7164  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7165  finalCreateInfo.pool == VK_NULL_HANDLE)
7166  {
7167  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7168  }
7169 
7170  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7171  {
7172  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7173  {
7174  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7175  }
7176  else
7177  {
7178  return AllocateDedicatedMemory(
7179  vkMemReq.size,
7180  suballocType,
7181  memTypeIndex,
7182  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7183  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7184  finalCreateInfo.pUserData,
7185  dedicatedBuffer,
7186  dedicatedImage,
7187  pAllocation);
7188  }
7189  }
7190  else
7191  {
7192  VkResult res = blockVector->Allocate(
7193  VK_NULL_HANDLE, // hCurrentPool
7194  m_CurrentFrameIndex.load(),
7195  vkMemReq,
7196  finalCreateInfo,
7197  suballocType,
7198  pAllocation);
7199  if(res == VK_SUCCESS)
7200  {
7201  return res;
7202  }
7203 
7204  // Allocation from block vector failed. Try dedicated memory.
7205  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7206  {
7207  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7208  }
7209  else
7210  {
7211  res = AllocateDedicatedMemory(
7212  vkMemReq.size,
7213  suballocType,
7214  memTypeIndex,
7215  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7216  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7217  finalCreateInfo.pUserData,
7218  dedicatedBuffer,
7219  dedicatedImage,
7220  pAllocation);
7221  if(res == VK_SUCCESS)
7222  {
7223  // Succeeded: AllocateDedicatedMemory function already filled pAllocation, nothing more to do here.
7224  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7225  return VK_SUCCESS;
7226  }
7227  else
7228  {
7229  // Everything failed: Return error code.
7230  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7231  return res;
7232  }
7233  }
7234  }
7235 }
7236 
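// Creates one VkDeviceMemory object used exclusively by this allocation, chaining
// VkMemoryDedicatedAllocateInfoKHR when VK_KHR_dedicated_allocation is in use and
// a dedicated buffer/image handle was provided. If `map` is true
// (VMA_ALLOCATION_CREATE_MAPPED_BIT), the memory stays persistently mapped for
// the allocation's whole lifetime.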
7237 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7238  VkDeviceSize size,
7239  VmaSuballocationType suballocType,
7240  uint32_t memTypeIndex,
7241  bool map,
7242  bool isUserDataString,
7243  void* pUserData,
7244  VkBuffer dedicatedBuffer,
7245  VkImage dedicatedImage,
7246  VmaAllocation* pAllocation)
7247 {
7248  VMA_ASSERT(pAllocation);
7249 
7250  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7251  allocInfo.memoryTypeIndex = memTypeIndex;
7252  allocInfo.allocationSize = size;
7253 
7254  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7255  if(m_UseKhrDedicatedAllocation)
7256  {
7257  if(dedicatedBuffer != VK_NULL_HANDLE)
7258  {
7259  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7260  dedicatedAllocInfo.buffer = dedicatedBuffer;
7261  allocInfo.pNext = &dedicatedAllocInfo;
7262  }
7263  else if(dedicatedImage != VK_NULL_HANDLE)
7264  {
7265  dedicatedAllocInfo.image = dedicatedImage;
7266  allocInfo.pNext = &dedicatedAllocInfo;
7267  }
7268  }
7269 
7270  // Allocate VkDeviceMemory.
7271  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7272  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7273  if(res < 0)
7274  {
7275  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7276  return res;
7277  }
7278 
7279  void* pMappedData = VMA_NULL;
7280  if(map)
7281  {
7282  res = (*m_VulkanFunctions.vkMapMemory)(
7283  m_hDevice,
7284  hMemory,
7285  0,
7286  VK_WHOLE_SIZE,
7287  0,
7288  &pMappedData);
7289  if(res < 0)
7290  {
7291  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7292  FreeVulkanMemory(memTypeIndex, size, hMemory);
7293  return res;
7294  }
7295  }
7296 
7297  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7298  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7299  (*pAllocation)->SetUserData(this, pUserData);
7300 
7301  // Register it in m_pDedicatedAllocations.
7302  {
7303  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7304  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7305  VMA_ASSERT(pDedicatedAllocations);
7306  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7307  }
7308 
7309  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7310 
7311  return VK_SUCCESS;
7312 }
7313 
7314 void VmaAllocator_T::GetBufferMemoryRequirements(
7315  VkBuffer hBuffer,
7316  VkMemoryRequirements& memReq,
7317  bool& requiresDedicatedAllocation,
7318  bool& prefersDedicatedAllocation) const
7319 {
7320  if(m_UseKhrDedicatedAllocation)
7321  {
7322  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7323  memReqInfo.buffer = hBuffer;
7324 
7325  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7326 
7327  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7328  memReq2.pNext = &memDedicatedReq;
7329 
7330  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7331 
7332  memReq = memReq2.memoryRequirements;
7333  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7334  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7335  }
7336  else
7337  {
7338  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7339  requiresDedicatedAllocation = false;
7340  prefersDedicatedAllocation = false;
7341  }
7342 }
7343 
7344 void VmaAllocator_T::GetImageMemoryRequirements(
7345  VkImage hImage,
7346  VkMemoryRequirements& memReq,
7347  bool& requiresDedicatedAllocation,
7348  bool& prefersDedicatedAllocation) const
7349 {
7350  if(m_UseKhrDedicatedAllocation)
7351  {
7352  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7353  memReqInfo.image = hImage;
7354 
7355  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7356 
7357  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7358  memReq2.pNext = &memDedicatedReq;
7359 
7360  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7361 
7362  memReq = memReq2.memoryRequirements;
7363  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7364  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7365  }
7366  else
7367  {
7368  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7369  requiresDedicatedAllocation = false;
7370  prefersDedicatedAllocation = false;
7371  }
7372 }
7373 
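// Top-level allocation entry point: validates mutually exclusive create flags,
// routes pool allocations to the pool's own block vector, and otherwise picks the
// best memory type with vmaFindMemoryTypeIndex. If allocation from the best type
// fails, that type's bit is cleared from memoryTypeBits and the next-best
// compatible type is tried, until no candidate remains.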
7374 VkResult VmaAllocator_T::AllocateMemory(
7375  const VkMemoryRequirements& vkMemReq,
7376  bool requiresDedicatedAllocation,
7377  bool prefersDedicatedAllocation,
7378  VkBuffer dedicatedBuffer,
7379  VkImage dedicatedImage,
7380  const VmaAllocationCreateInfo& createInfo,
7381  VmaSuballocationType suballocType,
7382  VmaAllocation* pAllocation)
7383 {
7384  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7385  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7386  {
7387  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7388  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7389  }
7390  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7391  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7392  {
7393  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7394  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7395  }
7396  if(requiresDedicatedAllocation)
7397  {
7398  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7399  {
7400  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7401  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7402  }
7403  if(createInfo.pool != VK_NULL_HANDLE)
7404  {
7405  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7406  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7407  }
7408  }
7409  if((createInfo.pool != VK_NULL_HANDLE) &&
7410  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7411  {
7412  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7414  }
7415 
7416  if(createInfo.pool != VK_NULL_HANDLE)
7417  {
7418  return createInfo.pool->m_BlockVector.Allocate(
7419  createInfo.pool,
7420  m_CurrentFrameIndex.load(),
7421  vkMemReq,
7422  createInfo,
7423  suballocType,
7424  pAllocation);
7425  }
7426  else
7427  {
7428  // Bit mask of Vulkan memory types acceptable for this allocation.
7429  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7430  uint32_t memTypeIndex = UINT32_MAX;
7431  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7432  if(res == VK_SUCCESS)
7433  {
7434  res = AllocateMemoryOfType(
7435  vkMemReq,
7436  requiresDedicatedAllocation || prefersDedicatedAllocation,
7437  dedicatedBuffer,
7438  dedicatedImage,
7439  createInfo,
7440  memTypeIndex,
7441  suballocType,
7442  pAllocation);
7443  // Succeeded on first try.
7444  if(res == VK_SUCCESS)
7445  {
7446  return res;
7447  }
7448  // Allocation from this memory type failed. Try other compatible memory types.
7449  else
7450  {
7451  for(;;)
7452  {
7453  // Remove old memTypeIndex from list of possibilities.
7454  memoryTypeBits &= ~(1u << memTypeIndex);
7455  // Find alternative memTypeIndex.
7456  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7457  if(res == VK_SUCCESS)
7458  {
7459  res = AllocateMemoryOfType(
7460  vkMemReq,
7461  requiresDedicatedAllocation || prefersDedicatedAllocation,
7462  dedicatedBuffer,
7463  dedicatedImage,
7464  createInfo,
7465  memTypeIndex,
7466  suballocType,
7467  pAllocation);
7468  // Allocation from this alternative memory type succeeded.
7469  if(res == VK_SUCCESS)
7470  {
7471  return res;
7472  }
7473  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7474  }
7475  // No other matching memory type index could be found.
7476  else
7477  {
7478  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7479  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7480  }
7481  }
7482  }
7483  }
7484  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7485  else
7486  return res;
7487  }
7488 }
7489 
7490 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7491 {
7492  VMA_ASSERT(allocation);
7493 
7494  if(allocation->CanBecomeLost() == false ||
7495  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7496  {
7497  switch(allocation->GetType())
7498  {
7499  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7500  {
7501  VmaBlockVector* pBlockVector = VMA_NULL;
7502  VmaPool hPool = allocation->GetPool();
7503  if(hPool != VK_NULL_HANDLE)
7504  {
7505  pBlockVector = &hPool->m_BlockVector;
7506  }
7507  else
7508  {
7509  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7510  pBlockVector = m_pBlockVectors[memTypeIndex];
7511  }
7512  pBlockVector->Free(allocation);
7513  }
7514  break;
7515  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7516  FreeDedicatedMemory(allocation);
7517  break;
7518  default:
7519  VMA_ASSERT(0);
7520  }
7521  }
7522 
7523  allocation->SetUserData(this, VMA_NULL);
7524  vma_delete(this, allocation);
7525 }
7526 
7527 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7528 {
7529  // Initialize.
7530  InitStatInfo(pStats->total);
7531  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7532  InitStatInfo(pStats->memoryType[i]);
7533  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7534  InitStatInfo(pStats->memoryHeap[i]);
7535 
7536  // Process default pools.
7537  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7538  {
7539  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7540  VMA_ASSERT(pBlockVector);
7541  pBlockVector->AddStats(pStats);
7542  }
7543 
7544  // Process custom pools.
7545  {
7546  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7547  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7548  {
7549  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7550  }
7551  }
7552 
7553  // Process dedicated allocations.
7554  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7555  {
7556  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7557  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7558  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7559  VMA_ASSERT(pDedicatedAllocVector);
7560  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7561  {
7562  VmaStatInfo allocationStatInfo;
7563  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7564  VmaAddStatInfo(pStats->total, allocationStatInfo);
7565  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7566  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7567  }
7568  }
7569 
7570  // Postprocess.
7571  VmaPostprocessCalcStatInfo(pStats->total);
7572  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7573  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7574  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7575  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7576 }
7577 
7578 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7579 
7580 VkResult VmaAllocator_T::Defragment(
7581  VmaAllocation* pAllocations,
7582  size_t allocationCount,
7583  VkBool32* pAllocationsChanged,
7584  const VmaDefragmentationInfo* pDefragmentationInfo,
7585  VmaDefragmentationStats* pDefragmentationStats)
7586 {
7587  if(pAllocationsChanged != VMA_NULL)
7588  {
7589  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7590  }
7591  if(pDefragmentationStats != VMA_NULL)
7592  {
7593  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7594  }
7595 
7596  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7597 
7598  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7599 
7600  const size_t poolCount = m_Pools.size();
7601 
7602  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7603  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7604  {
7605  VmaAllocation hAlloc = pAllocations[allocIndex];
7606  VMA_ASSERT(hAlloc);
7607  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7608  // DedicatedAlloc cannot be defragmented.
7609  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7610  // Only HOST_VISIBLE memory types can be defragmented.
7611  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7612  // Lost allocation cannot be defragmented.
7613  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7614  {
7615  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7616 
7617  const VmaPool hAllocPool = hAlloc->GetPool();
7618  // This allocation belongs to custom pool.
7619  if(hAllocPool != VK_NULL_HANDLE)
7620  {
7621  pAllocBlockVector = &hAllocPool->GetBlockVector();
7622  }
7623  // This allocation belongs to general pool.
7624  else
7625  {
7626  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7627  }
7628 
7629  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7630 
7631  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7632  &pAllocationsChanged[allocIndex] : VMA_NULL;
7633  pDefragmentator->AddAllocation(hAlloc, pChanged);
7634  }
7635  }
7636 
7637  VkResult result = VK_SUCCESS;
7638 
7639  // ======== Main processing.
7640 
7641  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
7642  uint32_t maxAllocationsToMove = UINT32_MAX;
7643  if(pDefragmentationInfo != VMA_NULL)
7644  {
7645  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7646  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7647  }
7648 
7649  // Process standard memory.
7650  for(uint32_t memTypeIndex = 0;
7651  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7652  ++memTypeIndex)
7653  {
7654  // Only HOST_VISIBLE memory types can be defragmented.
7655  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7656  {
7657  result = m_pBlockVectors[memTypeIndex]->Defragment(
7658  pDefragmentationStats,
7659  maxBytesToMove,
7660  maxAllocationsToMove);
7661  }
7662  }
7663 
7664  // Process custom pools.
7665  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7666  {
7667  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7668  pDefragmentationStats,
7669  maxBytesToMove,
7670  maxAllocationsToMove);
7671  }
7672 
7673  // ======== Destroy defragmentators.
7674 
7675  // Process custom pools.
7676  for(size_t poolIndex = poolCount; poolIndex--; )
7677  {
7678  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7679  }
7680 
7681  // Process standard memory.
7682  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7683  {
7684  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7685  {
7686  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7687  }
7688  }
7689 
7690  return result;
7691 }
7692 
7693 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7694 {
7695  if(hAllocation->CanBecomeLost())
7696  {
7697  /*
7698  Warning: This is a carefully designed algorithm.
7699  Do not modify unless you really know what you're doing :)
7700  */
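 // In short: atomically advance the allocation's last-use frame index to the
 // current frame via compare-exchange, looping until the allocation is observed
 // either lost or already touched during this frame.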
7701  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7702  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7703  for(;;)
7704  {
7705  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7706  {
7707  pAllocationInfo->memoryType = UINT32_MAX;
7708  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7709  pAllocationInfo->offset = 0;
7710  pAllocationInfo->size = hAllocation->GetSize();
7711  pAllocationInfo->pMappedData = VMA_NULL;
7712  pAllocationInfo->pUserData = hAllocation->GetUserData();
7713  return;
7714  }
7715  else if(localLastUseFrameIndex == localCurrFrameIndex)
7716  {
7717  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7718  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7719  pAllocationInfo->offset = hAllocation->GetOffset();
7720  pAllocationInfo->size = hAllocation->GetSize();
7721  pAllocationInfo->pMappedData = VMA_NULL;
7722  pAllocationInfo->pUserData = hAllocation->GetUserData();
7723  return;
7724  }
7725  else // Last use time earlier than current time.
7726  {
7727  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7728  {
7729  localLastUseFrameIndex = localCurrFrameIndex;
7730  }
7731  }
7732  }
7733  }
7734  else
7735  {
7736  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7737  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7738  pAllocationInfo->offset = hAllocation->GetOffset();
7739  pAllocationInfo->size = hAllocation->GetSize();
7740  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7741  pAllocationInfo->pUserData = hAllocation->GetUserData();
7742  }
7743 }
7744 
7745 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7746 {
7747  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7748 
7749  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7750 
7751  if(newCreateInfo.maxBlockCount == 0)
7752  {
7753  newCreateInfo.maxBlockCount = SIZE_MAX;
7754  }
7755  if(newCreateInfo.blockSize == 0)
7756  {
7757  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7758  }
7759 
7760  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7761 
7762  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7763  if(res != VK_SUCCESS)
7764  {
7765  vma_delete(this, *pPool);
7766  *pPool = VMA_NULL;
7767  return res;
7768  }
7769 
7770  // Add to m_Pools.
7771  {
7772  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7773  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7774  }
7775 
7776  return VK_SUCCESS;
7777 }
7778 
7779 void VmaAllocator_T::DestroyPool(VmaPool pool)
7780 {
7781  // Remove from m_Pools.
7782  {
7783  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7784  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7785  VMA_ASSERT(success && "Pool not found in Allocator.");
7786  }
7787 
7788  vma_delete(this, pool);
7789 }
7790 
7791 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7792 {
7793  pool->m_BlockVector.GetPoolStats(pPoolStats);
7794 }
7795 
7796 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7797 {
7798  m_CurrentFrameIndex.store(frameIndex);
7799 }
7800 
7801 void VmaAllocator_T::MakePoolAllocationsLost(
7802  VmaPool hPool,
7803  size_t* pLostAllocationCount)
7804 {
7805  hPool->m_BlockVector.MakePoolAllocationsLost(
7806  m_CurrentFrameIndex.load(),
7807  pLostAllocationCount);
7808 }
7809 
7810 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7811 {
7812  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7813  (*pAllocation)->InitLost();
7814 }
7815 
7816 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7817 {
7818  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7819 
7820  VkResult res;
7821  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7822  {
7823  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7824  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7825  {
7826  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7827  if(res == VK_SUCCESS)
7828  {
7829  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7830  }
7831  }
7832  else
7833  {
7834  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7835  }
7836  }
7837  else
7838  {
7839  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7840  }
7841 
7842  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7843  {
7844  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7845  }
7846 
7847  return res;
7848 }
7849 
7850 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7851 {
7852  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7853  {
7854  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7855  }
7856 
7857  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7858 
7859  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7860  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7861  {
7862  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7863  m_HeapSizeLimit[heapIndex] += size;
7864  }
7865 }
7866 
7867 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7868 {
7869  if(hAllocation->CanBecomeLost())
7870  {
7871  return VK_ERROR_MEMORY_MAP_FAILED;
7872  }
7873 
7874  switch(hAllocation->GetType())
7875  {
7876  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7877  {
7878  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7879  char *pBytes = VMA_NULL;
7880  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
7881  if(res == VK_SUCCESS)
7882  {
7883  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7884  hAllocation->BlockAllocMap();
7885  }
7886  return res;
7887  }
7888  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7889  return hAllocation->DedicatedAllocMap(this, ppData);
7890  default:
7891  VMA_ASSERT(0);
7892  return VK_ERROR_MEMORY_MAP_FAILED;
7893  }
7894 }
7895 
7896 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7897 {
7898  switch(hAllocation->GetType())
7899  {
7900  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7901  {
7902  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7903  hAllocation->BlockAllocUnmap();
7904  pBlock->Unmap(this, 1);
7905  }
7906  break;
7907  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7908  hAllocation->DedicatedAllocUnmap(this);
7909  break;
7910  default:
7911  VMA_ASSERT(0);
7912  }
7913 }
7914 
7915 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7916 {
7917  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7918 
7919  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7920  {
7921  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7922  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7923  VMA_ASSERT(pDedicatedAllocations);
7924  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
7925  VMA_ASSERT(success);
7926  }
7927 
7928  VkDeviceMemory hMemory = allocation->GetMemory();
7929 
7930  if(allocation->GetMappedData() != VMA_NULL)
7931  {
7932  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
7933  }
7934 
7935  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
7936 
7937  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
7938 }
7939 
7940 #if VMA_STATS_STRING_ENABLED
7941 
7942 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
7943 {
7944  bool dedicatedAllocationsStarted = false;
7945  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7946  {
7947  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7948  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7949  VMA_ASSERT(pDedicatedAllocVector);
7950  if(pDedicatedAllocVector->empty() == false)
7951  {
7952  if(dedicatedAllocationsStarted == false)
7953  {
7954  dedicatedAllocationsStarted = true;
7955  json.WriteString("DedicatedAllocations");
7956  json.BeginObject();
7957  }
7958 
7959  json.BeginString("Type ");
7960  json.ContinueString(memTypeIndex);
7961  json.EndString();
7962 
7963  json.BeginArray();
7964 
7965  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
7966  {
7967  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
7968  json.BeginObject(true);
7969 
7970  json.WriteString("Type");
7971  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
7972 
7973  json.WriteString("Size");
7974  json.WriteNumber(hAlloc->GetSize());
7975 
7976  const void* pUserData = hAlloc->GetUserData();
7977  if(pUserData != VMA_NULL)
7978  {
7979  json.WriteString("UserData");
7980  if(hAlloc->IsUserDataString())
7981  {
7982  json.WriteString((const char*)pUserData);
7983  }
7984  else
7985  {
7986  json.BeginString();
7987  json.ContinueString_Pointer(pUserData);
7988  json.EndString();
7989  }
7990  }
7991 
7992  json.EndObject();
7993  }
7994 
7995  json.EndArray();
7996  }
7997  }
7998  if(dedicatedAllocationsStarted)
7999  {
8000  json.EndObject();
8001  }
8002 
8003  {
8004  bool allocationsStarted = false;
8005  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8006  {
8007  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8008  {
8009  if(allocationsStarted == false)
8010  {
8011  allocationsStarted = true;
8012  json.WriteString("DefaultPools");
8013  json.BeginObject();
8014  }
8015 
8016  json.BeginString("Type ");
8017  json.ContinueString(memTypeIndex);
8018  json.EndString();
8019 
8020  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8021  }
8022  }
8023  if(allocationsStarted)
8024  {
8025  json.EndObject();
8026  }
8027  }
8028 
8029  {
8030  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8031  const size_t poolCount = m_Pools.size();
8032  if(poolCount > 0)
8033  {
8034  json.WriteString("Pools");
8035  json.BeginArray();
8036  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8037  {
8038  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8039  }
8040  json.EndArray();
8041  }
8042  }
8043 }
8044 
8045 #endif // #if VMA_STATS_STRING_ENABLED
8046 
8047 static VkResult AllocateMemoryForImage(
8048  VmaAllocator allocator,
8049  VkImage image,
8050  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8051  VmaSuballocationType suballocType,
8052  VmaAllocation* pAllocation)
8053 {
8054  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8055 
8056  VkMemoryRequirements vkMemReq = {};
8057  bool requiresDedicatedAllocation = false;
8058  bool prefersDedicatedAllocation = false;
8059  allocator->GetImageMemoryRequirements(image, vkMemReq,
8060  requiresDedicatedAllocation, prefersDedicatedAllocation);
8061 
8062  return allocator->AllocateMemory(
8063  vkMemReq,
8064  requiresDedicatedAllocation,
8065  prefersDedicatedAllocation,
8066  VK_NULL_HANDLE, // dedicatedBuffer
8067  image, // dedicatedImage
8068  *pAllocationCreateInfo,
8069  suballocType,
8070  pAllocation);
8071 }
8072 
8073 ////////////////////////////////////////////////////////////////////////////////
8074 // Public interface
8075 
8076 VkResult vmaCreateAllocator(
8077  const VmaAllocatorCreateInfo* pCreateInfo,
8078  VmaAllocator* pAllocator)
8079 {
8080  VMA_ASSERT(pCreateInfo && pAllocator);
8081  VMA_DEBUG_LOG("vmaCreateAllocator");
8082  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8083  return VK_SUCCESS;
8084 }
8085 
8086 void vmaDestroyAllocator(
8087  VmaAllocator allocator)
8088 {
8089  if(allocator != VK_NULL_HANDLE)
8090  {
8091  VMA_DEBUG_LOG("vmaDestroyAllocator");
8092  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8093  vma_delete(&allocationCallbacks, allocator);
8094  }
8095 }
8096 
8097 void vmaGetPhysicalDeviceProperties(
8098  VmaAllocator allocator,
8099  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8100 {
8101  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8102  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8103 }
8104 
8105 void vmaGetMemoryProperties(
8106  VmaAllocator allocator,
8107  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8108 {
8109  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8110  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8111 }
8112 
8113 void vmaGetMemoryTypeProperties(
8114  VmaAllocator allocator,
8115  uint32_t memoryTypeIndex,
8116  VkMemoryPropertyFlags* pFlags)
8117 {
8118  VMA_ASSERT(allocator && pFlags);
8119  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8120  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8121 }
8122 
8123 void vmaSetCurrentFrameIndex(
8124  VmaAllocator allocator,
8125  uint32_t frameIndex)
8126 {
8127  VMA_ASSERT(allocator);
8128  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8129 
8130  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8131 
8132  allocator->SetCurrentFrameIndex(frameIndex);
8133 }
8134 
8135 void vmaCalculateStats(
8136  VmaAllocator allocator,
8137  VmaStats* pStats)
8138 {
8139  VMA_ASSERT(allocator && pStats);
8140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8141  allocator->CalculateStats(pStats);
8142 }
8143 
8144 #if VMA_STATS_STRING_ENABLED
8145 
8146 void vmaBuildStatsString(
8147  VmaAllocator allocator,
8148  char** ppStatsString,
8149  VkBool32 detailedMap)
8150 {
8151  VMA_ASSERT(allocator && ppStatsString);
8152  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8153 
8154  VmaStringBuilder sb(allocator);
8155  {
8156  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8157  json.BeginObject();
8158 
8159  VmaStats stats;
8160  allocator->CalculateStats(&stats);
8161 
8162  json.WriteString("Total");
8163  VmaPrintStatInfo(json, stats.total);
8164 
8165  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8166  {
8167  json.BeginString("Heap ");
8168  json.ContinueString(heapIndex);
8169  json.EndString();
8170  json.BeginObject();
8171 
8172  json.WriteString("Size");
8173  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8174 
8175  json.WriteString("Flags");
8176  json.BeginArray(true);
8177  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8178  {
8179  json.WriteString("DEVICE_LOCAL");
8180  }
8181  json.EndArray();
8182 
8183  if(stats.memoryHeap[heapIndex].blockCount > 0)
8184  {
8185  json.WriteString("Stats");
8186  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8187  }
8188 
8189  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8190  {
8191  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8192  {
8193  json.BeginString("Type ");
8194  json.ContinueString(typeIndex);
8195  json.EndString();
8196 
8197  json.BeginObject();
8198 
8199  json.WriteString("Flags");
8200  json.BeginArray(true);
8201  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8202  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8203  {
8204  json.WriteString("DEVICE_LOCAL");
8205  }
8206  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8207  {
8208  json.WriteString("HOST_VISIBLE");
8209  }
8210  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8211  {
8212  json.WriteString("HOST_COHERENT");
8213  }
8214  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8215  {
8216  json.WriteString("HOST_CACHED");
8217  }
8218  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8219  {
8220  json.WriteString("LAZILY_ALLOCATED");
8221  }
8222  json.EndArray();
8223 
8224  if(stats.memoryType[typeIndex].blockCount > 0)
8225  {
8226  json.WriteString("Stats");
8227  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8228  }
8229 
8230  json.EndObject();
8231  }
8232  }
8233 
8234  json.EndObject();
8235  }
8236  if(detailedMap == VK_TRUE)
8237  {
8238  allocator->PrintDetailedMap(json);
8239  }
8240 
8241  json.EndObject();
8242  }
8243 
8244  const size_t len = sb.GetLength();
8245  char* const pChars = vma_new_array(allocator, char, len + 1);
8246  if(len > 0)
8247  {
8248  memcpy(pChars, sb.GetData(), len);
8249  }
8250  pChars[len] = '\0';
8251  *ppStatsString = pChars;
8252 }
8253 
8254 void vmaFreeStatsString(
8255  VmaAllocator allocator,
8256  char* pStatsString)
8257 {
8258  if(pStatsString != VMA_NULL)
8259  {
8260  VMA_ASSERT(allocator);
8261  size_t len = strlen(pStatsString);
8262  vma_delete_array(allocator, pStatsString, len + 1);
8263  }
8264 }
8265 
8266 #endif // #if VMA_STATS_STRING_ENABLED
8267 
8268 /*
8269 This function is not protected by any mutex because it just reads immutable data.
8270 */
8271 VkResult vmaFindMemoryTypeIndex(
8272  VmaAllocator allocator,
8273  uint32_t memoryTypeBits,
8274  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8275  uint32_t* pMemoryTypeIndex)
8276 {
8277  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8278  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8279  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8280 
8281  if(pAllocationCreateInfo->memoryTypeBits != 0)
8282  {
8283  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8284  }
8285 
8286  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8287  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8288 
8289  // Convert usage to requiredFlags and preferredFlags.
8290  switch(pAllocationCreateInfo->usage)
8291  {
8292  case VMA_MEMORY_USAGE_UNKNOWN:
8293  break;
8294  case VMA_MEMORY_USAGE_GPU_ONLY:
8295  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8296  break;
8297  case VMA_MEMORY_USAGE_CPU_ONLY:
8298  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8299  break;
8300  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8301  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8302  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8303  break;
8304  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8305  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8306  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8307  break;
8308  default:
8309  break;
8310  }
8311 
8312  *pMemoryTypeIndex = UINT32_MAX;
8313  uint32_t minCost = UINT32_MAX;
8314  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8315  memTypeIndex < allocator->GetMemoryTypeCount();
8316  ++memTypeIndex, memTypeBit <<= 1)
8317  {
8318  // This memory type is acceptable according to memoryTypeBits bitmask.
8319  if((memTypeBit & memoryTypeBits) != 0)
8320  {
8321  const VkMemoryPropertyFlags currFlags =
8322  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8323  // This memory type contains requiredFlags.
8324  if((requiredFlags & ~currFlags) == 0)
8325  {
8326  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8327  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8328  // Remember memory type with lowest cost.
8329  if(currCost < minCost)
8330  {
8331  *pMemoryTypeIndex = memTypeIndex;
8332  if(currCost == 0)
8333  {
8334  return VK_SUCCESS;
8335  }
8336  minCost = currCost;
8337  }
8338  }
8339  }
8340  }
8341  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8342 }
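/*
Minimal usage sketch (hypothetical surrounding code: `device`, `stagingBuf` and
`allocator` are assumed to exist) - picking a memory type for a staging buffer:

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, stagingBuf, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/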
8343 
8344 VkResult vmaCreatePool(
8345  VmaAllocator allocator,
8346  const VmaPoolCreateInfo* pCreateInfo,
8347  VmaPool* pPool)
8348 {
8349  VMA_ASSERT(allocator && pCreateInfo && pPool);
8350 
8351  VMA_DEBUG_LOG("vmaCreatePool");
8352 
8353  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8354 
8355  return allocator->CreatePool(pCreateInfo, pPool);
8356 }
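/*
Sketch of creating a custom pool (`allocator` and `memTypeIndex` assumed, e.g.
memTypeIndex obtained from vmaFindMemoryTypeIndex above):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 0;     // 0 = derive from CalcPreferredBlockSize.
    poolCreateInfo.maxBlockCount = 0; // 0 = unlimited (becomes SIZE_MAX).

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
*/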
8357 
8358 void vmaDestroyPool(
8359  VmaAllocator allocator,
8360  VmaPool pool)
8361 {
8362  VMA_ASSERT(allocator);
8363 
8364  if(pool == VK_NULL_HANDLE)
8365  {
8366  return;
8367  }
8368 
8369  VMA_DEBUG_LOG("vmaDestroyPool");
8370 
8371  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8372 
8373  allocator->DestroyPool(pool);
8374 }
8375 
8376 void vmaGetPoolStats(
8377  VmaAllocator allocator,
8378  VmaPool pool,
8379  VmaPoolStats* pPoolStats)
8380 {
8381  VMA_ASSERT(allocator && pool && pPoolStats);
8382 
8383  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8384 
8385  allocator->GetPoolStats(pool, pPoolStats);
8386 }
8387 
8389  VmaAllocator allocator,
8390  VmaPool pool,
8391  size_t* pLostAllocationCount)
8392 {
8393  VMA_ASSERT(allocator && pool);
8394 
8395  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8396 
8397  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8398 }
8399 
8400 VkResult vmaAllocateMemory(
8401  VmaAllocator allocator,
8402  const VkMemoryRequirements* pVkMemoryRequirements,
8403  const VmaAllocationCreateInfo* pCreateInfo,
8404  VmaAllocation* pAllocation,
8405  VmaAllocationInfo* pAllocationInfo)
8406 {
8407  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8408 
8409  VMA_DEBUG_LOG("vmaAllocateMemory");
8410 
8411  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8412 
8413  VkResult result = allocator->AllocateMemory(
8414  *pVkMemoryRequirements,
8415  false, // requiresDedicatedAllocation
8416  false, // prefersDedicatedAllocation
8417  VK_NULL_HANDLE, // dedicatedBuffer
8418  VK_NULL_HANDLE, // dedicatedImage
8419  *pCreateInfo,
8420  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8421  pAllocation);
8422 
8423  if(pAllocationInfo && result == VK_SUCCESS)
8424  {
8425  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8426  }
8427 
8428  return result;
8429 }
8430 
8431 VkResult vmaAllocateMemoryForBuffer(
8432  VmaAllocator allocator,
8433  VkBuffer buffer,
8434  const VmaAllocationCreateInfo* pCreateInfo,
8435  VmaAllocation* pAllocation,
8436  VmaAllocationInfo* pAllocationInfo)
8437 {
8438  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8439 
8440  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8441 
8442  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8443 
8444  VkMemoryRequirements vkMemReq = {};
8445  bool requiresDedicatedAllocation = false;
8446  bool prefersDedicatedAllocation = false;
8447  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8448  requiresDedicatedAllocation,
8449  prefersDedicatedAllocation);
8450 
8451  VkResult result = allocator->AllocateMemory(
8452  vkMemReq,
8453  requiresDedicatedAllocation,
8454  prefersDedicatedAllocation,
8455  buffer, // dedicatedBuffer
8456  VK_NULL_HANDLE, // dedicatedImage
8457  *pCreateInfo,
8458  VMA_SUBALLOCATION_TYPE_BUFFER,
8459  pAllocation);
8460 
8461  if(pAllocationInfo && result == VK_SUCCESS)
8462  {
8463  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8464  }
8465 
8466  return result;
8467 }
8468 
8469 VkResult vmaAllocateMemoryForImage(
8470  VmaAllocator allocator,
8471  VkImage image,
8472  const VmaAllocationCreateInfo* pCreateInfo,
8473  VmaAllocation* pAllocation,
8474  VmaAllocationInfo* pAllocationInfo)
8475 {
8476  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8477 
8478  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8479 
8480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8481 
8482  VkResult result = AllocateMemoryForImage(
8483  allocator,
8484  image,
8485  pCreateInfo,
8486  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8487  pAllocation);
8488 
8489  if(pAllocationInfo && result == VK_SUCCESS)
8490  {
8491  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8492  }
8493 
8494  return result;
8495 }
8496 
8497 void vmaFreeMemory(
8498  VmaAllocator allocator,
8499  VmaAllocation allocation)
8500 {
8501  VMA_ASSERT(allocator && allocation);
8502 
8503  VMA_DEBUG_LOG("vmaFreeMemory");
8504 
8505  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8506 
8507  allocator->FreeMemory(allocation);
8508 }
8509 
8510 void vmaGetAllocationInfo(
8511  VmaAllocator allocator,
8512  VmaAllocation allocation,
8513  VmaAllocationInfo* pAllocationInfo)
8514 {
8515  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8516 
8517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8518 
8519  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8520 }
8521 
8522 void vmaSetAllocationUserData(
8523  VmaAllocator allocator,
8524  VmaAllocation allocation,
8525  void* pUserData)
8526 {
8527  VMA_ASSERT(allocator && allocation);
8528 
8529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8530 
8531  allocation->SetUserData(allocator, pUserData);
8532 }
8533 
8534 void vmaCreateLostAllocation(
8535  VmaAllocator allocator,
8536  VmaAllocation* pAllocation)
8537 {
8538  VMA_ASSERT(allocator && pAllocation);
8539 
8540  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8541 
8542  allocator->CreateLostAllocation(pAllocation);
8543 }
8544 
8545 VkResult vmaMapMemory(
8546  VmaAllocator allocator,
8547  VmaAllocation allocation,
8548  void** ppData)
8549 {
8550  VMA_ASSERT(allocator && allocation && ppData);
8551 
8552  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8553 
8554  return allocator->Map(allocation, ppData);
8555 }
8556 
8557 void vmaUnmapMemory(
8558  VmaAllocator allocator,
8559  VmaAllocation allocation)
8560 {
8561  VMA_ASSERT(allocator && allocation);
8562 
8563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8564 
8565  allocator->Unmap(allocation);
8566 }
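// Illustrative usage sketch (assumes `allocation` was created in HOST_VISIBLE
// memory, e.g. with VMA_MEMORY_USAGE_CPU_TO_GPU): the typical map / write /
// unmap pattern using the two functions above.
static void ExampleUploadData(VmaAllocator allocator, VmaAllocation allocation,
    const void* srcData, size_t size)
{
    void* pMapped = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &pMapped) == VK_SUCCESS)
    {
        memcpy(pMapped, srcData, size);
        vmaUnmapMemory(allocator, allocation); // every Map must be paired with an Unmap
    }
}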
8567 
8568 VkResult vmaDefragment(
8569  VmaAllocator allocator,
8570  VmaAllocation* pAllocations,
8571  size_t allocationCount,
8572  VkBool32* pAllocationsChanged,
8573  const VmaDefragmentationInfo *pDefragmentationInfo,
8574  VmaDefragmentationStats* pDefragmentationStats)
8575 {
8576  VMA_ASSERT(allocator && pAllocations);
8577 
8578  VMA_DEBUG_LOG("vmaDefragment");
8579 
8580  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8581 
8582  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8583 }
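// Illustrative usage sketch (array contents are the caller's): running a
// defragmentation pass over a set of allocations with default limits.
// Allocations that were moved must have their buffers/images re-created or
// re-bound by the caller afterwards.
static void ExampleDefragment(VmaAllocator allocator, VmaAllocation* pAllocations, size_t allocationCount)
{
    VmaDefragmentationStats stats = {};
    vmaDefragment(
        allocator,
        pAllocations,
        allocationCount,
        VMA_NULL, // pAllocationsChanged - optional
        VMA_NULL, // pDefragmentationInfo - null means no limits
        &stats);
    // stats.bytesMoved / stats.bytesFreed describe what the pass achieved.
}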
8584 
8585 VkResult vmaCreateBuffer(
8586  VmaAllocator allocator,
8587  const VkBufferCreateInfo* pBufferCreateInfo,
8588  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8589  VkBuffer* pBuffer,
8590  VmaAllocation* pAllocation,
8591  VmaAllocationInfo* pAllocationInfo)
8592 {
8593  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
8594 
8595  VMA_DEBUG_LOG("vmaCreateBuffer");
8596 
8597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8598 
8599  *pBuffer = VK_NULL_HANDLE;
8600  *pAllocation = VK_NULL_HANDLE;
8601 
8602  // 1. Create VkBuffer.
8603  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
8604  allocator->m_hDevice,
8605  pBufferCreateInfo,
8606  allocator->GetAllocationCallbacks(),
8607  pBuffer);
8608  if(res >= 0)
8609  {
8610  // 2. vkGetBufferMemoryRequirements.
8611  VkMemoryRequirements vkMemReq = {};
8612  bool requiresDedicatedAllocation = false;
8613  bool prefersDedicatedAllocation = false;
8614  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
8615  requiresDedicatedAllocation, prefersDedicatedAllocation);
8616 
8617  // Make sure alignment requirements for specific buffer usages reported
8618  // in Physical Device Properties are included in alignment reported by memory requirements.
8619  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
8620  {
8621  VMA_ASSERT(vkMemReq.alignment %
8622  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
8623  }
8624  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
8625  {
8626  VMA_ASSERT(vkMemReq.alignment %
8627  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
8628  }
8629  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
8630  {
8631  VMA_ASSERT(vkMemReq.alignment %
8632  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
8633  }
8634 
8635  // 3. Allocate memory using allocator.
8636  res = allocator->AllocateMemory(
8637  vkMemReq,
8638  requiresDedicatedAllocation,
8639  prefersDedicatedAllocation,
8640  *pBuffer, // dedicatedBuffer
8641  VK_NULL_HANDLE, // dedicatedImage
8642  *pAllocationCreateInfo,
8643  VMA_SUBALLOCATION_TYPE_BUFFER,
8644  pAllocation);
8645  if(res >= 0)
8646  {
8647  // 3. Bind buffer with memory.
8648  res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
8649  allocator->m_hDevice,
8650  *pBuffer,
8651  (*pAllocation)->GetMemory(),
8652  (*pAllocation)->GetOffset());
8653  if(res >= 0)
8654  {
8655  // All steps succeeded.
8656  if(pAllocationInfo != VMA_NULL)
8657  {
8658  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8659  }
8660  return VK_SUCCESS;
8661  }
8662  allocator->FreeMemory(*pAllocation);
8663  *pAllocation = VK_NULL_HANDLE;
8664  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8665  *pBuffer = VK_NULL_HANDLE;
8666  return res;
8667  }
8668  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
8669  *pBuffer = VK_NULL_HANDLE;
8670  return res;
8671  }
8672  return res;
8673 }
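// Illustrative usage sketch (buffer usage flags are example values): creating a
// buffer, allocating memory for it, and binding them together in one call via
// vmaCreateBuffer() defined above.
static VkResult ExampleCreateVertexBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL /*pAllocationInfo - optional*/);
}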
8674 
8675 void vmaDestroyBuffer(
8676  VmaAllocator allocator,
8677  VkBuffer buffer,
8678  VmaAllocation allocation)
8679 {
8680  if(buffer != VK_NULL_HANDLE)
8681  {
8682  VMA_ASSERT(allocator);
8683 
8684  VMA_DEBUG_LOG("vmaDestroyBuffer");
8685 
8686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8687 
8688  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
8689 
8690  allocator->FreeMemory(allocation);
8691  }
8692 }
8693 
8694 VkResult vmaCreateImage(
8695  VmaAllocator allocator,
8696  const VkImageCreateInfo* pImageCreateInfo,
8697  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8698  VkImage* pImage,
8699  VmaAllocation* pAllocation,
8700  VmaAllocationInfo* pAllocationInfo)
8701 {
8702  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
8703 
8704  VMA_DEBUG_LOG("vmaCreateImage");
8705 
8706  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8707 
8708  *pImage = VK_NULL_HANDLE;
8709  *pAllocation = VK_NULL_HANDLE;
8710 
8711  // 1. Create VkImage.
8712  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
8713  allocator->m_hDevice,
8714  pImageCreateInfo,
8715  allocator->GetAllocationCallbacks(),
8716  pImage);
8717  if(res >= 0)
8718  {
8719  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
8720  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
8721  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
8722 
8723  // 2. Allocate memory using allocator.
8724  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
8725  if(res >= 0)
8726  {
8727  // 3. Bind image with memory.
8728  res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
8729  allocator->m_hDevice,
8730  *pImage,
8731  (*pAllocation)->GetMemory(),
8732  (*pAllocation)->GetOffset());
8733  if(res >= 0)
8734  {
8735  // All steps succeeded.
8736  if(pAllocationInfo != VMA_NULL)
8737  {
8738  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8739  }
8740  return VK_SUCCESS;
8741  }
8742  allocator->FreeMemory(*pAllocation);
8743  *pAllocation = VK_NULL_HANDLE;
8744  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8745  *pImage = VK_NULL_HANDLE;
8746  return res;
8747  }
8748  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
8749  *pImage = VK_NULL_HANDLE;
8750  return res;
8751  }
8752  return res;
8753 }
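// Illustrative usage sketch (format, extent and usage flags are example
// values): creating a 2D image together with its memory in one call.
// VK_IMAGE_TILING_OPTIMAL makes the function above pick
// VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL for the suballocation.
static VkResult ExampleCreateSampledImage(VmaAllocator allocator, uint32_t width, uint32_t height,
    VkImage* pImage, VmaAllocation* pAllocation)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = width;
    imgCreateInfo.extent.height = height;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        pImage, pAllocation, VMA_NULL /*pAllocationInfo - optional*/);
}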
8754 
8755 void vmaDestroyImage(
8756  VmaAllocator allocator,
8757  VkImage image,
8758  VmaAllocation allocation)
8759 {
8760  if(image != VK_NULL_HANDLE)
8761  {
8762  VMA_ASSERT(allocator);
8763 
8764  VMA_DEBUG_LOG("vmaDestroyImage");
8765 
8766  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8767 
8768  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
8769 
8770  allocator->FreeMemory(allocation);
8771  }
8772 }
8773 
8774 #endif // #ifdef VMA_IMPLEMENTATION
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
826 #include <vulkan/vulkan.h>
827 
828 VK_DEFINE_HANDLE(VmaAllocator)
829 
830 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
832  VmaAllocator allocator,
833  uint32_t memoryType,
834  VkDeviceMemory memory,
835  VkDeviceSize size);
837 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
838  VmaAllocator allocator,
839  uint32_t memoryType,
840  VkDeviceMemory memory,
841  VkDeviceSize size);
842 
850 typedef struct VmaDeviceMemoryCallbacks {
852  PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
854  PFN_vmaFreeDeviceMemoryFunction pfnFree;
855 } VmaDeviceMemoryCallbacks;
856 
886 
889 typedef VkFlags VmaAllocatorCreateFlags;
890 
895 typedef struct VmaVulkanFunctions {
896  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
897  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
898  PFN_vkAllocateMemory vkAllocateMemory;
899  PFN_vkFreeMemory vkFreeMemory;
900  PFN_vkMapMemory vkMapMemory;
901  PFN_vkUnmapMemory vkUnmapMemory;
902  PFN_vkBindBufferMemory vkBindBufferMemory;
903  PFN_vkBindImageMemory vkBindImageMemory;
904  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
905  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
906  PFN_vkCreateBuffer vkCreateBuffer;
907  PFN_vkDestroyBuffer vkDestroyBuffer;
908  PFN_vkCreateImage vkCreateImage;
909  PFN_vkDestroyImage vkDestroyImage;
910  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
911  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
912 } VmaVulkanFunctions;
913 
915 typedef struct VmaAllocatorCreateInfo
916 {
918  VmaAllocatorCreateFlags flags;
920 
921  VkPhysicalDevice physicalDevice;
923 
924  VkDevice device;
926 
929 
930  const VkAllocationCallbacks* pAllocationCallbacks;
932 
947  uint32_t frameInUseCount;
971  const VkDeviceSize* pHeapSizeLimit;
984 } VmaAllocatorCreateInfo;
985 
987 VkResult vmaCreateAllocator(
988  const VmaAllocatorCreateInfo* pCreateInfo,
989  VmaAllocator* pAllocator);
990 
992 void vmaDestroyAllocator(
993  VmaAllocator allocator);
994 
999 void vmaGetPhysicalDeviceProperties(
1000  VmaAllocator allocator,
1001  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1002 
1007 void vmaGetMemoryProperties(
1008  VmaAllocator allocator,
1009  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1010 
1017 void vmaGetMemoryTypeProperties(
1018  VmaAllocator allocator,
1019  uint32_t memoryTypeIndex,
1020  VkMemoryPropertyFlags* pFlags);
1021 
1030 void vmaSetCurrentFrameIndex(
1031  VmaAllocator allocator,
1032  uint32_t frameIndex);
1033 
1036 typedef struct VmaStatInfo
1037 {
1039  uint32_t blockCount;
1041  uint32_t allocationCount;
1043  uint32_t unusedRangeCount;
1045  VkDeviceSize usedBytes;
1047  VkDeviceSize unusedBytes;
1048  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1049  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1050 } VmaStatInfo;
1051 
1053 typedef struct VmaStats
1054 {
1055  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1056  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1057  VmaStatInfo total;
1058 } VmaStats;
1059 
1061 void vmaCalculateStats(
1062  VmaAllocator allocator,
1063  VmaStats* pStats);
1064 
1065 #define VMA_STATS_STRING_ENABLED 1
1066 
1067 #if VMA_STATS_STRING_ENABLED
1068 
1070 
1072 void vmaBuildStatsString(
1073  VmaAllocator allocator,
1074  char** ppStatsString,
1075  VkBool32 detailedMap);
1076 
1077 void vmaFreeStatsString(
1078  VmaAllocator allocator,
1079  char* pStatsString);
1080 
1081 #endif // #if VMA_STATS_STRING_ENABLED
1082 
1083 VK_DEFINE_HANDLE(VmaPool)
1084 
1085 typedef enum VmaMemoryUsage
1086 {
1135 } VmaMemoryUsage;
1136 
1151 
1201 
1205 
1206 typedef struct VmaAllocationCreateInfo
1207 {
1209  VmaAllocationCreateFlags flags;
1220  VkMemoryPropertyFlags requiredFlags;
1225  VkMemoryPropertyFlags preferredFlags;
1233  uint32_t memoryTypeBits;
1239  VmaPool pool;
1246  void* pUserData;
1247 } VmaAllocationCreateInfo;
1248 
1265 VkResult vmaFindMemoryTypeIndex(
1266  VmaAllocator allocator,
1267  uint32_t memoryTypeBits,
1268  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1269  uint32_t* pMemoryTypeIndex);
1270 
1283 VkResult vmaFindMemoryTypeIndexForBufferInfo(
1284  VmaAllocator allocator,
1285  const VkBufferCreateInfo* pBufferCreateInfo,
1286  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1287  uint32_t* pMemoryTypeIndex);
1288 
1301 VkResult vmaFindMemoryTypeIndexForImageInfo(
1302  VmaAllocator allocator,
1303  const VkImageCreateInfo* pImageCreateInfo,
1304  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1305  uint32_t* pMemoryTypeIndex);
1306 
1327 
1330 typedef VkFlags VmaPoolCreateFlags;
1331 
1334 typedef struct VmaPoolCreateInfo {
1340  VmaPoolCreateFlags flags;
1345  VkDeviceSize blockSize;
1373 } VmaPoolCreateInfo;
1374 
1377 typedef struct VmaPoolStats {
1380  VkDeviceSize size;
1383  VkDeviceSize unusedSize;
1396  VkDeviceSize unusedRangeSizeMax;
1397 } VmaPoolStats;
1398 
1405 VkResult vmaCreatePool(
1406  VmaAllocator allocator,
1407  const VmaPoolCreateInfo* pCreateInfo,
1408  VmaPool* pPool);
1409 
1412 void vmaDestroyPool(
1413  VmaAllocator allocator,
1414  VmaPool pool);
1415 
1422 void vmaGetPoolStats(
1423  VmaAllocator allocator,
1424  VmaPool pool,
1425  VmaPoolStats* pPoolStats);
1426 
1433 void vmaMakePoolAllocationsLost(
1434  VmaAllocator allocator,
1435  VmaPool pool,
1436  size_t* pLostAllocationCount);
1437 
1438 VK_DEFINE_HANDLE(VmaAllocation)
1439 
1440 
1442 typedef struct VmaAllocationInfo {
1447  uint32_t memoryType;
1456  VkDeviceMemory deviceMemory;
1461  VkDeviceSize offset;
1466  VkDeviceSize size;
1474  void* pMappedData;
1480  void* pUserData;
1481 } VmaAllocationInfo;
1482 
1493 VkResult vmaAllocateMemory(
1494  VmaAllocator allocator,
1495  const VkMemoryRequirements* pVkMemoryRequirements,
1496  const VmaAllocationCreateInfo* pCreateInfo,
1497  VmaAllocation* pAllocation,
1498  VmaAllocationInfo* pAllocationInfo);
1499 
1506 VkResult vmaAllocateMemoryForBuffer(
1507  VmaAllocator allocator,
1508  VkBuffer buffer,
1509  const VmaAllocationCreateInfo* pCreateInfo,
1510  VmaAllocation* pAllocation,
1511  VmaAllocationInfo* pAllocationInfo);
1512 
1514 VkResult vmaAllocateMemoryForImage(
1515  VmaAllocator allocator,
1516  VkImage image,
1517  const VmaAllocationCreateInfo* pCreateInfo,
1518  VmaAllocation* pAllocation,
1519  VmaAllocationInfo* pAllocationInfo);
1520 
1522 void vmaFreeMemory(
1523  VmaAllocator allocator,
1524  VmaAllocation allocation);
1525 
1530 void vmaGetAllocationInfo(
1531  VmaAllocator allocator,
1532  VmaAllocation allocation,
1533  VmaAllocationInfo* pAllocationInfo);
1534 
1537 bool vmaTouchAllocation(
1538  VmaAllocator allocator,
1539  VmaAllocation allocation);
1540 
1554 void vmaSetAllocationUserData(
1555  VmaAllocator allocator,
1556  VmaAllocation allocation,
1557  void* pUserData);
1558 
1569 void vmaCreateLostAllocation(
1570  VmaAllocator allocator,
1571  VmaAllocation* pAllocation);
1572 
1607 VkResult vmaMapMemory(
1608  VmaAllocator allocator,
1609  VmaAllocation allocation,
1610  void** ppData);
1611 
1616 void vmaUnmapMemory(
1617  VmaAllocator allocator,
1618  VmaAllocation allocation);
1619 
1621 typedef struct VmaDefragmentationInfo {
1626  VkDeviceSize maxBytesToMove;
1631  uint32_t maxAllocationsToMove;
1632 } VmaDefragmentationInfo;
1633 
1635 typedef struct VmaDefragmentationStats {
1637  VkDeviceSize bytesMoved;
1639  VkDeviceSize bytesFreed;
1641  uint32_t allocationsMoved;
1643  uint32_t deviceMemoryBlocksFreed;
1644 } VmaDefragmentationStats;
1645 
1728 VkResult vmaDefragment(
1729  VmaAllocator allocator,
1730  VmaAllocation* pAllocations,
1731  size_t allocationCount,
1732  VkBool32* pAllocationsChanged,
1733  const VmaDefragmentationInfo *pDefragmentationInfo,
1734  VmaDefragmentationStats* pDefragmentationStats);
1735 
1762 VkResult vmaCreateBuffer(
1763  VmaAllocator allocator,
1764  const VkBufferCreateInfo* pBufferCreateInfo,
1765  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1766  VkBuffer* pBuffer,
1767  VmaAllocation* pAllocation,
1768  VmaAllocationInfo* pAllocationInfo);
1769 
1781 void vmaDestroyBuffer(
1782  VmaAllocator allocator,
1783  VkBuffer buffer,
1784  VmaAllocation allocation);
1785 
1787 VkResult vmaCreateImage(
1788  VmaAllocator allocator,
1789  const VkImageCreateInfo* pImageCreateInfo,
1790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1791  VkImage* pImage,
1792  VmaAllocation* pAllocation,
1793  VmaAllocationInfo* pAllocationInfo);
1794 
1806 void vmaDestroyImage(
1807  VmaAllocator allocator,
1808  VkImage image,
1809  VmaAllocation allocation);
1810 
1811 #ifdef __cplusplus
1812 }
1813 #endif
1814 
1815 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
1816 
1817 // For Visual Studio IntelliSense.
1818 #ifdef __INTELLISENSE__
1819 #define VMA_IMPLEMENTATION
1820 #endif
1821 
1822 #ifdef VMA_IMPLEMENTATION
1823 #undef VMA_IMPLEMENTATION
1824 
1825 #include <cstdint>
1826 #include <cstdlib>
1827 #include <cstring>
1828 
1829 /*******************************************************************************
1830 CONFIGURATION SECTION
1831 
1832 Define some of these macros before each #include of this header or change them
1833 here if you need behavior other than the default, depending on your environment.
1834 */
1835 
1836 /*
1837 Define this macro to 1 to make the library fetch pointers to Vulkan functions
1838 internally, like:
1839 
1840  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
1841 
1842 Define to 0 if you are going to provide your own pointers to Vulkan functions via
1843 VmaAllocatorCreateInfo::pVulkanFunctions.
1844 */
1845 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
1846 #define VMA_STATIC_VULKAN_FUNCTIONS 1
1847 #endif
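// Illustrative configuration sketch (the function-pointer variable names are
// assumptions, e.g. pointers loaded via vkGetInstanceProcAddr or
// vkGetDeviceProcAddr): with the macro defined to 0, pass the entry points
// through VmaAllocatorCreateInfo::pVulkanFunctions instead:
//
//   #define VMA_STATIC_VULKAN_FUNCTIONS 0
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory; // hypothetical loaded pointer
//   // ... fill all remaining members the same way ...
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pVulkanFunctions = &vulkanFunctions;
//   VmaAllocator allocator;
//   vmaCreateAllocator(&allocatorInfo, &allocator);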
1848 
1849 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
1850 //#define VMA_USE_STL_CONTAINERS 1
1851 
1852 /* Set this macro to 1 to make the library include and use STL containers:
1853 std::pair, std::vector, std::list, std::unordered_map.
1854 
1855 Set it to 0 or leave it undefined to make the library use its own implementation
1856 of the containers.
1857 */
1858 #if VMA_USE_STL_CONTAINERS
1859  #define VMA_USE_STL_VECTOR 1
1860  #define VMA_USE_STL_UNORDERED_MAP 1
1861  #define VMA_USE_STL_LIST 1
1862 #endif
1863 
1864 #if VMA_USE_STL_VECTOR
1865  #include <vector>
1866 #endif
1867 
1868 #if VMA_USE_STL_UNORDERED_MAP
1869  #include <unordered_map>
1870 #endif
1871 
1872 #if VMA_USE_STL_LIST
1873  #include <list>
1874 #endif
1875 
1876 /*
1877 The following headers are used in this CONFIGURATION section only, so feel free
1878 to remove them if they are not needed.
1879 */
1880 #include <cassert> // for assert
1881 #include <algorithm> // for min, max
1882 #include <mutex> // for std::mutex
1883 #include <atomic> // for std::atomic
1884 
1885 #if !defined(_WIN32) && !defined(__APPLE__)
1886  #include <malloc.h> // for aligned_alloc()
1887 #endif
1888 
1889 #ifndef VMA_NULL
1890  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
1891  #define VMA_NULL nullptr
1892 #endif
1893 
1894 #if defined(__APPLE__)
1895 #include <cstdlib>
1896 void *aligned_alloc(size_t alignment, size_t size)
1897 {
1898  // alignment must be >= sizeof(void*)
1899  if(alignment < sizeof(void*))
1900  {
1901  alignment = sizeof(void*);
1902  }
1903 
1904  void *pointer;
1905  if(posix_memalign(&pointer, alignment, size) == 0)
1906  return pointer;
1907  return VMA_NULL;
1908 }
1909 #endif
1910 
1911 // Normal assert to check for programmer's errors, especially in Debug configuration.
1912 #ifndef VMA_ASSERT
1913  #ifdef _DEBUG
1914  #define VMA_ASSERT(expr) assert(expr)
1915  #else
1916  #define VMA_ASSERT(expr)
1917  #endif
1918 #endif
1919 
1920 // Assert that will be called very often, e.g. inside data structures such as operator[].
1921 // Making it non-empty can make the program slow.
1922 #ifndef VMA_HEAVY_ASSERT
1923  #ifdef _DEBUG
1924  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
1925  #else
1926  #define VMA_HEAVY_ASSERT(expr)
1927  #endif
1928 #endif
1929 
1930 #ifndef VMA_ALIGN_OF
1931  #define VMA_ALIGN_OF(type) (__alignof(type))
1932 #endif
1933 
1934 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
1935  #if defined(_WIN32)
1936  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
1937  #else
1938  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
1939  #endif
1940 #endif
1941 
1942 #ifndef VMA_SYSTEM_FREE
1943  #if defined(_WIN32)
1944  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
1945  #else
1946  #define VMA_SYSTEM_FREE(ptr) free(ptr)
1947  #endif
1948 #endif
1949 
1950 #ifndef VMA_MIN
1951  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
1952 #endif
1953 
1954 #ifndef VMA_MAX
1955  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
1956 #endif
1957 
1958 #ifndef VMA_SWAP
1959  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
1960 #endif
1961 
1962 #ifndef VMA_SORT
1963  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
1964 #endif
1965 
1966 #ifndef VMA_DEBUG_LOG
1967  #define VMA_DEBUG_LOG(format, ...)
1968  /*
1969  #define VMA_DEBUG_LOG(format, ...) do { \
1970  printf(format, __VA_ARGS__); \
1971  printf("\n"); \
1972  } while(false)
1973  */
1974 #endif
1975 
1976 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
1977 #if VMA_STATS_STRING_ENABLED
1978  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
1979  {
1980  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
1981  }
1982  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
1983  {
1984  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
1985  }
1986  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
1987  {
1988  snprintf(outStr, strLen, "%p", ptr);
1989  }
1990 #endif
1991 
1992 #ifndef VMA_MUTEX
1993  class VmaMutex
1994  {
1995  public:
1996  VmaMutex() { }
1997  ~VmaMutex() { }
1998  void Lock() { m_Mutex.lock(); }
1999  void Unlock() { m_Mutex.unlock(); }
2000  private:
2001  std::mutex m_Mutex;
2002  };
2003  #define VMA_MUTEX VmaMutex
2004 #endif
2005 
2006 /*
2007 If providing your own implementation, you need to implement a subset of std::atomic:
2008 
2009 - Constructor(uint32_t desired)
2010 - uint32_t load() const
2011 - void store(uint32_t desired)
2012 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2013 */
2014 #ifndef VMA_ATOMIC_UINT32
2015  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2016 #endif
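// Illustrative sketch (the type name is invented here, and it is valid only
// under a strictly single-threaded assumption): a minimal type satisfying the
// std::atomic subset listed above, usable as
// `#define VMA_ATOMIC_UINT32 MySingleThreadedUint32`.
struct MySingleThreadedUint32
{
    uint32_t value;
    MySingleThreadedUint32(uint32_t desired) : value(desired) { }
    uint32_t load() const { return value; }
    void store(uint32_t desired) { value = desired; }
    bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    {
        if(value == expected) { value = desired; return true; }
        expected = value; // report the value actually seen, like std::atomic
        return false;
    }
};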
2017 
2018 #ifndef VMA_BEST_FIT
2019 
2031  #define VMA_BEST_FIT (1)
2032 #endif
2033 
2034 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2035 
2039  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2040 #endif
2041 
2042 #ifndef VMA_DEBUG_ALIGNMENT
2043 
2047  #define VMA_DEBUG_ALIGNMENT (1)
2048 #endif
2049 
2050 #ifndef VMA_DEBUG_MARGIN
2051 
2055  #define VMA_DEBUG_MARGIN (0)
2056 #endif
2057 
2058 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2059 
2063  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2064 #endif
2065 
2066 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2067 
2071  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2072 #endif
2073 
2074 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2075  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2077 #endif
2078 
2079 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2080  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2082 #endif
2083 
2084 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2085 
2086 /*******************************************************************************
2087 END OF CONFIGURATION
2088 */
2089 
2090 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
2091  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2092 
2093 // Returns number of bits set to 1 in (v).
2094 static inline uint32_t VmaCountBitsSet(uint32_t v)
2095 {
2096  uint32_t c = v - ((v >> 1) & 0x55555555);
2097  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
2098  c = ((c >> 4) + c) & 0x0F0F0F0F;
2099  c = ((c >> 8) + c) & 0x00FF00FF;
2100  c = ((c >> 16) + c) & 0x0000FFFF;
2101  return c;
2102 }
2103 
2104 // Aligns given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
2105 // Use types like uint32_t, uint64_t as T.
2106 template <typename T>
2107 static inline T VmaAlignUp(T val, T align)
2108 {
2109  return (val + align - 1) / align * align;
2110 }
2111 
2112 // Division with mathematical rounding to nearest number.
2113 template <typename T>
2114 inline T VmaRoundDiv(T x, T y)
2115 {
2116  return (x + (y / (T)2)) / y;
2117 }
2118 
2119 #ifndef VMA_SORT
2120 
2121 template<typename Iterator, typename Compare>
2122 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
2123 {
2124  Iterator centerValue = end; --centerValue;
2125  Iterator insertIndex = beg;
2126  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
2127  {
2128  if(cmp(*memTypeIndex, *centerValue))
2129  {
2130  if(insertIndex != memTypeIndex)
2131  {
2132  VMA_SWAP(*memTypeIndex, *insertIndex);
2133  }
2134  ++insertIndex;
2135  }
2136  }
2137  if(insertIndex != centerValue)
2138  {
2139  VMA_SWAP(*insertIndex, *centerValue);
2140  }
2141  return insertIndex;
2142 }
2143 
2144 template<typename Iterator, typename Compare>
2145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2146 {
2147  if(beg < end)
2148  {
2149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2152  }
2153 }
2154 
2155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2156 
2157 #endif // #ifndef VMA_SORT
2158 
2159 /*
2160 Returns true if two memory blocks occupy overlapping pages.
2161 ResourceA must be at a lower memory offset than ResourceB.
2162 
2163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
2164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
2165 */
2166 static inline bool VmaBlocksOnSamePage(
2167  VkDeviceSize resourceAOffset,
2168  VkDeviceSize resourceASize,
2169  VkDeviceSize resourceBOffset,
2170  VkDeviceSize pageSize)
2171 {
2172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
2173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
2174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
2175  VkDeviceSize resourceBStart = resourceBOffset;
2176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
2177  return resourceAEndPage == resourceBStartPage;
2178 }
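// Worked example (added for clarity): with pageSize = 1024, a resource at
// offset 0 with size 512 ends on page 0, and a resource starting at offset 512
// also begins on page 0, so
//   VmaBlocksOnSamePage(0, 512, 512, 1024)  == true
// whereas with resourceBOffset = 1024 the second resource begins on page 1:
//   VmaBlocksOnSamePage(0, 512, 1024, 1024) == false
// The bit masking above relies on pageSize (bufferImageGranularity) being a
// power of 2, which the Vulkan specification requires.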
2179 
2180 enum VmaSuballocationType
2181 {
2182  VMA_SUBALLOCATION_TYPE_FREE = 0,
2183  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
2184  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
2185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
2186  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
2187  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
2188  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
2189 };
2190 
2191 /*
2192 Returns true if the given suballocation types could conflict and must respect
2193 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
2194 or a linear image and the other one is an optimal image. If the type is unknown,
2195 behave conservatively.
2196 */
2197 static inline bool VmaIsBufferImageGranularityConflict(
2198  VmaSuballocationType suballocType1,
2199  VmaSuballocationType suballocType2)
2200 {
2201  if(suballocType1 > suballocType2)
2202  {
2203  VMA_SWAP(suballocType1, suballocType2);
2204  }
2205 
2206  switch(suballocType1)
2207  {
2208  case VMA_SUBALLOCATION_TYPE_FREE:
2209  return false;
2210  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
2211  return true;
2212  case VMA_SUBALLOCATION_TYPE_BUFFER:
2213  return
2214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2216  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
2217  return
2218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
2219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
2220  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2221  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
2222  return
2223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
2224  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
2225  return false;
2226  default:
2227  VMA_ASSERT(0);
2228  return true;
2229  }
2230 }
2231 
2232 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2233 struct VmaMutexLock
2234 {
2235 public:
2236  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2237  m_pMutex(useMutex ? &mutex : VMA_NULL)
2238  {
2239  if(m_pMutex)
2240  {
2241  m_pMutex->Lock();
2242  }
2243  }
2244 
2245  ~VmaMutexLock()
2246  {
2247  if(m_pMutex)
2248  {
2249  m_pMutex->Unlock();
2250  }
2251  }
2252 
2253 private:
2254  VMA_MUTEX* m_pMutex;
2255 };
2256 
2257 #if VMA_DEBUG_GLOBAL_MUTEX
2258  static VMA_MUTEX gDebugGlobalMutex;
2259  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2260 #else
2261  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2262 #endif
2263 
2264 // Minimum size of a free suballocation to register it in the free suballocation collection.
2265 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2266 
2267 /*
2268 Performs binary search and returns an iterator to the first element that is
2269 greater than or equal to (key), according to comparison (cmp).
2270 
2271 Cmp should return true if the first argument is less than the second argument.
2272 
2273 The returned value is the found element, if present in the collection, or the
2274 place where a new element with value (key) should be inserted.
2275 */
2276 template <typename IterT, typename KeyT, typename CmpT>
2277 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
2278 {
2279  size_t down = 0, up = (end - beg);
2280  while(down < up)
2281  {
2282  const size_t mid = (down + up) / 2;
2283  if(cmp(*(beg+mid), key))
2284  {
2285  down = mid + 1;
2286  }
2287  else
2288  {
2289  up = mid;
2290  }
2291  }
2292  return beg + down;
2293 }
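// Illustrative usage sketch (ExampleLowerBound is a name invented here): the
// function above behaves like std::lower_bound over a sorted range.
static const int* ExampleLowerBound(const int* beg, const int* end, int key)
{
    return VmaBinaryFindFirstNotLess(beg, end, key,
        [](int a, int b) { return a < b; });
}
// For the array {1, 3, 3, 7} and key 3, the returned pointer is at index 1.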
2294 
2296 // Memory allocation
2297 
2298 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2299 {
2300  if((pAllocationCallbacks != VMA_NULL) &&
2301  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2302  {
2303  return (*pAllocationCallbacks->pfnAllocation)(
2304  pAllocationCallbacks->pUserData,
2305  size,
2306  alignment,
2307  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2308  }
2309  else
2310  {
2311  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2312  }
2313 }
2314 
2315 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2316 {
2317  if((pAllocationCallbacks != VMA_NULL) &&
2318  (pAllocationCallbacks->pfnFree != VMA_NULL))
2319  {
2320  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2321  }
2322  else
2323  {
2324  VMA_SYSTEM_FREE(ptr);
2325  }
2326 }
2327 
2328 template<typename T>
2329 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
2330 {
2331  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
2332 }
2333 
2334 template<typename T>
2335 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2336 {
2337  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2338 }
2339 
2340 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
2341 
2342 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2343 
2344 template<typename T>
2345 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2346 {
2347  ptr->~T();
2348  VmaFree(pAllocationCallbacks, ptr);
2349 }
2350 
2351 template<typename T>
2352 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2353 {
2354  if(ptr != VMA_NULL)
2355  {
2356  for(size_t i = count; i--; )
2357  {
2358  ptr[i].~T();
2359  }
2360  VmaFree(pAllocationCallbacks, ptr);
2361  }
2362 }
2363 
2364 // STL-compatible allocator.
2365 template<typename T>
2366 class VmaStlAllocator
2367 {
2368 public:
2369  const VkAllocationCallbacks* const m_pCallbacks;
2370  typedef T value_type;
2371 
2372  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
2373  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
2374 
2375  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
2376  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
2377 
2378  template<typename U>
2379  bool operator==(const VmaStlAllocator<U>& rhs) const
2380  {
2381  return m_pCallbacks == rhs.m_pCallbacks;
2382  }
2383  template<typename U>
2384  bool operator!=(const VmaStlAllocator<U>& rhs) const
2385  {
2386  return m_pCallbacks != rhs.m_pCallbacks;
2387  }
2388 
2389  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
2390 };
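// Illustrative usage sketch (in comment form, since VmaVector is only defined
// further below): VmaStlAllocator adapts VkAllocationCallbacks to the STL
// allocator interface, so it can parameterize the containers in this file:
//
//   VmaStlAllocator<uint32_t> alloc(pCallbacks); // pCallbacks may be null
//   VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(alloc);
//   v.push_back(42u);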
2391 
2392 #if VMA_USE_STL_VECTOR
2393 
2394 #define VmaVector std::vector
2395 
2396 template<typename T, typename allocatorT>
2397 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
2398 {
2399  vec.insert(vec.begin() + index, item);
2400 }
2401 
2402 template<typename T, typename allocatorT>
2403 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
2404 {
2405  vec.erase(vec.begin() + index);
2406 }
2407 
2408 #else // #if VMA_USE_STL_VECTOR
2409 
2410 /* Class with interface compatible with subset of std::vector.
2411 T must be POD because constructors and destructors are not called and memcpy is
2412 used for these objects. */
2413 template<typename T, typename AllocatorT>
2414 class VmaVector
2415 {
2416 public:
2417  typedef T value_type;
2418 
2419  VmaVector(const AllocatorT& allocator) :
2420  m_Allocator(allocator),
2421  m_pArray(VMA_NULL),
2422  m_Count(0),
2423  m_Capacity(0)
2424  {
2425  }
2426 
2427  VmaVector(size_t count, const AllocatorT& allocator) :
2428  m_Allocator(allocator),
2429  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2430  m_Count(count),
2431  m_Capacity(count)
2432  {
2433  }
2434 
2435  VmaVector(const VmaVector<T, AllocatorT>& src) :
2436  m_Allocator(src.m_Allocator),
2437  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2438  m_Count(src.m_Count),
2439  m_Capacity(src.m_Count)
2440  {
2441  if(m_Count != 0)
2442  {
2443  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2444  }
2445  }
2446 
2447  ~VmaVector()
2448  {
2449  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2450  }
2451 
2452  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2453  {
2454  if(&rhs != this)
2455  {
2456  resize(rhs.m_Count);
2457  if(m_Count != 0)
2458  {
2459  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2460  }
2461  }
2462  return *this;
2463  }
2464 
2465  bool empty() const { return m_Count == 0; }
2466  size_t size() const { return m_Count; }
2467  T* data() { return m_pArray; }
2468  const T* data() const { return m_pArray; }
2469 
2470  T& operator[](size_t index)
2471  {
2472  VMA_HEAVY_ASSERT(index < m_Count);
2473  return m_pArray[index];
2474  }
2475  const T& operator[](size_t index) const
2476  {
2477  VMA_HEAVY_ASSERT(index < m_Count);
2478  return m_pArray[index];
2479  }
2480 
2481  T& front()
2482  {
2483  VMA_HEAVY_ASSERT(m_Count > 0);
2484  return m_pArray[0];
2485  }
2486  const T& front() const
2487  {
2488  VMA_HEAVY_ASSERT(m_Count > 0);
2489  return m_pArray[0];
2490  }
2491  T& back()
2492  {
2493  VMA_HEAVY_ASSERT(m_Count > 0);
2494  return m_pArray[m_Count - 1];
2495  }
2496  const T& back() const
2497  {
2498  VMA_HEAVY_ASSERT(m_Count > 0);
2499  return m_pArray[m_Count - 1];
2500  }
2501 
2502  void reserve(size_t newCapacity, bool freeMemory = false)
2503  {
2504  newCapacity = VMA_MAX(newCapacity, m_Count);
2505 
2506  if((newCapacity < m_Capacity) && !freeMemory)
2507  {
2508  newCapacity = m_Capacity;
2509  }
2510 
2511  if(newCapacity != m_Capacity)
2512  {
2513  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2514  if(m_Count != 0)
2515  {
2516  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2517  }
2518  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2519  m_Capacity = newCapacity;
2520  m_pArray = newArray;
2521  }
2522  }
2523 
2524  void resize(size_t newCount, bool freeMemory = false)
2525  {
2526  size_t newCapacity = m_Capacity;
2527  if(newCount > m_Capacity)
2528  {
2529  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2530  }
2531  else if(freeMemory)
2532  {
2533  newCapacity = newCount;
2534  }
2535 
2536  if(newCapacity != m_Capacity)
2537  {
2538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2539  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2540  if(elementsToCopy != 0)
2541  {
2542  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2543  }
2544  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2545  m_Capacity = newCapacity;
2546  m_pArray = newArray;
2547  }
2548 
2549  m_Count = newCount;
2550  }
2551 
2552  void clear(bool freeMemory = false)
2553  {
2554  resize(0, freeMemory);
2555  }
2556 
2557  void insert(size_t index, const T& src)
2558  {
2559  VMA_HEAVY_ASSERT(index <= m_Count);
2560  const size_t oldCount = size();
2561  resize(oldCount + 1);
2562  if(index < oldCount)
2563  {
2564  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2565  }
2566  m_pArray[index] = src;
2567  }
2568 
2569  void remove(size_t index)
2570  {
2571  VMA_HEAVY_ASSERT(index < m_Count);
2572  const size_t oldCount = size();
2573  if(index < oldCount - 1)
2574  {
2575  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2576  }
2577  resize(oldCount - 1);
2578  }
2579 
2580  void push_back(const T& src)
2581  {
2582  const size_t newIndex = size();
2583  resize(newIndex + 1);
2584  m_pArray[newIndex] = src;
2585  }
2586 
2587  void pop_back()
2588  {
2589  VMA_HEAVY_ASSERT(m_Count > 0);
2590  resize(size() - 1);
2591  }
2592 
2593  void push_front(const T& src)
2594  {
2595  insert(0, src);
2596  }
2597 
2598  void pop_front()
2599  {
2600  VMA_HEAVY_ASSERT(m_Count > 0);
2601  remove(0);
2602  }
2603 
2604  typedef T* iterator;
2605 
2606  iterator begin() { return m_pArray; }
2607  iterator end() { return m_pArray + m_Count; }
2608 
2609 private:
2610  AllocatorT m_Allocator;
2611  T* m_pArray;
2612  size_t m_Count;
2613  size_t m_Capacity;
2614 };
2615 
2616 template<typename T, typename allocatorT>
2617 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
2618 {
2619  vec.insert(index, item);
2620 }
2621 
2622 template<typename T, typename allocatorT>
2623 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
2624 {
2625  vec.remove(index);
2626 }
2627 
2628 #endif // #if VMA_USE_STL_VECTOR
2629 
2630 template<typename CmpLess, typename VectorT>
2631 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
2632 {
2633  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
2634  vector.data(),
2635  vector.data() + vector.size(),
2636  value,
2637  CmpLess()) - vector.data();
2638  VmaVectorInsert(vector, indexToInsert, value);
2639  return indexToInsert;
2640 }
2641 
2642 template<typename CmpLess, typename VectorT>
2643 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
2644 {
2645  CmpLess comparator;
2646  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
2647  vector.begin(),
2648  vector.end(),
2649  value,
2650  comparator);
2651  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
2652  {
2653  size_t indexToRemove = it - vector.begin();
2654  VmaVectorRemove(vector, indexToRemove);
2655  return true;
2656  }
2657  return false;
2658 }
2659 
2660 template<typename CmpLess, typename VectorT>
2661 size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
2662 {
2663  CmpLess comparator;
2664  const typename VectorT::value_type* it = VmaBinaryFindFirstNotLess(
2665  vector.data(),
2666  vector.data() + vector.size(),
2667  value,
2668  comparator);
2669  if(it != vector.data() + vector.size() && !comparator(*it, value) && !comparator(value, *it))
2670  {
2671  return it - vector.data();
2672  }
2673  else
2674  {
2675  return vector.size();
2676  }
2677 }
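// Illustrative usage sketch for the three sorted-vector helpers above (Less is
// an assumed less-than functor for the element type; vec is kept sorted):
//
//   size_t pos = VmaVectorInsertSorted<Less>(vec, x);   // insert preserving order
//   size_t idx = VmaVectorFindSorted<Less>(vec, x);     // index, or vec.size() if absent
//   bool removed = VmaVectorRemoveSorted<Less>(vec, x); // true if x was found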
2678 
2680 // class VmaPoolAllocator
2681 
2682 /*
2683 Allocator for objects of type T using a list of arrays (pools) to speed up
2684 allocation. The number of elements that can be allocated is not bounded,
2685 because the allocator can create multiple blocks.
2686 */
2687 template<typename T>
2688 class VmaPoolAllocator
2689 {
2690 public:
2691  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
2692  ~VmaPoolAllocator();
2693  void Clear();
2694  T* Alloc();
2695  void Free(T* ptr);
2696 
2697 private:
2698  union Item
2699  {
2700  uint32_t NextFreeIndex;
2701  T Value;
2702  };
2703 
2704  struct ItemBlock
2705  {
2706  Item* pItems;
2707  uint32_t FirstFreeIndex;
2708  };
2709 
2710  const VkAllocationCallbacks* m_pAllocationCallbacks;
2711  size_t m_ItemsPerBlock;
2712  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
2713 
2714  ItemBlock& CreateNewBlock();
2715 };
2716 
2717 template<typename T>
2718 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
2719  m_pAllocationCallbacks(pAllocationCallbacks),
2720  m_ItemsPerBlock(itemsPerBlock),
2721  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
2722 {
2723  VMA_ASSERT(itemsPerBlock > 0);
2724 }
2725 
2726 template<typename T>
2727 VmaPoolAllocator<T>::~VmaPoolAllocator()
2728 {
2729  Clear();
2730 }
2731 
2732 template<typename T>
2733 void VmaPoolAllocator<T>::Clear()
2734 {
2735  for(size_t i = m_ItemBlocks.size(); i--; )
2736  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
2737  m_ItemBlocks.clear();
2738 }
2739 
2740 template<typename T>
2741 T* VmaPoolAllocator<T>::Alloc()
2742 {
2743  for(size_t i = m_ItemBlocks.size(); i--; )
2744  {
2745  ItemBlock& block = m_ItemBlocks[i];
2746  // This block has some free items: Use first one.
2747  if(block.FirstFreeIndex != UINT32_MAX)
2748  {
2749  Item* const pItem = &block.pItems[block.FirstFreeIndex];
2750  block.FirstFreeIndex = pItem->NextFreeIndex;
2751  return &pItem->Value;
2752  }
2753  }
2754 
2755  // No block has free item: Create new one and use it.
2756  ItemBlock& newBlock = CreateNewBlock();
2757  Item* const pItem = &newBlock.pItems[0];
2758  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
2759  return &pItem->Value;
2760 }
2761 
2762 template<typename T>
2763 void VmaPoolAllocator<T>::Free(T* ptr)
2764 {
2765  // Search all memory blocks to find ptr.
2766  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
2767  {
2768  ItemBlock& block = m_ItemBlocks[i];
2769 
2770  // Casting to union.
2771  Item* pItemPtr;
2772  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
2773 
2774  // Check if pItemPtr is in address range of this block.
2775  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
2776  {
2777  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
2778  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
2779  block.FirstFreeIndex = index;
2780  return;
2781  }
2782  }
2783  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
2784 }
2785 
2786 template<typename T>
2787 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
2788 {
2789  ItemBlock newBlock = {
2790  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
2791 
2792  m_ItemBlocks.push_back(newBlock);
2793 
2794  // Setup singly-linked list of all free items in this block.
2795  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
2796  newBlock.pItems[i].NextFreeIndex = i + 1;
2797  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
2798  return m_ItemBlocks.back();
2799 }
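// Illustrative usage sketch (the item type is an example): allocating and
// releasing slots from the block-based pool above. Note that Alloc() does not
// run the item's constructor and Free() does not run its destructor; slots are
// treated as raw storage via the internal union.
static void ExamplePoolAllocatorUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint32_t> pool(pCallbacks, 32); // 32 items per block
    uint32_t* p = pool.Alloc(); // reuses a free slot or creates a new block
    *p = 7;
    pool.Free(p); // returns the slot to its block's free list
}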
2800 
2802 // class VmaRawList, VmaList
2803 
2804 #if VMA_USE_STL_LIST
2805 
2806 #define VmaList std::list
2807 
2808 #else // #if VMA_USE_STL_LIST
2809 
2810 template<typename T>
2811 struct VmaListItem
2812 {
2813  VmaListItem* pPrev;
2814  VmaListItem* pNext;
2815  T Value;
2816 };
2817 
2818 // Doubly linked list.
2819 template<typename T>
2820 class VmaRawList
2821 {
2822 public:
2823  typedef VmaListItem<T> ItemType;
2824 
2825  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
2826  ~VmaRawList();
2827  void Clear();
2828 
2829  size_t GetCount() const { return m_Count; }
2830  bool IsEmpty() const { return m_Count == 0; }
2831 
2832  ItemType* Front() { return m_pFront; }
2833  const ItemType* Front() const { return m_pFront; }
2834  ItemType* Back() { return m_pBack; }
2835  const ItemType* Back() const { return m_pBack; }
2836 
2837  ItemType* PushBack();
2838  ItemType* PushFront();
2839  ItemType* PushBack(const T& value);
2840  ItemType* PushFront(const T& value);
2841  void PopBack();
2842  void PopFront();
2843 
2844  // Item can be null - it means PushBack.
2845  ItemType* InsertBefore(ItemType* pItem);
2846  // Item can be null - it means PushFront.
2847  ItemType* InsertAfter(ItemType* pItem);
2848 
2849  ItemType* InsertBefore(ItemType* pItem, const T& value);
2850  ItemType* InsertAfter(ItemType* pItem, const T& value);
2851 
2852  void Remove(ItemType* pItem);
2853 
2854 private:
2855  const VkAllocationCallbacks* const m_pAllocationCallbacks;
2856  VmaPoolAllocator<ItemType> m_ItemAllocator;
2857  ItemType* m_pFront;
2858  ItemType* m_pBack;
2859  size_t m_Count;
2860 
2861  // Declared but not defined, to block the copy constructor and assignment operator.
2862  VmaRawList(const VmaRawList<T>& src);
2863  VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
2864 };
2865 
2866 template<typename T>
2867 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
2868  m_pAllocationCallbacks(pAllocationCallbacks),
2869  m_ItemAllocator(pAllocationCallbacks, 128),
2870  m_pFront(VMA_NULL),
2871  m_pBack(VMA_NULL),
2872  m_Count(0)
2873 {
2874 }
2875 
2876 template<typename T>
2877 VmaRawList<T>::~VmaRawList()
2878 {
2879  // Intentionally not calling Clear, because that would be unnecessary
2880  // computation to return all items to m_ItemAllocator as free.
2881 }
2882 
2883 template<typename T>
2884 void VmaRawList<T>::Clear()
2885 {
2886  if(IsEmpty() == false)
2887  {
2888  ItemType* pItem = m_pBack;
2889  while(pItem != VMA_NULL)
2890  {
2891  ItemType* const pPrevItem = pItem->pPrev;
2892  m_ItemAllocator.Free(pItem);
2893  pItem = pPrevItem;
2894  }
2895  m_pFront = VMA_NULL;
2896  m_pBack = VMA_NULL;
2897  m_Count = 0;
2898  }
2899 }
2900 
2901 template<typename T>
2902 VmaListItem<T>* VmaRawList<T>::PushBack()
2903 {
2904  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2905  pNewItem->pNext = VMA_NULL;
2906  if(IsEmpty())
2907  {
2908  pNewItem->pPrev = VMA_NULL;
2909  m_pFront = pNewItem;
2910  m_pBack = pNewItem;
2911  m_Count = 1;
2912  }
2913  else
2914  {
2915  pNewItem->pPrev = m_pBack;
2916  m_pBack->pNext = pNewItem;
2917  m_pBack = pNewItem;
2918  ++m_Count;
2919  }
2920  return pNewItem;
2921 }
2922 
2923 template<typename T>
2924 VmaListItem<T>* VmaRawList<T>::PushFront()
2925 {
2926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
2927  pNewItem->pPrev = VMA_NULL;
2928  if(IsEmpty())
2929  {
2930  pNewItem->pNext = VMA_NULL;
2931  m_pFront = pNewItem;
2932  m_pBack = pNewItem;
2933  m_Count = 1;
2934  }
2935  else
2936  {
2937  pNewItem->pNext = m_pFront;
2938  m_pFront->pPrev = pNewItem;
2939  m_pFront = pNewItem;
2940  ++m_Count;
2941  }
2942  return pNewItem;
2943 }
2944 
2945 template<typename T>
2946 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
2947 {
2948  ItemType* const pNewItem = PushBack();
2949  pNewItem->Value = value;
2950  return pNewItem;
2951 }
2952 
2953 template<typename T>
2954 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
2955 {
2956  ItemType* const pNewItem = PushFront();
2957  pNewItem->Value = value;
2958  return pNewItem;
2959 }
2960 
2961 template<typename T>
2962 void VmaRawList<T>::PopBack()
2963 {
2964  VMA_HEAVY_ASSERT(m_Count > 0);
2965  ItemType* const pBackItem = m_pBack;
2966  ItemType* const pPrevItem = pBackItem->pPrev;
2967  if(pPrevItem != VMA_NULL)
2968  {
2969  pPrevItem->pNext = VMA_NULL;
2970  }
2971  m_pBack = pPrevItem;
2972  m_ItemAllocator.Free(pBackItem);
2973  --m_Count;
2974 }
2975 
2976 template<typename T>
2977 void VmaRawList<T>::PopFront()
2978 {
2979  VMA_HEAVY_ASSERT(m_Count > 0);
2980  ItemType* const pFrontItem = m_pFront;
2981  ItemType* const pNextItem = pFrontItem->pNext;
2982  if(pNextItem != VMA_NULL)
2983  {
2984  pNextItem->pPrev = VMA_NULL;
2985  }
2986  m_pFront = pNextItem;
2987  m_ItemAllocator.Free(pFrontItem);
2988  --m_Count;
2989 }
2990 
2991 template<typename T>
2992 void VmaRawList<T>::Remove(ItemType* pItem)
2993 {
2994  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
2995  VMA_HEAVY_ASSERT(m_Count > 0);
2996 
2997  if(pItem->pPrev != VMA_NULL)
2998  {
2999  pItem->pPrev->pNext = pItem->pNext;
3000  }
3001  else
3002  {
3003  VMA_HEAVY_ASSERT(m_pFront == pItem);
3004  m_pFront = pItem->pNext;
3005  }
3006 
3007  if(pItem->pNext != VMA_NULL)
3008  {
3009  pItem->pNext->pPrev = pItem->pPrev;
3010  }
3011  else
3012  {
3013  VMA_HEAVY_ASSERT(m_pBack == pItem);
3014  m_pBack = pItem->pPrev;
3015  }
3016 
3017  m_ItemAllocator.Free(pItem);
3018  --m_Count;
3019 }
3020 
3021 template<typename T>
3022 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3023 {
3024  if(pItem != VMA_NULL)
3025  {
3026  ItemType* const prevItem = pItem->pPrev;
3027  ItemType* const newItem = m_ItemAllocator.Alloc();
3028  newItem->pPrev = prevItem;
3029  newItem->pNext = pItem;
3030  pItem->pPrev = newItem;
3031  if(prevItem != VMA_NULL)
3032  {
3033  prevItem->pNext = newItem;
3034  }
3035  else
3036  {
3037  VMA_HEAVY_ASSERT(m_pFront == pItem);
3038  m_pFront = newItem;
3039  }
3040  ++m_Count;
3041  return newItem;
3042  }
3043  else
3044  return PushBack();
3045 }
3046 
3047 template<typename T>
3048 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3049 {
3050  if(pItem != VMA_NULL)
3051  {
3052  ItemType* const nextItem = pItem->pNext;
3053  ItemType* const newItem = m_ItemAllocator.Alloc();
3054  newItem->pNext = nextItem;
3055  newItem->pPrev = pItem;
3056  pItem->pNext = newItem;
3057  if(nextItem != VMA_NULL)
3058  {
3059  nextItem->pPrev = newItem;
3060  }
3061  else
3062  {
3063  VMA_HEAVY_ASSERT(m_pBack == pItem);
3064  m_pBack = newItem;
3065  }
3066  ++m_Count;
3067  return newItem;
3068  }
3069  else
3070  return PushFront();
3071 }
3072 
3073 template<typename T>
3074 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
3075 {
3076  ItemType* const newItem = InsertBefore(pItem);
3077  newItem->Value = value;
3078  return newItem;
3079 }
3080 
3081 template<typename T>
3082 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
3083 {
3084  ItemType* const newItem = InsertAfter(pItem);
3085  newItem->Value = value;
3086  return newItem;
3087 }
3088 
3089 template<typename T, typename AllocatorT>
3090 class VmaList
3091 {
3092 public:
3093  class iterator
3094  {
3095  public:
3096  iterator() :
3097  m_pList(VMA_NULL),
3098  m_pItem(VMA_NULL)
3099  {
3100  }
3101 
3102  T& operator*() const
3103  {
3104  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3105  return m_pItem->Value;
3106  }
3107  T* operator->() const
3108  {
3109  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3110  return &m_pItem->Value;
3111  }
3112 
3113  iterator& operator++()
3114  {
3115  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3116  m_pItem = m_pItem->pNext;
3117  return *this;
3118  }
3119  iterator& operator--()
3120  {
3121  if(m_pItem != VMA_NULL)
3122  {
3123  m_pItem = m_pItem->pPrev;
3124  }
3125  else
3126  {
3127  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3128  m_pItem = m_pList->Back();
3129  }
3130  return *this;
3131  }
3132 
3133  iterator operator++(int)
3134  {
3135  iterator result = *this;
3136  ++*this;
3137  return result;
3138  }
3139  iterator operator--(int)
3140  {
3141  iterator result = *this;
3142  --*this;
3143  return result;
3144  }
3145 
3146  bool operator==(const iterator& rhs) const
3147  {
3148  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3149  return m_pItem == rhs.m_pItem;
3150  }
3151  bool operator!=(const iterator& rhs) const
3152  {
3153  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3154  return m_pItem != rhs.m_pItem;
3155  }
3156 
3157  private:
3158  VmaRawList<T>* m_pList;
3159  VmaListItem<T>* m_pItem;
3160 
3161  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
3162  m_pList(pList),
3163  m_pItem(pItem)
3164  {
3165  }
3166 
3167  friend class VmaList<T, AllocatorT>;
3168  };
3169 
3170  class const_iterator
3171  {
3172  public:
3173  const_iterator() :
3174  m_pList(VMA_NULL),
3175  m_pItem(VMA_NULL)
3176  {
3177  }
3178 
3179  const_iterator(const iterator& src) :
3180  m_pList(src.m_pList),
3181  m_pItem(src.m_pItem)
3182  {
3183  }
3184 
3185  const T& operator*() const
3186  {
3187  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3188  return m_pItem->Value;
3189  }
3190  const T* operator->() const
3191  {
3192  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3193  return &m_pItem->Value;
3194  }
3195 
3196  const_iterator& operator++()
3197  {
3198  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
3199  m_pItem = m_pItem->pNext;
3200  return *this;
3201  }
3202  const_iterator& operator--()
3203  {
3204  if(m_pItem != VMA_NULL)
3205  {
3206  m_pItem = m_pItem->pPrev;
3207  }
3208  else
3209  {
3210  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
3211  m_pItem = m_pList->Back();
3212  }
3213  return *this;
3214  }
3215 
3216  const_iterator operator++(int)
3217  {
3218  const_iterator result = *this;
3219  ++*this;
3220  return result;
3221  }
3222  const_iterator operator--(int)
3223  {
3224  const_iterator result = *this;
3225  --*this;
3226  return result;
3227  }
3228 
3229  bool operator==(const const_iterator& rhs) const
3230  {
3231  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3232  return m_pItem == rhs.m_pItem;
3233  }
3234  bool operator!=(const const_iterator& rhs) const
3235  {
3236  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
3237  return m_pItem != rhs.m_pItem;
3238  }
3239 
3240  private:
3241  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
3242  m_pList(pList),
3243  m_pItem(pItem)
3244  {
3245  }
3246 
3247  const VmaRawList<T>* m_pList;
3248  const VmaListItem<T>* m_pItem;
3249 
3250  friend class VmaList<T, AllocatorT>;
3251  };
3252 
3253  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
3254 
3255  bool empty() const { return m_RawList.IsEmpty(); }
3256  size_t size() const { return m_RawList.GetCount(); }
3257 
3258  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
3259  iterator end() { return iterator(&m_RawList, VMA_NULL); }
3260 
3261  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
3262  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
3263 
3264  void clear() { m_RawList.Clear(); }
3265  void push_back(const T& value) { m_RawList.PushBack(value); }
3266  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
3267  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
3268 
3269 private:
3270  VmaRawList<T> m_RawList;
3271 };
3272 
3273 #endif // #if VMA_USE_STL_LIST
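// Illustrative usage sketch (assumes the internal implementation above is in
// use, i.e. VMA_USE_STL_LIST is not defined):
static void ExampleListUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaStlAllocator<int> alloc(pCallbacks);
    VmaList< int, VmaStlAllocator<int> > list(alloc);
    list.push_back(42);
    for(VmaList< int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it == 42 on the single iteration of this loop.
    }
    list.clear();
}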
3274 
3276 // class VmaMap
3277 
3278 // Unused in this version.
3279 #if 0
3280 
3281 #if VMA_USE_STL_UNORDERED_MAP
3282 
3283 #define VmaPair std::pair
3284 
3285 #define VMA_MAP_TYPE(KeyT, ValueT) \
3286  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3287 
3288 #else // #if VMA_USE_STL_UNORDERED_MAP
3289 
3290 template<typename T1, typename T2>
3291 struct VmaPair
3292 {
3293  T1 first;
3294  T2 second;
3295 
3296  VmaPair() : first(), second() { }
3297  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
3298 };
3299 
3300 /* Class compatible with subset of interface of std::unordered_map.
3301 KeyT, ValueT must be POD because they will be stored in VmaVector.
3302 */
3303 template<typename KeyT, typename ValueT>
3304 class VmaMap
3305 {
3306 public:
3307  typedef VmaPair<KeyT, ValueT> PairType;
3308  typedef PairType* iterator;
3309 
3310  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
3311 
3312  iterator begin() { return m_Vector.begin(); }
3313  iterator end() { return m_Vector.end(); }
3314 
3315  void insert(const PairType& pair);
3316  iterator find(const KeyT& key);
3317  void erase(iterator it);
3318 
3319 private:
3320  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
3321 };
3322 
3323 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3324 
3325 template<typename FirstT, typename SecondT>
3326 struct VmaPairFirstLess
3327 {
3328  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
3329  {
3330  return lhs.first < rhs.first;
3331  }
3332  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
3333  {
3334  return lhs.first < rhsFirst;
3335  }
3336 };
3337 
3338 template<typename KeyT, typename ValueT>
3339 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
3340 {
3341  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
3342  m_Vector.data(),
3343  m_Vector.data() + m_Vector.size(),
3344  pair,
3345  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
3346  VmaVectorInsert(m_Vector, indexToInsert, pair);
3347 }
3348 
3349 template<typename KeyT, typename ValueT>
3350 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
3351 {
3352  PairType* it = VmaBinaryFindFirstNotLess(
3353  m_Vector.data(),
3354  m_Vector.data() + m_Vector.size(),
3355  key,
3356  VmaPairFirstLess<KeyT, ValueT>());
3357  if((it != m_Vector.end()) && (it->first == key))
3358  {
3359  return it;
3360  }
3361  else
3362  {
3363  return m_Vector.end();
3364  }
3365 }
3366 
3367 template<typename KeyT, typename ValueT>
3368 void VmaMap<KeyT, ValueT>::erase(iterator it)
3369 {
3370  VmaVectorRemove(m_Vector, it - m_Vector.begin());
3371 }
3372 
3373 #endif // #if VMA_USE_STL_UNORDERED_MAP
3374 
3375 #endif // #if 0
3376 
3378 
3379 class VmaDeviceMemoryBlock;
3380 
3381 struct VmaAllocation_T
3382 {
3383 private:
3384  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
3385 
3386  enum FLAGS
3387  {
3388  FLAG_USER_DATA_STRING = 0x01,
3389  };
3390 
3391 public:
3392  enum ALLOCATION_TYPE
3393  {
3394  ALLOCATION_TYPE_NONE,
3395  ALLOCATION_TYPE_BLOCK,
3396  ALLOCATION_TYPE_DEDICATED,
3397  };
3398 
3399  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
3400  m_Alignment(1),
3401  m_Size(0),
3402  m_pUserData(VMA_NULL),
3403  m_LastUseFrameIndex(currentFrameIndex),
3404  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
3405  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
3406  m_MapCount(0),
3407  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
3408  {
3409  }
3410 
3411  ~VmaAllocation_T()
3412  {
3413  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
3414 
3415  // Check if owned string was freed.
3416  VMA_ASSERT(m_pUserData == VMA_NULL);
3417  }
3418 
3419  void InitBlockAllocation(
3420  VmaPool hPool,
3421  VmaDeviceMemoryBlock* block,
3422  VkDeviceSize offset,
3423  VkDeviceSize alignment,
3424  VkDeviceSize size,
3425  VmaSuballocationType suballocationType,
3426  bool mapped,
3427  bool canBecomeLost)
3428  {
3429  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3430  VMA_ASSERT(block != VMA_NULL);
3431  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3432  m_Alignment = alignment;
3433  m_Size = size;
3434  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3435  m_SuballocationType = (uint8_t)suballocationType;
3436  m_BlockAllocation.m_hPool = hPool;
3437  m_BlockAllocation.m_Block = block;
3438  m_BlockAllocation.m_Offset = offset;
3439  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
3440  }
3441 
3442  void InitLost()
3443  {
3444  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3445  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
3446  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
3447  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
3448  m_BlockAllocation.m_Block = VMA_NULL;
3449  m_BlockAllocation.m_Offset = 0;
3450  m_BlockAllocation.m_CanBecomeLost = true;
3451  }
3452 
3453  void ChangeBlockAllocation(
3454  VmaAllocator hAllocator,
3455  VmaDeviceMemoryBlock* block,
3456  VkDeviceSize offset);
3457 
3458  // pMappedData not null means allocation is created with MAPPED flag.
3459  void InitDedicatedAllocation(
3460  uint32_t memoryTypeIndex,
3461  VkDeviceMemory hMemory,
3462  VmaSuballocationType suballocationType,
3463  void* pMappedData,
3464  VkDeviceSize size)
3465  {
3466  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
3467  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
3468  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
3469  m_Alignment = 0;
3470  m_Size = size;
3471  m_SuballocationType = (uint8_t)suballocationType;
3472  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
3473  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
3474  m_DedicatedAllocation.m_hMemory = hMemory;
3475  m_DedicatedAllocation.m_pMappedData = pMappedData;
3476  }
3477 
3478  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
3479  VkDeviceSize GetAlignment() const { return m_Alignment; }
3480  VkDeviceSize GetSize() const { return m_Size; }
3481  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
3482  void* GetUserData() const { return m_pUserData; }
3483  void SetUserData(VmaAllocator hAllocator, void* pUserData);
3484  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
3485 
3486  VmaDeviceMemoryBlock* GetBlock() const
3487  {
3488  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
3489  return m_BlockAllocation.m_Block;
3490  }
3491  VkDeviceSize GetOffset() const;
3492  VkDeviceMemory GetMemory() const;
3493  uint32_t GetMemoryTypeIndex() const;
3494  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
3495  void* GetMappedData() const;
3496  bool CanBecomeLost() const;
3497  VmaPool GetPool() const;
3498 
3499  uint32_t GetLastUseFrameIndex() const
3500  {
3501  return m_LastUseFrameIndex.load();
3502  }
3503  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
3504  {
3505  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
3506  }
3507  /*
3508  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
3509  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
3510  - Else, returns false.
3511 
3512  If hAllocation is already lost, assert - you should not call it then.
3513  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
3514  */
3515  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3516 
3517  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
3518  {
3519  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
3520  outInfo.blockCount = 1;
3521  outInfo.allocationCount = 1;
3522  outInfo.unusedRangeCount = 0;
3523  outInfo.usedBytes = m_Size;
3524  outInfo.unusedBytes = 0;
3525  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
3526  outInfo.unusedRangeSizeMin = UINT64_MAX;
3527  outInfo.unusedRangeSizeMax = 0;
3528  }
3529 
3530  void BlockAllocMap();
3531  void BlockAllocUnmap();
3532  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
3533  void DedicatedAllocUnmap(VmaAllocator hAllocator);
3534 
3535 private:
3536  VkDeviceSize m_Alignment;
3537  VkDeviceSize m_Size;
3538  void* m_pUserData;
3539  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
3540  uint8_t m_Type; // ALLOCATION_TYPE
3541  uint8_t m_SuballocationType; // VmaSuballocationType
3542  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
3543  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
3544  uint8_t m_MapCount;
3545  uint8_t m_Flags; // enum FLAGS
3546 
3547  // Allocation out of VmaDeviceMemoryBlock.
3548  struct BlockAllocation
3549  {
3550  VmaPool m_hPool; // Null if belongs to general memory.
3551  VmaDeviceMemoryBlock* m_Block;
3552  VkDeviceSize m_Offset;
3553  bool m_CanBecomeLost;
3554  };
3555 
3556  // Allocation for an object that has its own private VkDeviceMemory.
3557  struct DedicatedAllocation
3558  {
3559  uint32_t m_MemoryTypeIndex;
3560  VkDeviceMemory m_hMemory;
3561  void* m_pMappedData; // Not null means memory is mapped.
3562  };
3563 
3564  union
3565  {
3566  // Allocation out of VmaDeviceMemoryBlock.
3567  BlockAllocation m_BlockAllocation;
3568  // Allocation for an object that has its own private VkDeviceMemory.
3569  DedicatedAllocation m_DedicatedAllocation;
3570  };
3571 
3572  void FreeUserDataString(VmaAllocator hAllocator);
3573 };
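/*
Note on the m_MapCount encoding used above (illustrative): bit 0x80
(MAP_COUNT_FLAG_PERSISTENT_MAP) marks an allocation created with
VMA_ALLOCATION_CREATE_MAPPED_BIT, while the low 7 bits count explicit
vmaMapMemory() calls. For example, an allocation created persistently mapped
and then mapped twice more stores:

    m_MapCount == 0x80 | 2 == 0x82

so (m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 2 and IsPersistentMap()
returns true.
*/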
3574 
3575 /*
3576 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3577 allocated memory block or free.
3578 */
3579 struct VmaSuballocation
3580 {
3581  VkDeviceSize offset;
3582  VkDeviceSize size;
3583  VmaAllocation hAllocation;
3584  VmaSuballocationType type;
3585 };
3586 
3587 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
3588 
3589 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
3590 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3591 
3592 /*
3593 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3594 
3595 If canMakeOtherLost was false:
3596 - item points to a FREE suballocation.
3597 - itemsToMakeLostCount is 0.
3598 
3599 If canMakeOtherLost was true:
3600 - item points to first of sequence of suballocations, which are either FREE,
3601  or point to VmaAllocations that can become lost.
3602 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3603  the requested allocation to succeed.
3604 */
3605 struct VmaAllocationRequest
3606 {
3607  VkDeviceSize offset;
3608  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
3609  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
3610  VmaSuballocationList::iterator item;
3611  size_t itemsToMakeLostCount;
3612 
3613  VkDeviceSize CalcCost() const
3614  {
3615  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
3616  }
3617 };
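/*
Worked example of CalcCost() (illustrative numbers): if a candidate request
overlaps lost-able allocations totaling sumItemSize == 262144 bytes and
requires itemsToMakeLostCount == 2 allocations to be made lost, then

    CalcCost() == 262144 + 2 * VMA_LOST_ALLOCATION_COST
               == 262144 + 2 * 1048576
               == 2359296

Candidate requests are compared by this cost and the cheapest one wins (see
VmaBlockMetadata::CreateAllocationRequest below).
*/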
3618 
3619 /*
3620 Data structure used for bookkeeping of allocations and unused ranges of memory
3621 in a single VkDeviceMemory block.
3622 */
3623 class VmaBlockMetadata
3624 {
3625 public:
3626  VmaBlockMetadata(VmaAllocator hAllocator);
3627  ~VmaBlockMetadata();
3628  void Init(VkDeviceSize size);
3629 
3630  // Validates all data structures inside this object. If not valid, returns false.
3631  bool Validate() const;
3632  VkDeviceSize GetSize() const { return m_Size; }
3633  size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
3634  VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
3635  VkDeviceSize GetUnusedRangeSizeMax() const;
3636  // Returns true if this block is empty - contains only a single free suballocation.
3637  bool IsEmpty() const;
3638 
3639  void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
3640  void AddPoolStats(VmaPoolStats& inoutStats) const;
3641 
3642 #if VMA_STATS_STRING_ENABLED
3643  void PrintDetailedMap(class VmaJsonWriter& json) const;
3644 #endif
3645 
3646  // Creates a trivial request for the case when the block is empty.
3647  void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);
3648 
3649  // Tries to find a place for suballocation with given parameters inside this block.
3650  // If succeeded, fills pAllocationRequest and returns true.
3651  // If failed, returns false.
3652  bool CreateAllocationRequest(
3653  uint32_t currentFrameIndex,
3654  uint32_t frameInUseCount,
3655  VkDeviceSize bufferImageGranularity,
3656  VkDeviceSize allocSize,
3657  VkDeviceSize allocAlignment,
3658  VmaSuballocationType allocType,
3659  bool canMakeOtherLost,
3660  VmaAllocationRequest* pAllocationRequest);
3661 
3662  bool MakeRequestedAllocationsLost(
3663  uint32_t currentFrameIndex,
3664  uint32_t frameInUseCount,
3665  VmaAllocationRequest* pAllocationRequest);
3666 
3667  uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
3668 
3669  // Makes actual allocation based on request. Request must already be checked and valid.
3670  void Alloc(
3671  const VmaAllocationRequest& request,
3672  VmaSuballocationType type,
3673  VkDeviceSize allocSize,
3674  VmaAllocation hAllocation);
3675 
3676  // Frees suballocation assigned to given memory region.
3677  void Free(const VmaAllocation allocation);
3678  void FreeAtOffset(VkDeviceSize offset);
3679 
3680 private:
3681  VkDeviceSize m_Size;
3682  uint32_t m_FreeCount;
3683  VkDeviceSize m_SumFreeSize;
3684  VmaSuballocationList m_Suballocations;
3685  // Suballocations that are free and have size greater than a certain threshold.
3686  // Sorted by size, ascending.
3687  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
3688 
3689  bool ValidateFreeSuballocationList() const;
3690 
3691  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
3692  // If yes, fills pOffset and returns true. If no, returns false.
3693  bool CheckAllocation(
3694  uint32_t currentFrameIndex,
3695  uint32_t frameInUseCount,
3696  VkDeviceSize bufferImageGranularity,
3697  VkDeviceSize allocSize,
3698  VkDeviceSize allocAlignment,
3699  VmaSuballocationType allocType,
3700  VmaSuballocationList::const_iterator suballocItem,
3701  bool canMakeOtherLost,
3702  VkDeviceSize* pOffset,
3703  size_t* itemsToMakeLostCount,
3704  VkDeviceSize* pSumFreeSize,
3705  VkDeviceSize* pSumItemSize) const;
3706  // Given a free suballocation, merges it with the following one, which must also be free.
3707  void MergeFreeWithNext(VmaSuballocationList::iterator item);
3708  // Releases the given suballocation, making it free.
3709  // Merges it with adjacent free suballocations if applicable.
3710  // Returns an iterator to the new free suballocation at this place.
3711  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
3712  // Given a free suballocation, inserts it into the sorted list
3713  // m_FreeSuballocationsBySize if it is large enough to qualify.
3714  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
3715  // Given a free suballocation, removes it from the sorted list
3716  // m_FreeSuballocationsBySize if it was registered there.
3717  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
3718 };
3719 
3720 // Helper class that represents mapped memory. Synchronized internally.
3721 class VmaDeviceMemoryMapping
3722 {
3723 public:
3724  VmaDeviceMemoryMapping();
3725  ~VmaDeviceMemoryMapping();
3726 
3727  void* GetMappedData() const { return m_pMappedData; }
3728 
3729  // ppData can be null.
3730  VkResult Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData);
3731  void Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count);
3732 
3733 private:
3734  VMA_MUTEX m_Mutex;
3735  uint32_t m_MapCount;
3736  void* m_pMappedData;
3737 };
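/*
Assumed semantics of the reference counting above (a sketch, not a contract):
Map() adds `count` to m_MapCount and calls vkMapMemory only on the
zero -> nonzero transition; Unmap() subtracts and calls vkUnmapMemory only
when the counter returns to zero. For example:

    void* pData;
    mapping.Map(hAllocator, hMemory, 1, &pData); // vkMapMemory called here
    mapping.Map(hAllocator, hMemory, 1, &pData); // only the counter grows
    mapping.Unmap(hAllocator, hMemory, 1);       // counter drops to 1
    mapping.Unmap(hAllocator, hMemory, 1);       // vkUnmapMemory called here
*/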
3738 
3739 /*
3740 Represents a single block of device memory (`VkDeviceMemory`) with all the
3741 data about its regions (aka suballocations, `VmaAllocation`), assigned and free.
3742 
3743 Thread-safety: This class must be externally synchronized.
3744 */
3745 class VmaDeviceMemoryBlock
3746 {
3747 public:
3748  uint32_t m_MemoryTypeIndex;
3749  VkDeviceMemory m_hMemory;
3750  VmaDeviceMemoryMapping m_Mapping;
3751  VmaBlockMetadata m_Metadata;
3752 
3753  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
3754 
3755  ~VmaDeviceMemoryBlock()
3756  {
3757  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
3758  }
3759 
3760  // Always call after construction.
3761  void Init(
3762  uint32_t newMemoryTypeIndex,
3763  VkDeviceMemory newMemory,
3764  VkDeviceSize newSize);
3765  // Always call before destruction.
3766  void Destroy(VmaAllocator allocator);
3767 
3768  // Validates all data structures inside this object. If not valid, returns false.
3769  bool Validate() const;
3770 
3771  // ppData can be null.
3772  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
3773  void Unmap(VmaAllocator hAllocator, uint32_t count);
3774 };
3775 
3776 struct VmaPointerLess
3777 {
3778  bool operator()(const void* lhs, const void* rhs) const
3779  {
3780  return lhs < rhs;
3781  }
3782 };
3783 
3784 class VmaDefragmentator;
3785 
3786 /*
3787 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
3788 Vulkan memory type.
3789 
3790 Synchronized internally with a mutex.
3791 */
3792 struct VmaBlockVector
3793 {
3794  VmaBlockVector(
3795  VmaAllocator hAllocator,
3796  uint32_t memoryTypeIndex,
3797  VkDeviceSize preferredBlockSize,
3798  size_t minBlockCount,
3799  size_t maxBlockCount,
3800  VkDeviceSize bufferImageGranularity,
3801  uint32_t frameInUseCount,
3802  bool isCustomPool);
3803  ~VmaBlockVector();
3804 
3805  VkResult CreateMinBlocks();
3806 
3807  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
3808  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
3809  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
3810  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
3811 
3812  void GetPoolStats(VmaPoolStats* pStats);
3813 
3814  bool IsEmpty() const { return m_Blocks.empty(); }
3815 
3816  VkResult Allocate(
3817  VmaPool hCurrentPool,
3818  uint32_t currentFrameIndex,
3819  const VkMemoryRequirements& vkMemReq,
3820  const VmaAllocationCreateInfo& createInfo,
3821  VmaSuballocationType suballocType,
3822  VmaAllocation* pAllocation);
3823 
3824  void Free(
3825  VmaAllocation hAllocation);
3826 
3827  // Adds statistics of this BlockVector to pStats.
3828  void AddStats(VmaStats* pStats);
3829 
3830 #if VMA_STATS_STRING_ENABLED
3831  void PrintDetailedMap(class VmaJsonWriter& json);
3832 #endif
3833 
3834  void MakePoolAllocationsLost(
3835  uint32_t currentFrameIndex,
3836  size_t* pLostAllocationCount);
3837 
3838  VmaDefragmentator* EnsureDefragmentator(
3839  VmaAllocator hAllocator,
3840  uint32_t currentFrameIndex);
3841 
3842  VkResult Defragment(
3843  VmaDefragmentationStats* pDefragmentationStats,
3844  VkDeviceSize& maxBytesToMove,
3845  uint32_t& maxAllocationsToMove);
3846 
3847  void DestroyDefragmentator();
3848 
3849 private:
3850  friend class VmaDefragmentator;
3851 
3852  const VmaAllocator m_hAllocator;
3853  const uint32_t m_MemoryTypeIndex;
3854  const VkDeviceSize m_PreferredBlockSize;
3855  const size_t m_MinBlockCount;
3856  const size_t m_MaxBlockCount;
3857  const VkDeviceSize m_BufferImageGranularity;
3858  const uint32_t m_FrameInUseCount;
3859  const bool m_IsCustomPool;
3860  VMA_MUTEX m_Mutex;
3861  // Incrementally sorted by sumFreeSize, ascending.
3862  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
3863  /* There can be at most one block that is completely empty - a
3864  hysteresis to avoid the pessimistic case of alternating creation and
3865  destruction of a VkDeviceMemory. */
3866  bool m_HasEmptyBlock;
3867  VmaDefragmentator* m_pDefragmentator;
3868 
3869  size_t CalcMaxBlockSize() const;
3870 
3871  // Finds and removes given block from vector.
3872  void Remove(VmaDeviceMemoryBlock* pBlock);
3873 
3874  // Performs a single step in sorting m_Blocks. They may not be fully sorted
3875  // after this call.
3876  void IncrementallySortBlocks();
3877 
3878  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
3879 };
3880 
3881 struct VmaPool_T
3882 {
3883 public:
3884  VmaBlockVector m_BlockVector;
3885 
3886  // Takes ownership.
3887  VmaPool_T(
3888  VmaAllocator hAllocator,
3889  const VmaPoolCreateInfo& createInfo);
3890  ~VmaPool_T();
3891 
3892  VmaBlockVector& GetBlockVector() { return m_BlockVector; }
3893 
3894 #if VMA_STATS_STRING_ENABLED
3895  //void PrintDetailedMap(class VmaStringBuilder& sb);
3896 #endif
3897 };
3898 
3899 class VmaDefragmentator
3900 {
3901  const VmaAllocator m_hAllocator;
3902  VmaBlockVector* const m_pBlockVector;
3903  uint32_t m_CurrentFrameIndex;
3904  VkDeviceSize m_BytesMoved;
3905  uint32_t m_AllocationsMoved;
3906 
3907  struct AllocationInfo
3908  {
3909  VmaAllocation m_hAllocation;
3910  VkBool32* m_pChanged;
3911 
3912  AllocationInfo() :
3913  m_hAllocation(VK_NULL_HANDLE),
3914  m_pChanged(VMA_NULL)
3915  {
3916  }
3917  };
3918 
3919  struct AllocationInfoSizeGreater
3920  {
3921  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
3922  {
3923  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
3924  }
3925  };
3926 
3927  // Used between AddAllocation and Defragment.
3928  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3929 
3930  struct BlockInfo
3931  {
3932  VmaDeviceMemoryBlock* m_pBlock;
3933  bool m_HasNonMovableAllocations;
3934  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
3935 
3936  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
3937  m_pBlock(VMA_NULL),
3938  m_HasNonMovableAllocations(true),
3939  m_Allocations(pAllocationCallbacks),
3940  m_pMappedDataForDefragmentation(VMA_NULL)
3941  {
3942  }
3943 
3944  void CalcHasNonMovableAllocations()
3945  {
3946  const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
3947  const size_t defragmentAllocCount = m_Allocations.size();
3948  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
3949  }
3950 
3951  void SortAllocationsBySizeDescecnding()
3952  {
3953  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
3954  }
3955 
3956  VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
3957  void Unmap(VmaAllocator hAllocator);
3958 
3959  private:
3960  // Not null if mapped for defragmentation only, not originally mapped.
3961  void* m_pMappedDataForDefragmentation;
3962  };
3963 
3964  struct BlockPointerLess
3965  {
3966  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
3967  {
3968  return pLhsBlockInfo->m_pBlock < pRhsBlock;
3969  }
3970  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3971  {
3972  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
3973  }
3974  };
3975 
3976  // 1. Blocks with some non-movable allocations go first.
3977  // 2. Blocks with smaller sumFreeSize go first.
3978  struct BlockInfoCompareMoveDestination
3979  {
3980  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
3981  {
3982  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
3983  {
3984  return true;
3985  }
3986  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
3987  {
3988  return false;
3989  }
3990  if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
3991  {
3992  return true;
3993  }
3994  return false;
3995  }
3996  };
3997 
3998  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
3999  BlockInfoVector m_Blocks;
4000 
4001  VkResult DefragmentRound(
4002  VkDeviceSize maxBytesToMove,
4003  uint32_t maxAllocationsToMove);
4004 
4005  static bool MoveMakesSense(
4006  size_t dstBlockIndex, VkDeviceSize dstOffset,
4007  size_t srcBlockIndex, VkDeviceSize srcOffset);
4008 
4009 public:
4010  VmaDefragmentator(
4011  VmaAllocator hAllocator,
4012  VmaBlockVector* pBlockVector,
4013  uint32_t currentFrameIndex);
4014 
4015  ~VmaDefragmentator();
4016 
4017  VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
4018  uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
4019 
4020  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
4021 
4022  VkResult Defragment(
4023  VkDeviceSize maxBytesToMove,
4024  uint32_t maxAllocationsToMove);
4025 };
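/*
Illustrative driving sequence for this class (in the public API this is
performed by vmaDefragment(); the local variable names are assumptions):

    VmaDefragmentator* pDefrag =
        blockVector.EnsureDefragmentator(hAllocator, currentFrameIndex);
    for(size_t i = 0; i < allocationCount; ++i)
    {
        pDefrag->AddAllocation(pAllocations[i], &pAllocationsChanged[i]);
    }
    VkResult res = pDefrag->Defragment(maxBytesToMove, maxAllocationsToMove);
    // GetBytesMoved() and GetAllocationsMoved() then feed VmaDefragmentationStats.
*/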
4026 
4027 // Main allocator object.
4028 struct VmaAllocator_T
4029 {
4030  bool m_UseMutex;
4031  bool m_UseKhrDedicatedAllocation;
4032  VkDevice m_hDevice;
4033  bool m_AllocationCallbacksSpecified;
4034  VkAllocationCallbacks m_AllocationCallbacks;
4035  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4036 
4037  // Number of bytes free out of limit, or VK_WHOLE_SIZE if there is no limit for that heap.
4038  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4039  VMA_MUTEX m_HeapSizeLimitMutex;
4040 
4041  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4042  VkPhysicalDeviceMemoryProperties m_MemProps;
4043 
4044  // Default pools.
4045  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4046 
4047  // Each vector is sorted by memory (handle value).
4048  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4049  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4050  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4051 
4052  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4053  ~VmaAllocator_T();
4054 
4055  const VkAllocationCallbacks* GetAllocationCallbacks() const
4056  {
4057  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4058  }
4059  const VmaVulkanFunctions& GetVulkanFunctions() const
4060  {
4061  return m_VulkanFunctions;
4062  }
4063 
4064  VkDeviceSize GetBufferImageGranularity() const
4065  {
4066  return VMA_MAX(
4067  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4068  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4069  }
4070 
4071  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4072  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4073 
4074  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4075  {
4076  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4077  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4078  }
4079 
4080  void GetBufferMemoryRequirements(
4081  VkBuffer hBuffer,
4082  VkMemoryRequirements& memReq,
4083  bool& requiresDedicatedAllocation,
4084  bool& prefersDedicatedAllocation) const;
4085  void GetImageMemoryRequirements(
4086  VkImage hImage,
4087  VkMemoryRequirements& memReq,
4088  bool& requiresDedicatedAllocation,
4089  bool& prefersDedicatedAllocation) const;
4090 
4091  // Main allocation function.
4092  VkResult AllocateMemory(
4093  const VkMemoryRequirements& vkMemReq,
4094  bool requiresDedicatedAllocation,
4095  bool prefersDedicatedAllocation,
4096  VkBuffer dedicatedBuffer,
4097  VkImage dedicatedImage,
4098  const VmaAllocationCreateInfo& createInfo,
4099  VmaSuballocationType suballocType,
4100  VmaAllocation* pAllocation);
4101 
4102  // Main deallocation function.
4103  void FreeMemory(const VmaAllocation allocation);
4104 
4105  void CalculateStats(VmaStats* pStats);
4106 
4107 #if VMA_STATS_STRING_ENABLED
4108  void PrintDetailedMap(class VmaJsonWriter& json);
4109 #endif
4110 
4111  VkResult Defragment(
4112  VmaAllocation* pAllocations,
4113  size_t allocationCount,
4114  VkBool32* pAllocationsChanged,
4115  const VmaDefragmentationInfo* pDefragmentationInfo,
4116  VmaDefragmentationStats* pDefragmentationStats);
4117 
4118  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4119  bool TouchAllocation(VmaAllocation hAllocation);
4120 
4121  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4122  void DestroyPool(VmaPool pool);
4123  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4124 
4125  void SetCurrentFrameIndex(uint32_t frameIndex);
4126 
4127  void MakePoolAllocationsLost(
4128  VmaPool hPool,
4129  size_t* pLostAllocationCount);
4130 
4131  void CreateLostAllocation(VmaAllocation* pAllocation);
4132 
4133  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4134  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4135 
4136  VkResult Map(VmaAllocation hAllocation, void** ppData);
4137  void Unmap(VmaAllocation hAllocation);
4138 
4139 private:
4140  VkDeviceSize m_PreferredLargeHeapBlockSize;
4141 
4142  VkPhysicalDevice m_PhysicalDevice;
4143  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4144 
4145  VMA_MUTEX m_PoolsMutex;
4146  // Protected by m_PoolsMutex. Sorted by pointer value.
4147  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4148 
4149  VmaVulkanFunctions m_VulkanFunctions;
4150 
4151  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4152 
4153  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4154 
4155  VkResult AllocateMemoryOfType(
4156  const VkMemoryRequirements& vkMemReq,
4157  bool dedicatedAllocation,
4158  VkBuffer dedicatedBuffer,
4159  VkImage dedicatedImage,
4160  const VmaAllocationCreateInfo& createInfo,
4161  uint32_t memTypeIndex,
4162  VmaSuballocationType suballocType,
4163  VmaAllocation* pAllocation);
4164 
4165  // Allocates and registers a new VkDeviceMemory specifically for a single allocation.
4166  VkResult AllocateDedicatedMemory(
4167  VkDeviceSize size,
4168  VmaSuballocationType suballocType,
4169  uint32_t memTypeIndex,
4170  bool map,
4171  bool isUserDataString,
4172  void* pUserData,
4173  VkBuffer dedicatedBuffer,
4174  VkImage dedicatedImage,
4175  VmaAllocation* pAllocation);
4176 
4177  // Frees the given allocation as dedicated memory and unregisters it from m_pDedicatedAllocations.
4178  void FreeDedicatedMemory(VmaAllocation allocation);
4179 };
4180 
4182 // Memory allocation #2 after VmaAllocator_T definition
4183 
4184 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4185 {
4186  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4187 }
4188 
4189 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4190 {
4191  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4192 }
4193 
4194 template<typename T>
4195 static T* VmaAllocate(VmaAllocator hAllocator)
4196 {
4197  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4198 }
4199 
4200 template<typename T>
4201 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4202 {
4203  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4204 }
4205 
4206 template<typename T>
4207 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4208 {
4209  if(ptr != VMA_NULL)
4210  {
4211  ptr->~T();
4212  VmaFree(hAllocator, ptr);
4213  }
4214 }
4215 
4216 template<typename T>
4217 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4218 {
4219  if(ptr != VMA_NULL)
4220  {
4221  for(size_t i = count; i--; )
4222  ptr[i].~T();
4223  VmaFree(hAllocator, ptr);
4224  }
4225 }
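/*
Illustrative pairing of these helpers (a sketch, not library code):
VmaAllocate<T> returns raw, correctly aligned storage obtained through the
user's allocation callbacks; the object is constructed with placement new and
destroyed through vma_delete, which runs ~T() before handing the memory back
to VmaFree:

    T* p = VmaAllocate<T>(hAllocator);
    new(p) T();                // placement-construct
    vma_delete(hAllocator, p); // ~T(), then VmaFree
*/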
4226 
4228 // VmaStringBuilder
4229 
4230 #if VMA_STATS_STRING_ENABLED
4231 
4232 class VmaStringBuilder
4233 {
4234 public:
4235  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
4236  size_t GetLength() const { return m_Data.size(); }
4237  const char* GetData() const { return m_Data.data(); }
4238 
4239  void Add(char ch) { m_Data.push_back(ch); }
4240  void Add(const char* pStr);
4241  void AddNewLine() { Add('\n'); }
4242  void AddNumber(uint32_t num);
4243  void AddNumber(uint64_t num);
4244  void AddPointer(const void* ptr);
4245 
4246 private:
4247  VmaVector< char, VmaStlAllocator<char> > m_Data;
4248 };
4249 
4250 void VmaStringBuilder::Add(const char* pStr)
4251 {
4252  const size_t strLen = strlen(pStr);
4253  if(strLen > 0)
4254  {
4255  const size_t oldCount = m_Data.size();
4256  m_Data.resize(oldCount + strLen);
4257  memcpy(m_Data.data() + oldCount, pStr, strLen);
4258  }
4259 }
4260 
4261 void VmaStringBuilder::AddNumber(uint32_t num)
4262 {
4263  char buf[11];
4264  VmaUint32ToStr(buf, sizeof(buf), num);
4265  Add(buf);
4266 }
4267 
4268 void VmaStringBuilder::AddNumber(uint64_t num)
4269 {
4270  char buf[21];
4271  VmaUint64ToStr(buf, sizeof(buf), num);
4272  Add(buf);
4273 }
4274 
4275 void VmaStringBuilder::AddPointer(const void* ptr)
4276 {
4277  char buf[21];
4278  VmaPtrToStr(buf, sizeof(buf), ptr);
4279  Add(buf);
4280 }
4281 
4282 #endif // #if VMA_STATS_STRING_ENABLED
4283 
4285 // VmaJsonWriter
4286 
4287 #if VMA_STATS_STRING_ENABLED
4288 
4289 class VmaJsonWriter
4290 {
4291 public:
4292  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
4293  ~VmaJsonWriter();
4294 
4295  void BeginObject(bool singleLine = false);
4296  void EndObject();
4297 
4298  void BeginArray(bool singleLine = false);
4299  void EndArray();
4300 
4301  void WriteString(const char* pStr);
4302  void BeginString(const char* pStr = VMA_NULL);
4303  void ContinueString(const char* pStr);
4304  void ContinueString(uint32_t n);
4305  void ContinueString(uint64_t n);
4306  void ContinueString_Pointer(const void* ptr);
4307  void EndString(const char* pStr = VMA_NULL);
4308 
4309  void WriteNumber(uint32_t n);
4310  void WriteNumber(uint64_t n);
4311  void WriteBool(bool b);
4312  void WriteNull();
4313 
4314 private:
4315  static const char* const INDENT;
4316 
4317  enum COLLECTION_TYPE
4318  {
4319  COLLECTION_TYPE_OBJECT,
4320  COLLECTION_TYPE_ARRAY,
4321  };
4322  struct StackItem
4323  {
4324  COLLECTION_TYPE type;
4325  uint32_t valueCount;
4326  bool singleLineMode;
4327  };
4328 
4329  VmaStringBuilder& m_SB;
4330  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
4331  bool m_InsideString;
4332 
4333  void BeginValue(bool isString);
4334  void WriteIndent(bool oneLess = false);
4335 };
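/*
Illustrative use of VmaJsonWriter (a sketch): inside an object, writes must
alternate string key / value - BeginValue() asserts this. For example:

    json.BeginObject();
    json.WriteString("Size"); // key
    json.WriteNumber(42u);    // value
    json.EndObject();

emits {"Size": 42}, spread over indented lines unless singleLine was requested.
*/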
4336 
4337 const char* const VmaJsonWriter::INDENT = " ";
4338 
4339 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
4340  m_SB(sb),
4341  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
4342  m_InsideString(false)
4343 {
4344 }
4345 
4346 VmaJsonWriter::~VmaJsonWriter()
4347 {
4348  VMA_ASSERT(!m_InsideString);
4349  VMA_ASSERT(m_Stack.empty());
4350 }
4351 
4352 void VmaJsonWriter::BeginObject(bool singleLine)
4353 {
4354  VMA_ASSERT(!m_InsideString);
4355 
4356  BeginValue(false);
4357  m_SB.Add('{');
4358 
4359  StackItem item;
4360  item.type = COLLECTION_TYPE_OBJECT;
4361  item.valueCount = 0;
4362  item.singleLineMode = singleLine;
4363  m_Stack.push_back(item);
4364 }
4365 
4366 void VmaJsonWriter::EndObject()
4367 {
4368  VMA_ASSERT(!m_InsideString);
4369 
4370  WriteIndent(true);
4371  m_SB.Add('}');
4372 
4373  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4374  m_Stack.pop_back();
4375 }
4376 
4377 void VmaJsonWriter::BeginArray(bool singleLine)
4378 {
4379  VMA_ASSERT(!m_InsideString);
4380 
4381  BeginValue(false);
4382  m_SB.Add('[');
4383 
4384  StackItem item;
4385  item.type = COLLECTION_TYPE_ARRAY;
4386  item.valueCount = 0;
4387  item.singleLineMode = singleLine;
4388  m_Stack.push_back(item);
4389 }
4390 
4391 void VmaJsonWriter::EndArray()
4392 {
4393  VMA_ASSERT(!m_InsideString);
4394 
4395  WriteIndent(true);
4396  m_SB.Add(']');
4397 
4398  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4399  m_Stack.pop_back();
4400 }
4401 
4402 void VmaJsonWriter::WriteString(const char* pStr)
4403 {
4404  BeginString(pStr);
4405  EndString();
4406 }
4407 
4408 void VmaJsonWriter::BeginString(const char* pStr)
4409 {
4410  VMA_ASSERT(!m_InsideString);
4411 
4412  BeginValue(true);
4413  m_SB.Add('"');
4414  m_InsideString = true;
4415  if(pStr != VMA_NULL && pStr[0] != '\0')
4416  {
4417  ContinueString(pStr);
4418  }
4419 }
4420 
4421 void VmaJsonWriter::ContinueString(const char* pStr)
4422 {
4423  VMA_ASSERT(m_InsideString);
4424 
4425  const size_t strLen = strlen(pStr);
4426  for(size_t i = 0; i < strLen; ++i)
4427  {
4428  char ch = pStr[i];
4429  if(ch == '\\')
4430  {
4431  m_SB.Add("\\\\");
4432  }
4433  else if(ch == '"')
4434  {
4435  m_SB.Add("\\\"");
4436  }
4437  else if(ch >= 32)
4438  {
4439  m_SB.Add(ch);
4440  }
4441  else switch(ch)
4442  {
4443  case '\b':
4444  m_SB.Add("\\b");
4445  break;
4446  case '\f':
4447  m_SB.Add("\\f");
4448  break;
4449  case '\n':
4450  m_SB.Add("\\n");
4451  break;
4452  case '\r':
4453  m_SB.Add("\\r");
4454  break;
4455  case '\t':
4456  m_SB.Add("\\t");
4457  break;
4458  default:
4459  VMA_ASSERT(0 && "Character not currently supported.");
4460  break;
4461  }
4462  }
4463 }
4464 
4465 void VmaJsonWriter::ContinueString(uint32_t n)
4466 {
4467  VMA_ASSERT(m_InsideString);
4468  m_SB.AddNumber(n);
4469 }
4470 
4471 void VmaJsonWriter::ContinueString(uint64_t n)
4472 {
4473  VMA_ASSERT(m_InsideString);
4474  m_SB.AddNumber(n);
4475 }
4476 
4477 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
4478 {
4479  VMA_ASSERT(m_InsideString);
4480  m_SB.AddPointer(ptr);
4481 }
4482 
4483 void VmaJsonWriter::EndString(const char* pStr)
4484 {
4485  VMA_ASSERT(m_InsideString);
4486  if(pStr != VMA_NULL && pStr[0] != '\0')
4487  {
4488  ContinueString(pStr);
4489  }
4490  m_SB.Add('"');
4491  m_InsideString = false;
4492 }
4493 
4494 void VmaJsonWriter::WriteNumber(uint32_t n)
4495 {
4496  VMA_ASSERT(!m_InsideString);
4497  BeginValue(false);
4498  m_SB.AddNumber(n);
4499 }
4500 
4501 void VmaJsonWriter::WriteNumber(uint64_t n)
4502 {
4503  VMA_ASSERT(!m_InsideString);
4504  BeginValue(false);
4505  m_SB.AddNumber(n);
4506 }
4507 
4508 void VmaJsonWriter::WriteBool(bool b)
4509 {
4510  VMA_ASSERT(!m_InsideString);
4511  BeginValue(false);
4512  m_SB.Add(b ? "true" : "false");
4513 }
4514 
4515 void VmaJsonWriter::WriteNull()
4516 {
4517  VMA_ASSERT(!m_InsideString);
4518  BeginValue(false);
4519  m_SB.Add("null");
4520 }
4521 
4522 void VmaJsonWriter::BeginValue(bool isString)
4523 {
4524  if(!m_Stack.empty())
4525  {
4526  StackItem& currItem = m_Stack.back();
4527  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4528  currItem.valueCount % 2 == 0)
4529  {
4530  VMA_ASSERT(isString);
4531  }
4532 
4533  if(currItem.type == COLLECTION_TYPE_OBJECT &&
4534  currItem.valueCount % 2 != 0)
4535  {
4536  m_SB.Add(": ");
4537  }
4538  else if(currItem.valueCount > 0)
4539  {
4540  m_SB.Add(", ");
4541  WriteIndent();
4542  }
4543  else
4544  {
4545  WriteIndent();
4546  }
4547  ++currItem.valueCount;
4548  }
4549 }
4550 
4551 void VmaJsonWriter::WriteIndent(bool oneLess)
4552 {
4553  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4554  {
4555  m_SB.AddNewLine();
4556 
4557  size_t count = m_Stack.size();
4558  if(count > 0 && oneLess)
4559  {
4560  --count;
4561  }
4562  for(size_t i = 0; i < count; ++i)
4563  {
4564  m_SB.Add(INDENT);
4565  }
4566  }
4567 }
4568 
4569 #endif // #if VMA_STATS_STRING_ENABLED
4570 
4572 
4573 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4574 {
4575  if(IsUserDataString())
4576  {
4577  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4578 
4579  FreeUserDataString(hAllocator);
4580 
4581  if(pUserData != VMA_NULL)
4582  {
4583  const char* const newStrSrc = (char*)pUserData;
4584  const size_t newStrLen = strlen(newStrSrc);
4585  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4586  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4587  m_pUserData = newStrDst;
4588  }
4589  }
4590  else
4591  {
4592  m_pUserData = pUserData;
4593  }
4594 }
4595 
4596 void VmaAllocation_T::ChangeBlockAllocation(
4597  VmaAllocator hAllocator,
4598  VmaDeviceMemoryBlock* block,
4599  VkDeviceSize offset)
4600 {
4601  VMA_ASSERT(block != VMA_NULL);
4602  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4603 
4604  // Move mapping reference counter from old block to new block.
4605  if(block != m_BlockAllocation.m_Block)
4606  {
4607  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
4608  if(IsPersistentMap())
4609  ++mapRefCount;
4610  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
4611  block->Map(hAllocator, mapRefCount, VMA_NULL);
4612  }
4613 
4614  m_BlockAllocation.m_Block = block;
4615  m_BlockAllocation.m_Offset = offset;
4616 }
4617 
4618 VkDeviceSize VmaAllocation_T::GetOffset() const
4619 {
4620  switch(m_Type)
4621  {
4622  case ALLOCATION_TYPE_BLOCK:
4623  return m_BlockAllocation.m_Offset;
4624  case ALLOCATION_TYPE_DEDICATED:
4625  return 0;
4626  default:
4627  VMA_ASSERT(0);
4628  return 0;
4629  }
4630 }
4631 
4632 VkDeviceMemory VmaAllocation_T::GetMemory() const
4633 {
4634  switch(m_Type)
4635  {
4636  case ALLOCATION_TYPE_BLOCK:
4637  return m_BlockAllocation.m_Block->m_hMemory;
4638  case ALLOCATION_TYPE_DEDICATED:
4639  return m_DedicatedAllocation.m_hMemory;
4640  default:
4641  VMA_ASSERT(0);
4642  return VK_NULL_HANDLE;
4643  }
4644 }
4645 
4646 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4647 {
4648  switch(m_Type)
4649  {
4650  case ALLOCATION_TYPE_BLOCK:
4651  return m_BlockAllocation.m_Block->m_MemoryTypeIndex;
4652  case ALLOCATION_TYPE_DEDICATED:
4653  return m_DedicatedAllocation.m_MemoryTypeIndex;
4654  default:
4655  VMA_ASSERT(0);
4656  return UINT32_MAX;
4657  }
4658 }
4659 
4660 void* VmaAllocation_T::GetMappedData() const
4661 {
4662  switch(m_Type)
4663  {
4664  case ALLOCATION_TYPE_BLOCK:
4665  if(m_MapCount != 0)
4666  {
4667  void* pBlockData = m_BlockAllocation.m_Block->m_Mapping.GetMappedData();
4668  VMA_ASSERT(pBlockData != VMA_NULL);
4669  return (char*)pBlockData + m_BlockAllocation.m_Offset;
4670  }
4671  else
4672  {
4673  return VMA_NULL;
4674  }
4675  break;
4676  case ALLOCATION_TYPE_DEDICATED:
4677  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
4678  return m_DedicatedAllocation.m_pMappedData;
4679  default:
4680  VMA_ASSERT(0);
4681  return VMA_NULL;
4682  }
4683 }
4684 
4685 bool VmaAllocation_T::CanBecomeLost() const
4686 {
4687  switch(m_Type)
4688  {
4689  case ALLOCATION_TYPE_BLOCK:
4690  return m_BlockAllocation.m_CanBecomeLost;
4691  case ALLOCATION_TYPE_DEDICATED:
4692  return false;
4693  default:
4694  VMA_ASSERT(0);
4695  return false;
4696  }
4697 }
4698 
4699 VmaPool VmaAllocation_T::GetPool() const
4700 {
4701  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
4702  return m_BlockAllocation.m_hPool;
4703 }
4704 
4705 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
4706 {
4707  VMA_ASSERT(CanBecomeLost());
4708 
4709  /*
4710  Warning: This is a carefully designed algorithm.
4711  Do not modify unless you really know what you're doing :)
4712  */
4713  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
4714  for(;;)
4715  {
4716  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
4717  {
4718  VMA_ASSERT(0);
4719  return false;
4720  }
4721  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
4722  {
4723  return false;
4724  }
4725  else // Last use time earlier than current time.
4726  {
4727  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
4728  {
4729  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
4730  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
4731  return true;
4732  }
4733  }
4734  }
4735 }
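/*
Worked example of the frame arithmetic above (illustrative numbers): with
lastUseFrameIndex == 10 and frameInUseCount == 2, the allocation may still be
in use by the GPU during frames 11 and 12. For currentFrameIndex == 12,
10 + 2 >= 12, so MakeLost() returns false; for currentFrameIndex == 13,
10 + 2 < 13, so the compare-exchange to VMA_FRAME_INDEX_LOST is attempted and,
on success, the allocation becomes lost.
*/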
4736 
4737 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
4738 {
4739  VMA_ASSERT(IsUserDataString());
4740  if(m_pUserData != VMA_NULL)
4741  {
4742  char* const oldStr = (char*)m_pUserData;
4743  const size_t oldStrLen = strlen(oldStr);
4744  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
4745  m_pUserData = VMA_NULL;
4746  }
4747 }
4748 
4749 void VmaAllocation_T::BlockAllocMap()
4750 {
4751  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4752 
4753  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4754  {
4755  ++m_MapCount;
4756  }
4757  else
4758  {
4759  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
4760  }
4761 }
4762 
4763 void VmaAllocation_T::BlockAllocUnmap()
4764 {
4765  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
4766 
4767  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4768  {
4769  --m_MapCount;
4770  }
4771  else
4772  {
4773  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
4774  }
4775 }
4776 
4777 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
4778 {
4779  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4780 
4781  if(m_MapCount != 0)
4782  {
4783  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
4784  {
4785  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
4786  *ppData = m_DedicatedAllocation.m_pMappedData;
4787  ++m_MapCount;
4788  return VK_SUCCESS;
4789  }
4790  else
4791  {
4792  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
4793  return VK_ERROR_MEMORY_MAP_FAILED;
4794  }
4795  }
4796  else
4797  {
4798  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
4799  hAllocator->m_hDevice,
4800  m_DedicatedAllocation.m_hMemory,
4801  0, // offset
4802  VK_WHOLE_SIZE,
4803  0, // flags
4804  ppData);
4805  if(result == VK_SUCCESS)
4806  {
4807  m_DedicatedAllocation.m_pMappedData = *ppData;
4808  m_MapCount = 1;
4809  }
4810  return result;
4811  }
4812 }
4813 
4814 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
4815 {
4816  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
4817 
4818  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
4819  {
4820  --m_MapCount;
4821  if(m_MapCount == 0)
4822  {
4823  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
4824  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
4825  hAllocator->m_hDevice,
4826  m_DedicatedAllocation.m_hMemory);
4827  }
4828  }
4829  else
4830  {
4831  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
4832  }
4833 }
4834 
4835 #if VMA_STATS_STRING_ENABLED
4836 
4837 // Names corresponding to values of enum VmaSuballocationType.
4838 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
4839  "FREE",
4840  "UNKNOWN",
4841  "BUFFER",
4842  "IMAGE_UNKNOWN",
4843  "IMAGE_LINEAR",
4844  "IMAGE_OPTIMAL",
4845 };
4846 
4847 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
4848 {
4849  json.BeginObject();
4850 
4851  json.WriteString("Blocks");
4852  json.WriteNumber(stat.blockCount);
4853 
4854  json.WriteString("Allocations");
4855  json.WriteNumber(stat.allocationCount);
4856 
4857  json.WriteString("UnusedRanges");
4858  json.WriteNumber(stat.unusedRangeCount);
4859 
4860  json.WriteString("UsedBytes");
4861  json.WriteNumber(stat.usedBytes);
4862 
4863  json.WriteString("UnusedBytes");
4864  json.WriteNumber(stat.unusedBytes);
4865 
4866  if(stat.allocationCount > 1)
4867  {
4868  json.WriteString("AllocationSize");
4869  json.BeginObject(true);
4870  json.WriteString("Min");
4871  json.WriteNumber(stat.allocationSizeMin);
4872  json.WriteString("Avg");
4873  json.WriteNumber(stat.allocationSizeAvg);
4874  json.WriteString("Max");
4875  json.WriteNumber(stat.allocationSizeMax);
4876  json.EndObject();
4877  }
4878 
4879  if(stat.unusedRangeCount > 1)
4880  {
4881  json.WriteString("UnusedRangeSize");
4882  json.BeginObject(true);
4883  json.WriteString("Min");
4884  json.WriteNumber(stat.unusedRangeSizeMin);
4885  json.WriteString("Avg");
4886  json.WriteNumber(stat.unusedRangeSizeAvg);
4887  json.WriteString("Max");
4888  json.WriteNumber(stat.unusedRangeSizeMax);
4889  json.EndObject();
4890  }
4891 
4892  json.EndObject();
4893 }
4894 
4895 #endif // #if VMA_STATS_STRING_ENABLED
4896 
4897 struct VmaSuballocationItemSizeLess
4898 {
4899  bool operator()(
4900  const VmaSuballocationList::iterator lhs,
4901  const VmaSuballocationList::iterator rhs) const
4902  {
4903  return lhs->size < rhs->size;
4904  }
4905  bool operator()(
4906  const VmaSuballocationList::iterator lhs,
4907  VkDeviceSize rhsSize) const
4908  {
4909  return lhs->size < rhsSize;
4910  }
4911 };
4912 
4914 // class VmaBlockMetadata
4915 
4916 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
4917  m_Size(0),
4918  m_FreeCount(0),
4919  m_SumFreeSize(0),
4920  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
4921  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
4922 {
4923 }
4924 
4925 VmaBlockMetadata::~VmaBlockMetadata()
4926 {
4927 }
4928 
4929 void VmaBlockMetadata::Init(VkDeviceSize size)
4930 {
4931  m_Size = size;
4932  m_FreeCount = 1;
4933  m_SumFreeSize = size;
4934 
4935  VmaSuballocation suballoc = {};
4936  suballoc.offset = 0;
4937  suballoc.size = size;
4938  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
4939  suballoc.hAllocation = VK_NULL_HANDLE;
4940 
4941  m_Suballocations.push_back(suballoc);
4942  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
4943  --suballocItem;
4944  m_FreeSuballocationsBySize.push_back(suballocItem);
4945 }
4946 
4947 bool VmaBlockMetadata::Validate() const
4948 {
4949  if(m_Suballocations.empty())
4950  {
4951  return false;
4952  }
4953 
4954  // Expected offset of new suballocation as calculated from previous ones.
4955  VkDeviceSize calculatedOffset = 0;
4956  // Expected number of free suballocations as calculated from traversing their list.
4957  uint32_t calculatedFreeCount = 0;
4958  // Expected sum size of free suballocations as calculated from traversing their list.
4959  VkDeviceSize calculatedSumFreeSize = 0;
4960  // Expected number of free suballocations that should be registered in
4961  // m_FreeSuballocationsBySize calculated from traversing their list.
4962  size_t freeSuballocationsToRegister = 0;
4963  // True if previously visited suballocation was free.
4964  bool prevFree = false;
4965 
4966  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
4967  suballocItem != m_Suballocations.cend();
4968  ++suballocItem)
4969  {
4970  const VmaSuballocation& subAlloc = *suballocItem;
4971 
4972  // Actual offset of this suballocation doesn't match expected one.
4973  if(subAlloc.offset != calculatedOffset)
4974  {
4975  return false;
4976  }
4977 
4978  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
4979  // Two adjacent free suballocations are invalid. They should be merged.
4980  if(prevFree && currFree)
4981  {
4982  return false;
4983  }
4984 
4985  if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
4986  {
4987  return false;
4988  }
4989 
4990  if(currFree)
4991  {
4992  calculatedSumFreeSize += subAlloc.size;
4993  ++calculatedFreeCount;
4994  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
4995  {
4996  ++freeSuballocationsToRegister;
4997  }
4998  }
4999  else
5000  {
5001  if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
5002  {
5003  return false;
5004  }
5005  if(subAlloc.hAllocation->GetSize() != subAlloc.size)
5006  {
5007  return false;
5008  }
5009  }
5010 
5011  calculatedOffset += subAlloc.size;
5012  prevFree = currFree;
5013  }
5014 
5015  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
5016  // match expected one.
5017  if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
5018  {
5019  return false;
5020  }
5021 
5022  VkDeviceSize lastSize = 0;
5023  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
5024  {
5025  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
5026 
5027  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
5028  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5029  {
5030  return false;
5031  }
5032  // They must be sorted by size ascending.
5033  if(suballocItem->size < lastSize)
5034  {
5035  return false;
5036  }
5037 
5038  lastSize = suballocItem->size;
5039  }
5040 
5041  // Check if totals match calculated values.
5042  if(!ValidateFreeSuballocationList() ||
5043  (calculatedOffset != m_Size) ||
5044  (calculatedSumFreeSize != m_SumFreeSize) ||
5045  (calculatedFreeCount != m_FreeCount))
5046  {
5047  return false;
5048  }
5049 
5050  return true;
5051 }
5052 
5053 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5054 {
5055  if(!m_FreeSuballocationsBySize.empty())
5056  {
5057  return m_FreeSuballocationsBySize.back()->size;
5058  }
5059  else
5060  {
5061  return 0;
5062  }
5063 }
5064 
5065 bool VmaBlockMetadata::IsEmpty() const
5066 {
5067  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5068 }
5069 
5070 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5071 {
5072  outInfo.blockCount = 1;
5073 
5074  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5075  outInfo.allocationCount = rangeCount - m_FreeCount;
5076  outInfo.unusedRangeCount = m_FreeCount;
5077 
5078  outInfo.unusedBytes = m_SumFreeSize;
5079  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5080 
5081  outInfo.allocationSizeMin = UINT64_MAX;
5082  outInfo.allocationSizeMax = 0;
5083  outInfo.unusedRangeSizeMin = UINT64_MAX;
5084  outInfo.unusedRangeSizeMax = 0;
5085 
5086  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5087  suballocItem != m_Suballocations.cend();
5088  ++suballocItem)
5089  {
5090  const VmaSuballocation& suballoc = *suballocItem;
5091  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5092  {
5093  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5094  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5095  }
5096  else
5097  {
5098  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5099  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5100  }
5101  }
5102 }
5103 
5104 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5105 {
5106  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5107 
5108  inoutStats.size += m_Size;
5109  inoutStats.unusedSize += m_SumFreeSize;
5110  inoutStats.allocationCount += rangeCount - m_FreeCount;
5111  inoutStats.unusedRangeCount += m_FreeCount;
5112  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5113 }
5114 
5115 #if VMA_STATS_STRING_ENABLED
5116 
5117 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5118 {
5119  json.BeginObject();
5120 
5121  json.WriteString("TotalBytes");
5122  json.WriteNumber(m_Size);
5123 
5124  json.WriteString("UnusedBytes");
5125  json.WriteNumber(m_SumFreeSize);
5126 
5127  json.WriteString("Allocations");
5128  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5129 
5130  json.WriteString("UnusedRanges");
5131  json.WriteNumber(m_FreeCount);
5132 
5133  json.WriteString("Suballocations");
5134  json.BeginArray();
5135  size_t i = 0;
5136  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5137  suballocItem != m_Suballocations.cend();
5138  ++suballocItem, ++i)
5139  {
5140  json.BeginObject(true);
5141 
5142  json.WriteString("Type");
5143  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5144 
5145  json.WriteString("Size");
5146  json.WriteNumber(suballocItem->size);
5147 
5148  json.WriteString("Offset");
5149  json.WriteNumber(suballocItem->offset);
5150 
5151  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5152  {
5153  const void* pUserData = suballocItem->hAllocation->GetUserData();
5154  if(pUserData != VMA_NULL)
5155  {
5156  json.WriteString("UserData");
5157  if(suballocItem->hAllocation->IsUserDataString())
5158  {
5159  json.WriteString((const char*)pUserData);
5160  }
5161  else
5162  {
5163  json.BeginString();
5164  json.ContinueString_Pointer(pUserData);
5165  json.EndString();
5166  }
5167  }
5168  }
5169 
5170  json.EndObject();
5171  }
5172  json.EndArray();
5173 
5174  json.EndObject();
5175 }
5176 
5177 #endif // #if VMA_STATS_STRING_ENABLED
5178 
5179 /*
5180 How many suitable free suballocations to analyze before choosing the best one.
5181 - Set to 1 to use the First-Fit algorithm - the first suitable free suballocation
5182  will be chosen.
5183 - Set to UINT32_MAX to use the Best-Fit/Worst-Fit algorithm - all suitable free
5184  suballocations will be analyzed and the best one will be chosen.
5185 - Any other value is also acceptable.
5186 */
5187 //static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5188 
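/*
Illustrative example of the best-fit path in CreateAllocationRequest() below
(sizes are assumptions): with m_FreeSuballocationsBySize holding free ranges of
sizes {64, 256, 1024} (sorted ascending) and allocSize == 200,
VmaBinaryFindFirstNotLess() lands on the 256-byte range, so the smallest free
range able to hold the request is tried first; only if CheckAllocation()
rejects it (e.g. due to alignment or bufferImageGranularity) does the search
advance to the 1024-byte range.
*/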
5189 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5190 {
5191  VMA_ASSERT(IsEmpty());
5192  pAllocationRequest->offset = 0;
5193  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5194  pAllocationRequest->sumItemSize = 0;
5195  pAllocationRequest->item = m_Suballocations.begin();
5196  pAllocationRequest->itemsToMakeLostCount = 0;
5197 }
5198 
5199 bool VmaBlockMetadata::CreateAllocationRequest(
5200  uint32_t currentFrameIndex,
5201  uint32_t frameInUseCount,
5202  VkDeviceSize bufferImageGranularity,
5203  VkDeviceSize allocSize,
5204  VkDeviceSize allocAlignment,
5205  VmaSuballocationType allocType,
5206  bool canMakeOtherLost,
5207  VmaAllocationRequest* pAllocationRequest)
5208 {
5209  VMA_ASSERT(allocSize > 0);
5210  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5211  VMA_ASSERT(pAllocationRequest != VMA_NULL);
5212  VMA_HEAVY_ASSERT(Validate());
5213 
5214  // There is not enough total free space in this block to fulfill the request: Early return.
5215  if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
5216  {
5217  return false;
5218  }
5219 
5220  // New algorithm, efficiently searching freeSuballocationsBySize.
5221  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
5222  if(freeSuballocCount > 0)
5223  {
5224  if(VMA_BEST_FIT)
5225  {
5226  // Find first free suballocation with size not less than allocSize.
5227  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5228  m_FreeSuballocationsBySize.data(),
5229  m_FreeSuballocationsBySize.data() + freeSuballocCount,
5230  allocSize,
5231  VmaSuballocationItemSizeLess());
5232  size_t index = it - m_FreeSuballocationsBySize.data();
5233  for(; index < freeSuballocCount; ++index)
5234  {
5235  if(CheckAllocation(
5236  currentFrameIndex,
5237  frameInUseCount,
5238  bufferImageGranularity,
5239  allocSize,
5240  allocAlignment,
5241  allocType,
5242  m_FreeSuballocationsBySize[index],
5243  false, // canMakeOtherLost
5244  &pAllocationRequest->offset,
5245  &pAllocationRequest->itemsToMakeLostCount,
5246  &pAllocationRequest->sumFreeSize,
5247  &pAllocationRequest->sumItemSize))
5248  {
5249  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5250  return true;
5251  }
5252  }
5253  }
5254  else
5255  {
5256  // Search starting from the biggest suballocations.
5257  for(size_t index = freeSuballocCount; index--; )
5258  {
5259  if(CheckAllocation(
5260  currentFrameIndex,
5261  frameInUseCount,
5262  bufferImageGranularity,
5263  allocSize,
5264  allocAlignment,
5265  allocType,
5266  m_FreeSuballocationsBySize[index],
5267  false, // canMakeOtherLost
5268  &pAllocationRequest->offset,
5269  &pAllocationRequest->itemsToMakeLostCount,
5270  &pAllocationRequest->sumFreeSize,
5271  &pAllocationRequest->sumItemSize))
5272  {
5273  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
5274  return true;
5275  }
5276  }
5277  }
5278  }
5279 
5280  if(canMakeOtherLost)
5281  {
5282  // Brute-force algorithm. TODO: Come up with something better.
5283 
5284  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
5285  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
5286 
5287  VmaAllocationRequest tmpAllocRequest = {};
5288  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
5289  suballocIt != m_Suballocations.end();
5290  ++suballocIt)
5291  {
5292  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
5293  suballocIt->hAllocation->CanBecomeLost())
5294  {
5295  if(CheckAllocation(
5296  currentFrameIndex,
5297  frameInUseCount,
5298  bufferImageGranularity,
5299  allocSize,
5300  allocAlignment,
5301  allocType,
5302  suballocIt,
5303  canMakeOtherLost,
5304  &tmpAllocRequest.offset,
5305  &tmpAllocRequest.itemsToMakeLostCount,
5306  &tmpAllocRequest.sumFreeSize,
5307  &tmpAllocRequest.sumItemSize))
5308  {
5309  tmpAllocRequest.item = suballocIt;
5310 
5311  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
5312  {
5313  *pAllocationRequest = tmpAllocRequest;
5314  }
5315  }
5316  }
5317  }
5318 
5319  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
5320  {
5321  return true;
5322  }
5323  }
5324 
5325  return false;
5326 }
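/*
A hypothetical cost comparison for the brute-force canMakeOtherLost path
above (assuming CalcCost() grows with sumItemSize, i.e. with the number of
bytes of other allocations that would have to be made lost):

  // request A: itemsToMakeLostCount = 2, sumItemSize = 128 -> expensive
  // request B: itemsToMakeLostCount = 0, sumItemSize = 0   -> cost 0

Request B wins, so the loop keeps the cheapest request found so far and only
returns false if no candidate was found at all.
*/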
5327 
5328 bool VmaBlockMetadata::MakeRequestedAllocationsLost(
5329  uint32_t currentFrameIndex,
5330  uint32_t frameInUseCount,
5331  VmaAllocationRequest* pAllocationRequest)
5332 {
5333  while(pAllocationRequest->itemsToMakeLostCount > 0)
5334  {
5335  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
5336  {
5337  ++pAllocationRequest->item;
5338  }
5339  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5340  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
5341  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
5342  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5343  {
5344  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
5345  --pAllocationRequest->itemsToMakeLostCount;
5346  }
5347  else
5348  {
5349  return false;
5350  }
5351  }
5352 
5353  VMA_HEAVY_ASSERT(Validate());
5354  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
5355  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
5356 
5357  return true;
5358 }
5359 
5360 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5361 {
5362  uint32_t lostAllocationCount = 0;
5363  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5364  it != m_Suballocations.end();
5365  ++it)
5366  {
5367  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5368  it->hAllocation->CanBecomeLost() &&
5369  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5370  {
5371  it = FreeSuballocation(it);
5372  ++lostAllocationCount;
5373  }
5374  }
5375  return lostAllocationCount;
5376 }
5377 
5378 void VmaBlockMetadata::Alloc(
5379  const VmaAllocationRequest& request,
5380  VmaSuballocationType type,
5381  VkDeviceSize allocSize,
5382  VmaAllocation hAllocation)
5383 {
5384  VMA_ASSERT(request.item != m_Suballocations.end());
5385  VmaSuballocation& suballoc = *request.item;
5386  // Given suballocation is a free block.
5387  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5388  // Given offset is inside this suballocation.
5389  VMA_ASSERT(request.offset >= suballoc.offset);
5390  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
5391  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
5392  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
5393 
5394  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
5395  // it to become used.
5396  UnregisterFreeSuballocation(request.item);
5397 
5398  suballoc.offset = request.offset;
5399  suballoc.size = allocSize;
5400  suballoc.type = type;
5401  suballoc.hAllocation = hAllocation;
5402 
5403  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
5404  if(paddingEnd)
5405  {
5406  VmaSuballocation paddingSuballoc = {};
5407  paddingSuballoc.offset = request.offset + allocSize;
5408  paddingSuballoc.size = paddingEnd;
5409  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5410  VmaSuballocationList::iterator next = request.item;
5411  ++next;
5412  const VmaSuballocationList::iterator paddingEndItem =
5413  m_Suballocations.insert(next, paddingSuballoc);
5414  RegisterFreeSuballocation(paddingEndItem);
5415  }
5416 
5417  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
5418  if(paddingBegin)
5419  {
5420  VmaSuballocation paddingSuballoc = {};
5421  paddingSuballoc.offset = request.offset - paddingBegin;
5422  paddingSuballoc.size = paddingBegin;
5423  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5424  const VmaSuballocationList::iterator paddingBeginItem =
5425  m_Suballocations.insert(request.item, paddingSuballoc);
5426  RegisterFreeSuballocation(paddingBeginItem);
5427  }
5428 
5429  // Update totals.
5430  m_FreeCount = m_FreeCount - 1;
5431  if(paddingBegin > 0)
5432  {
5433  ++m_FreeCount;
5434  }
5435  if(paddingEnd > 0)
5436  {
5437  ++m_FreeCount;
5438  }
5439  m_SumFreeSize -= allocSize;
5440 }
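/*
Worked example for Alloc() above (hypothetical numbers): a free suballocation
covers [0, 256), and the request was resolved to offset 64 with allocSize 128.

  paddingBegin = 64 - 0 = 64
  paddingEnd   = 256 - 64 - 128 = 64

Resulting layout: [Free 0..64)[Used 64..192)[Free 192..256). Bookkeeping:
m_FreeCount loses the consumed range (-1) and gains one per non-zero padding
(+2), and m_SumFreeSize decreases by exactly allocSize (128).
*/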
5441 
5442 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5443 {
5444  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5445  suballocItem != m_Suballocations.end();
5446  ++suballocItem)
5447  {
5448  VmaSuballocation& suballoc = *suballocItem;
5449  if(suballoc.hAllocation == allocation)
5450  {
5451  FreeSuballocation(suballocItem);
5452  VMA_HEAVY_ASSERT(Validate());
5453  return;
5454  }
5455  }
5456  VMA_ASSERT(0 && "Not found!");
5457 }
5458 
5459 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5460 {
5461  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5462  suballocItem != m_Suballocations.end();
5463  ++suballocItem)
5464  {
5465  VmaSuballocation& suballoc = *suballocItem;
5466  if(suballoc.offset == offset)
5467  {
5468  FreeSuballocation(suballocItem);
5469  return;
5470  }
5471  }
5472  VMA_ASSERT(0 && "Not found!");
5473 }
5474 
5475 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5476 {
5477  VkDeviceSize lastSize = 0;
5478  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5479  {
5480  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5481 
5482  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5483  {
5484  VMA_ASSERT(0);
5485  return false;
5486  }
5487  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5488  {
5489  VMA_ASSERT(0);
5490  return false;
5491  }
5492  if(it->size < lastSize)
5493  {
5494  VMA_ASSERT(0);
5495  return false;
5496  }
5497 
5498  lastSize = it->size;
5499  }
5500  return true;
5501 }
5502 
5503 bool VmaBlockMetadata::CheckAllocation(
5504  uint32_t currentFrameIndex,
5505  uint32_t frameInUseCount,
5506  VkDeviceSize bufferImageGranularity,
5507  VkDeviceSize allocSize,
5508  VkDeviceSize allocAlignment,
5509  VmaSuballocationType allocType,
5510  VmaSuballocationList::const_iterator suballocItem,
5511  bool canMakeOtherLost,
5512  VkDeviceSize* pOffset,
5513  size_t* itemsToMakeLostCount,
5514  VkDeviceSize* pSumFreeSize,
5515  VkDeviceSize* pSumItemSize) const
5516 {
5517  VMA_ASSERT(allocSize > 0);
5518  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
5519  VMA_ASSERT(suballocItem != m_Suballocations.cend());
5520  VMA_ASSERT(pOffset != VMA_NULL);
5521 
5522  *itemsToMakeLostCount = 0;
5523  *pSumFreeSize = 0;
5524  *pSumItemSize = 0;
5525 
5526  if(canMakeOtherLost)
5527  {
5528  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5529  {
5530  *pSumFreeSize = suballocItem->size;
5531  }
5532  else
5533  {
5534  if(suballocItem->hAllocation->CanBecomeLost() &&
5535  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5536  {
5537  ++*itemsToMakeLostCount;
5538  *pSumItemSize = suballocItem->size;
5539  }
5540  else
5541  {
5542  return false;
5543  }
5544  }
5545 
5546  // Remaining size is too small for this request: Early return.
5547  if(m_Size - suballocItem->offset < allocSize)
5548  {
5549  return false;
5550  }
5551 
5552  // Start from offset equal to beginning of this suballocation.
5553  *pOffset = suballocItem->offset;
5554 
5555  // Apply VMA_DEBUG_MARGIN at the beginning.
5556  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5557  {
5558  *pOffset += VMA_DEBUG_MARGIN;
5559  }
5560 
5561  // Apply alignment.
5562  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5563  *pOffset = VmaAlignUp(*pOffset, alignment);
5564 
5565  // Check previous suballocations for BufferImageGranularity conflicts.
5566  // Make bigger alignment if necessary.
5567  if(bufferImageGranularity > 1)
5568  {
5569  bool bufferImageGranularityConflict = false;
5570  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5571  while(prevSuballocItem != m_Suballocations.cbegin())
5572  {
5573  --prevSuballocItem;
5574  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5575  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5576  {
5577  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5578  {
5579  bufferImageGranularityConflict = true;
5580  break;
5581  }
5582  }
5583  else
5584  // Already on previous page.
5585  break;
5586  }
5587  if(bufferImageGranularityConflict)
5588  {
5589  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5590  }
5591  }
5592 
5593  // Now that we have the final *pOffset, check whether we are past suballocItem.
5594  // If yes, return false - this function should be called for another suballocItem as the starting point.
5595  if(*pOffset >= suballocItem->offset + suballocItem->size)
5596  {
5597  return false;
5598  }
5599 
5600  // Calculate padding at the beginning based on current offset.
5601  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
5602 
5603  // Calculate required margin at the end if this is not last suballocation.
5604  VmaSuballocationList::const_iterator next = suballocItem;
5605  ++next;
5606  const VkDeviceSize requiredEndMargin =
5607  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5608 
5609  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
5610  // Another early return check.
5611  if(suballocItem->offset + totalSize > m_Size)
5612  {
5613  return false;
5614  }
5615 
5616  // Advance lastSuballocItem until desired size is reached.
5617  // Update itemsToMakeLostCount.
5618  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
5619  if(totalSize > suballocItem->size)
5620  {
5621  VkDeviceSize remainingSize = totalSize - suballocItem->size;
5622  while(remainingSize > 0)
5623  {
5624  ++lastSuballocItem;
5625  if(lastSuballocItem == m_Suballocations.cend())
5626  {
5627  return false;
5628  }
5629  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5630  {
5631  *pSumFreeSize += lastSuballocItem->size;
5632  }
5633  else
5634  {
5635  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
5636  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
5637  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5638  {
5639  ++*itemsToMakeLostCount;
5640  *pSumItemSize += lastSuballocItem->size;
5641  }
5642  else
5643  {
5644  return false;
5645  }
5646  }
5647  remainingSize = (lastSuballocItem->size < remainingSize) ?
5648  remainingSize - lastSuballocItem->size : 0;
5649  }
5650  }
5651 
5652  // Check next suballocations for BufferImageGranularity conflicts.
5653  // If conflict exists, we must mark more allocations lost or fail.
5654  if(bufferImageGranularity > 1)
5655  {
5656  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
5657  ++nextSuballocItem;
5658  while(nextSuballocItem != m_Suballocations.cend())
5659  {
5660  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5661  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5662  {
5663  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5664  {
5665  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
5666  if(nextSuballoc.hAllocation->CanBecomeLost() &&
5667  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
5668  {
5669  ++*itemsToMakeLostCount;
5670  }
5671  else
5672  {
5673  return false;
5674  }
5675  }
5676  }
5677  else
5678  {
5679  // Already on next page.
5680  break;
5681  }
5682  ++nextSuballocItem;
5683  }
5684  }
5685  }
5686  else
5687  {
5688  const VmaSuballocation& suballoc = *suballocItem;
5689  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
5690 
5691  *pSumFreeSize = suballoc.size;
5692 
5693  // Size of this suballocation is too small for this request: Early return.
5694  if(suballoc.size < allocSize)
5695  {
5696  return false;
5697  }
5698 
5699  // Start from offset equal to beginning of this suballocation.
5700  *pOffset = suballoc.offset;
5701 
5702  // Apply VMA_DEBUG_MARGIN at the beginning.
5703  if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
5704  {
5705  *pOffset += VMA_DEBUG_MARGIN;
5706  }
5707 
5708  // Apply alignment.
5709  const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
5710  *pOffset = VmaAlignUp(*pOffset, alignment);
5711 
5712  // Check previous suballocations for BufferImageGranularity conflicts.
5713  // Make bigger alignment if necessary.
5714  if(bufferImageGranularity > 1)
5715  {
5716  bool bufferImageGranularityConflict = false;
5717  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
5718  while(prevSuballocItem != m_Suballocations.cbegin())
5719  {
5720  --prevSuballocItem;
5721  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
5722  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
5723  {
5724  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
5725  {
5726  bufferImageGranularityConflict = true;
5727  break;
5728  }
5729  }
5730  else
5731  // Already on previous page.
5732  break;
5733  }
5734  if(bufferImageGranularityConflict)
5735  {
5736  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
5737  }
5738  }
5739 
5740  // Calculate padding at the beginning based on current offset.
5741  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
5742 
5743  // Calculate required margin at the end if this is not last suballocation.
5744  VmaSuballocationList::const_iterator next = suballocItem;
5745  ++next;
5746  const VkDeviceSize requiredEndMargin =
5747  (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;
5748 
5749  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
5750  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
5751  {
5752  return false;
5753  }
5754 
5755  // Check next suballocations for BufferImageGranularity conflicts.
5756  // If conflict exists, allocation cannot be made here.
5757  if(bufferImageGranularity > 1)
5758  {
5759  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
5760  ++nextSuballocItem;
5761  while(nextSuballocItem != m_Suballocations.cend())
5762  {
5763  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
5764  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
5765  {
5766  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
5767  {
5768  return false;
5769  }
5770  }
5771  else
5772  {
5773  // Already on next page.
5774  break;
5775  }
5776  ++nextSuballocItem;
5777  }
5778  }
5779  }
5780 
5781  // All tests passed: Success. pOffset is already filled.
5782  return true;
5783 }
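/*
Worked example of the offset computation above (hypothetical values, assuming
VMA_DEBUG_MARGIN is configured to 16 - it is 0 by default): a free
suballocation starts at offset 1000 and is not the first one; the requested
alignment is 256.

  *pOffset = 1000 + 16 = 1016        // apply debug margin
  *pOffset = VmaAlignUp(1016, 256)   // -> 1024
  paddingBegin = 1024 - 1000 = 24

The request then succeeds only if 24 + allocSize + requiredEndMargin still
fits within the suballocation's size.
*/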
5784 
5785 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
5786 {
5787  VMA_ASSERT(item != m_Suballocations.end());
5788  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5789 
5790  VmaSuballocationList::iterator nextItem = item;
5791  ++nextItem;
5792  VMA_ASSERT(nextItem != m_Suballocations.end());
5793  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
5794 
5795  item->size += nextItem->size;
5796  --m_FreeCount;
5797  m_Suballocations.erase(nextItem);
5798 }
5799 
5800 VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
5801 {
5802  // Change this suballocation to be marked as free.
5803  VmaSuballocation& suballoc = *suballocItem;
5804  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5805  suballoc.hAllocation = VK_NULL_HANDLE;
5806 
5807  // Update totals.
5808  ++m_FreeCount;
5809  m_SumFreeSize += suballoc.size;
5810 
5811  // Merge with previous and/or next suballocation if it's also free.
5812  bool mergeWithNext = false;
5813  bool mergeWithPrev = false;
5814 
5815  VmaSuballocationList::iterator nextItem = suballocItem;
5816  ++nextItem;
5817  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
5818  {
5819  mergeWithNext = true;
5820  }
5821 
5822  VmaSuballocationList::iterator prevItem = suballocItem;
5823  if(suballocItem != m_Suballocations.begin())
5824  {
5825  --prevItem;
5826  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
5827  {
5828  mergeWithPrev = true;
5829  }
5830  }
5831 
5832  if(mergeWithNext)
5833  {
5834  UnregisterFreeSuballocation(nextItem);
5835  MergeFreeWithNext(suballocItem);
5836  }
5837 
5838  if(mergeWithPrev)
5839  {
5840  UnregisterFreeSuballocation(prevItem);
5841  MergeFreeWithNext(prevItem);
5842  RegisterFreeSuballocation(prevItem);
5843  return prevItem;
5844  }
5845  else
5846  {
5847  RegisterFreeSuballocation(suballocItem);
5848  return suballocItem;
5849  }
5850 }
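/*
Coalescing example for FreeSuballocation() above (hypothetical layout):

  before: [Free 0..64)[Used 64..128)[Free 128..256)   m_FreeCount == 2
  free the middle range:
    - mark it free                                    m_FreeCount -> 3
    - mergeWithNext merges [64..128) with [128..256)  m_FreeCount -> 2
    - mergeWithPrev merges [0..64) with the result    m_FreeCount -> 1
  after:  [Free 0..256)

Only the surviving iterator (prevItem in that case) is re-registered in
m_FreeSuballocationsBySize, which is why the function returns it.
*/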
5851 
5852 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
5853 {
5854  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5855  VMA_ASSERT(item->size > 0);
5856 
5857  // You may want to enable this validation at the beginning or at the end of
5858  // this function, depending on what you want to check.
5859  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5860 
5861  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5862  {
5863  if(m_FreeSuballocationsBySize.empty())
5864  {
5865  m_FreeSuballocationsBySize.push_back(item);
5866  }
5867  else
5868  {
5869  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
5870  }
5871  }
5872 
5873  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5874 }
5875 
5876 
5877 void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
5878 {
5879  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
5880  VMA_ASSERT(item->size > 0);
5881 
5882  // You may want to enable this validation at the beginning or at the end of
5883  // this function, depending on what you want to check.
5884  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5885 
5886  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5887  {
5888  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
5889  m_FreeSuballocationsBySize.data(),
5890  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
5891  item,
5892  VmaSuballocationItemSizeLess());
5893  for(size_t index = it - m_FreeSuballocationsBySize.data();
5894  index < m_FreeSuballocationsBySize.size();
5895  ++index)
5896  {
5897  if(m_FreeSuballocationsBySize[index] == item)
5898  {
5899  VmaVectorRemove(m_FreeSuballocationsBySize, index);
5900  return;
5901  }
5902  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
5903  }
5904  VMA_ASSERT(0 && "Not found.");
5905  }
5906 
5907  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
5908 }
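/*
Why the linear scan after the binary search above: m_FreeSuballocationsBySize
is sorted only by size, so equal sizes can repeat and the binary search merely
finds the first candidate of that size. Sketch with hypothetical sizes:

  sizes: [32, 64, 64, 64, 128], removing one of the 64-byte entries
  -> the binary search lands on index 1; the loop then compares iterators at
     indices 1..3 until it finds the exact item. Leaving the run of equal
     sizes without a match trips the "Not found." assert.
*/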
5909 
5910 ////////////////////////////////////////////////////////////////////////////////
5911 // class VmaDeviceMemoryMapping
5912 
5913 VmaDeviceMemoryMapping::VmaDeviceMemoryMapping() :
5914  m_MapCount(0),
5915  m_pMappedData(VMA_NULL)
5916 {
5917 }
5918 
5919 VmaDeviceMemoryMapping::~VmaDeviceMemoryMapping()
5920 {
5921  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
5922 }
5923 
5924 VkResult VmaDeviceMemoryMapping::Map(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count, void **ppData)
5925 {
5926  if(count == 0)
5927  {
5928  return VK_SUCCESS;
5929  }
5930 
5931  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5932  if(m_MapCount != 0)
5933  {
5934  m_MapCount += count;
5935  VMA_ASSERT(m_pMappedData != VMA_NULL);
5936  if(ppData != VMA_NULL)
5937  {
5938  *ppData = m_pMappedData;
5939  }
5940  return VK_SUCCESS;
5941  }
5942  else
5943  {
5944  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
5945  hAllocator->m_hDevice,
5946  hMemory,
5947  0, // offset
5948  VK_WHOLE_SIZE,
5949  0, // flags
5950  &m_pMappedData);
5951  if(result == VK_SUCCESS)
5952  {
5953  if(ppData != VMA_NULL)
5954  {
5955  *ppData = m_pMappedData;
5956  }
5957  m_MapCount = count;
5958  }
5959  return result;
5960  }
5961 }
5962 
5963 void VmaDeviceMemoryMapping::Unmap(VmaAllocator hAllocator, VkDeviceMemory hMemory, uint32_t count)
5964 {
5965  if(count == 0)
5966  {
5967  return;
5968  }
5969 
5970  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
5971  if(m_MapCount >= count)
5972  {
5973  m_MapCount -= count;
5974  if(m_MapCount == 0)
5975  {
5976  m_pMappedData = VMA_NULL;
5977  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, hMemory);
5978  }
5979  }
5980  else
5981  {
5982  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
5983  }
5984 }
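/*
Usage sketch for the reference-counted mapping above (illustrative only, not
library API): only the first Map() calls vkMapMemory, and only the Unmap()
that drops m_MapCount to zero calls vkUnmapMemory, so nested map/unmap pairs
are cheap.

  void* pData = VMA_NULL;
  mapping.Map(hAllocator, hMemory, 1, &pData); // calls vkMapMemory
  mapping.Map(hAllocator, hMemory, 1, &pData); // just increments m_MapCount
  mapping.Unmap(hAllocator, hMemory, 1);       // m_MapCount 2 -> 1
  mapping.Unmap(hAllocator, hMemory, 1);       // calls vkUnmapMemory
*/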
5985 
5986 ////////////////////////////////////////////////////////////////////////////////
5987 // class VmaDeviceMemoryBlock
5988 
5989 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
5990  m_MemoryTypeIndex(UINT32_MAX),
5991  m_hMemory(VK_NULL_HANDLE),
5992  m_Metadata(hAllocator)
5993 {
5994 }
5995 
5996 void VmaDeviceMemoryBlock::Init(
5997  uint32_t newMemoryTypeIndex,
5998  VkDeviceMemory newMemory,
5999  VkDeviceSize newSize)
6000 {
6001  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6002 
6003  m_MemoryTypeIndex = newMemoryTypeIndex;
6004  m_hMemory = newMemory;
6005 
6006  m_Metadata.Init(newSize);
6007 }
6008 
6009 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
6010 {
6011  // This is the most important assert in the entire library.
6012  // Hitting it means you have a memory leak - unreleased VmaAllocation objects.
6013  VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
6014 
6015  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
6016  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
6017  m_hMemory = VK_NULL_HANDLE;
6018 }
6019 
6020 bool VmaDeviceMemoryBlock::Validate() const
6021 {
6022  if((m_hMemory == VK_NULL_HANDLE) ||
6023  (m_Metadata.GetSize() == 0))
6024  {
6025  return false;
6026  }
6027 
6028  return m_Metadata.Validate();
6029 }
6030 
6031 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
6032 {
6033  return m_Mapping.Map(hAllocator, m_hMemory, count, ppData);
6034 }
6035 
6036 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6037 {
6038  m_Mapping.Unmap(hAllocator, m_hMemory, count);
6039 }
6040 
6041 static void InitStatInfo(VmaStatInfo& outInfo)
6042 {
6043  memset(&outInfo, 0, sizeof(outInfo));
6044  outInfo.allocationSizeMin = UINT64_MAX;
6045  outInfo.unusedRangeSizeMin = UINT64_MAX;
6046 }
6047 
6048 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
6049 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
6050 {
6051  inoutInfo.blockCount += srcInfo.blockCount;
6052  inoutInfo.allocationCount += srcInfo.allocationCount;
6053  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
6054  inoutInfo.usedBytes += srcInfo.usedBytes;
6055  inoutInfo.unusedBytes += srcInfo.unusedBytes;
6056  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
6057  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
6058  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
6059  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
6060 }
6061 
6062 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6063 {
6064  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6065  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6066  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6067  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6068 }
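/*
Numeric example for the averages above (assuming VmaRoundDiv divides with
rounding to nearest): usedBytes = 100 and allocationCount = 3 give
allocationSizeAvg = 33; unusedBytes = 1000 and unusedRangeCount = 4 give
unusedRangeSizeAvg = 250. Zero counts leave the averages at 0 instead of
dividing by zero.
*/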
6069 
6070 VmaPool_T::VmaPool_T(
6071  VmaAllocator hAllocator,
6072  const VmaPoolCreateInfo& createInfo) :
6073  m_BlockVector(
6074  hAllocator,
6075  createInfo.memoryTypeIndex,
6076  createInfo.blockSize,
6077  createInfo.minBlockCount,
6078  createInfo.maxBlockCount,
6079  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
6080  createInfo.frameInUseCount,
6081  true) // isCustomPool
6082 {
6083 }
6084 
6085 VmaPool_T::~VmaPool_T()
6086 {
6087 }
6088 
6089 #if VMA_STATS_STRING_ENABLED
6090 
6091 #endif // #if VMA_STATS_STRING_ENABLED
6092 
6093 VmaBlockVector::VmaBlockVector(
6094  VmaAllocator hAllocator,
6095  uint32_t memoryTypeIndex,
6096  VkDeviceSize preferredBlockSize,
6097  size_t minBlockCount,
6098  size_t maxBlockCount,
6099  VkDeviceSize bufferImageGranularity,
6100  uint32_t frameInUseCount,
6101  bool isCustomPool) :
6102  m_hAllocator(hAllocator),
6103  m_MemoryTypeIndex(memoryTypeIndex),
6104  m_PreferredBlockSize(preferredBlockSize),
6105  m_MinBlockCount(minBlockCount),
6106  m_MaxBlockCount(maxBlockCount),
6107  m_BufferImageGranularity(bufferImageGranularity),
6108  m_FrameInUseCount(frameInUseCount),
6109  m_IsCustomPool(isCustomPool),
6110  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
6111  m_HasEmptyBlock(false),
6112  m_pDefragmentator(VMA_NULL)
6113 {
6114 }
6115 
6116 VmaBlockVector::~VmaBlockVector()
6117 {
6118  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6119 
6120  for(size_t i = m_Blocks.size(); i--; )
6121  {
6122  m_Blocks[i]->Destroy(m_hAllocator);
6123  vma_delete(m_hAllocator, m_Blocks[i]);
6124  }
6125 }
6126 
6127 VkResult VmaBlockVector::CreateMinBlocks()
6128 {
6129  for(size_t i = 0; i < m_MinBlockCount; ++i)
6130  {
6131  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6132  if(res != VK_SUCCESS)
6133  {
6134  return res;
6135  }
6136  }
6137  return VK_SUCCESS;
6138 }
6139 
6140 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6141 {
6142  pStats->size = 0;
6143  pStats->unusedSize = 0;
6144  pStats->allocationCount = 0;
6145  pStats->unusedRangeCount = 0;
6146  pStats->unusedRangeSizeMax = 0;
6147 
6148  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6149 
6150  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6151  {
6152  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6153  VMA_ASSERT(pBlock);
6154  VMA_HEAVY_ASSERT(pBlock->Validate());
6155  pBlock->m_Metadata.AddPoolStats(*pStats);
6156  }
6157 }
6158 
6159 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6160 
6161 VkResult VmaBlockVector::Allocate(
6162  VmaPool hCurrentPool,
6163  uint32_t currentFrameIndex,
6164  const VkMemoryRequirements& vkMemReq,
6165  const VmaAllocationCreateInfo& createInfo,
6166  VmaSuballocationType suballocType,
6167  VmaAllocation* pAllocation)
6168 {
6169  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6170  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6171 
6172  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6173 
6174  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6175  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6176  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6177  {
6178  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6179  VMA_ASSERT(pCurrBlock);
6180  VmaAllocationRequest currRequest = {};
6181  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6182  currentFrameIndex,
6183  m_FrameInUseCount,
6184  m_BufferImageGranularity,
6185  vkMemReq.size,
6186  vkMemReq.alignment,
6187  suballocType,
6188  false, // canMakeOtherLost
6189  &currRequest))
6190  {
6191  // Allocate from pCurrBlock.
6192  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6193 
6194  if(mapped)
6195  {
6196  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6197  if(res != VK_SUCCESS)
6198  {
6199  return res;
6200  }
6201  }
6202 
6203  // We no longer have an empty block.
6204  if(pCurrBlock->m_Metadata.IsEmpty())
6205  {
6206  m_HasEmptyBlock = false;
6207  }
6208 
6209  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6210  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6211  (*pAllocation)->InitBlockAllocation(
6212  hCurrentPool,
6213  pCurrBlock,
6214  currRequest.offset,
6215  vkMemReq.alignment,
6216  vkMemReq.size,
6217  suballocType,
6218  mapped,
6219  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6220  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6221  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6222  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6223  return VK_SUCCESS;
6224  }
6225  }
6226 
6227  const bool canCreateNewBlock =
6228  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6229  (m_Blocks.size() < m_MaxBlockCount);
6230 
6231  // 2. Try to create new block.
6232  if(canCreateNewBlock)
6233  {
6234  // Calculate optimal size for new block.
6235  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6236  uint32_t newBlockSizeShift = 0;
6237  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6238 
6239  // Allocating blocks of other sizes is allowed only in default pools.
6240  // In custom pools block size is fixed.
6241  if(m_IsCustomPool == false)
6242  {
6243  // Allocate 1/8, 1/4, 1/2 as first blocks.
6244  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6245  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6246  {
6247  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6248  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6249  {
6250  newBlockSize = smallerNewBlockSize;
6251  ++newBlockSizeShift;
6252  }
6253  else
6254  {
6255  break;
6256  }
6257  }
6258  }
6259 
6260  size_t newBlockIndex = 0;
6261  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6262  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6263  if(m_IsCustomPool == false)
6264  {
6265  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6266  {
6267  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6268  if(smallerNewBlockSize >= vkMemReq.size)
6269  {
6270  newBlockSize = smallerNewBlockSize;
6271  ++newBlockSizeShift;
6272  res = CreateBlock(newBlockSize, &newBlockIndex);
6273  }
6274  else
6275  {
6276  break;
6277  }
6278  }
6279  }
6280 
6281  if(res == VK_SUCCESS)
6282  {
6283  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6284  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6285 
6286  if(mapped)
6287  {
6288  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6289  if(res != VK_SUCCESS)
6290  {
6291  return res;
6292  }
6293  }
6294 
6295  // Allocate from pBlock. Because it is empty, allocRequest can be trivially filled.
6296  VmaAllocationRequest allocRequest;
6297  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6298  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6299  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6300  (*pAllocation)->InitBlockAllocation(
6301  hCurrentPool,
6302  pBlock,
6303  allocRequest.offset,
6304  vkMemReq.alignment,
6305  vkMemReq.size,
6306  suballocType,
6307  mapped,
6308  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6309  VMA_HEAVY_ASSERT(pBlock->Validate());
6310  VMA_DEBUG_LOG("    Created new block Size=%llu", newBlockSize);
6311  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6312  return VK_SUCCESS;
6313  }
6314  }
6315 
6316  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6317 
6318  // 3. Try to allocate from existing blocks with making other allocations lost.
6319  if(canMakeOtherLost)
6320  {
6321  uint32_t tryIndex = 0;
6322  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6323  {
6324  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6325  VmaAllocationRequest bestRequest = {};
6326  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6327 
6328  // 1. Search existing allocations.
6329  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6330  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6331  {
6332  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6333  VMA_ASSERT(pCurrBlock);
6334  VmaAllocationRequest currRequest = {};
6335  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6336  currentFrameIndex,
6337  m_FrameInUseCount,
6338  m_BufferImageGranularity,
6339  vkMemReq.size,
6340  vkMemReq.alignment,
6341  suballocType,
6342  canMakeOtherLost,
6343  &currRequest))
6344  {
6345  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6346  if(pBestRequestBlock == VMA_NULL ||
6347  currRequestCost < bestRequestCost)
6348  {
6349  pBestRequestBlock = pCurrBlock;
6350  bestRequest = currRequest;
6351  bestRequestCost = currRequestCost;
6352 
6353  if(bestRequestCost == 0)
6354  {
6355  break;
6356  }
6357  }
6358  }
6359  }
6360 
6361  if(pBestRequestBlock != VMA_NULL)
6362  {
6363  if(mapped)
6364  {
6365  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6366  if(res != VK_SUCCESS)
6367  {
6368  return res;
6369  }
6370  }
6371 
6372  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6373  currentFrameIndex,
6374  m_FrameInUseCount,
6375  &bestRequest))
6376  {
6377  // We no longer have an empty block.
6378  if(pBestRequestBlock->m_Metadata.IsEmpty())
6379  {
6380  m_HasEmptyBlock = false;
6381  }
6382  // Allocate from this pBlock.
6383  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6384  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6385  (*pAllocation)->InitBlockAllocation(
6386  hCurrentPool,
6387  pBestRequestBlock,
6388  bestRequest.offset,
6389  vkMemReq.alignment,
6390  vkMemReq.size,
6391  suballocType,
6392  mapped,
6393  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6394  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6395  VMA_DEBUG_LOG("    Returned from existing block");
6396  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6397  return VK_SUCCESS;
6398  }
6399  // else: Some allocations must have been touched while we were here. Next try.
6400  }
6401  else
6402  {
6403  // Could not find place in any of the blocks - break outer loop.
6404  break;
6405  }
6406  }
6407  /* Maximum number of tries exceeded - a very unlikely event that may occur when
6408  many other threads are simultaneously touching allocations, making it
6409  impossible to make them lost while we are trying to allocate. */
6410  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6411  {
6412  return VK_ERROR_TOO_MANY_OBJECTS;
6413  }
6414  }
6415 
6416  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6417 }
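/*
Sketch of the block size heuristic in step 2 above (hypothetical numbers):
with m_PreferredBlockSize = 256 MiB, an empty default pool and a 1 MiB
request, the pre-allocation loop halves the size up to
NEW_BLOCK_SIZE_SHIFT_MAX = 3 times (256 -> 128 -> 64 -> 32 MiB), so the pool
starts with a modest 32 MiB block. If the largest existing block were already
64 MiB, halving would stop at 128 MiB, and a failing vkAllocateMemory would
then be retried at 64 MiB and finally 32 MiB.
*/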
6418 
6419 void VmaBlockVector::Free(
6420  VmaAllocation hAllocation)
6421 {
6422  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6423 
6424  // Scope for lock.
6425  {
6426  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6427 
6428  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6429 
6430  if(hAllocation->IsPersistentMap())
6431  {
6432  pBlock->m_Mapping.Unmap(m_hAllocator, pBlock->m_hMemory, 1);
6433  }
6434 
6435  pBlock->m_Metadata.Free(hAllocation);
6436  VMA_HEAVY_ASSERT(pBlock->Validate());
6437 
6438  VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
6439 
6440  // pBlock became empty after this deallocation.
6441  if(pBlock->m_Metadata.IsEmpty())
6442  {
6443  // We already have an empty block - we don't want two, so delete this one.
6444  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6445  {
6446  pBlockToDelete = pBlock;
6447  Remove(pBlock);
6448  }
6449  // We now have our first empty block.
6450  else
6451  {
6452  m_HasEmptyBlock = true;
6453  }
6454  }
6455  // pBlock didn't become empty, but we have another empty block - find and free that one.
6456  // (This is optional, heuristics.)
6457  else if(m_HasEmptyBlock)
6458  {
6459  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6460  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6461  {
6462  pBlockToDelete = pLastBlock;
6463  m_Blocks.pop_back();
6464  m_HasEmptyBlock = false;
6465  }
6466  }
6467 
6468  IncrementallySortBlocks();
6469  }
6470 
6471  // Destruction of an empty block. Deferred until this point, outside of the
6472  // mutex lock, for performance reasons.
6473  if(pBlockToDelete != VMA_NULL)
6474  {
6475  VMA_DEBUG_LOG("    Deleted empty block");
6476  pBlockToDelete->Destroy(m_hAllocator);
6477  vma_delete(m_hAllocator, pBlockToDelete);
6478  }
6479 }
6480 
6481 size_t VmaBlockVector::CalcMaxBlockSize() const
6482 {
6483  size_t result = 0;
6484  for(size_t i = m_Blocks.size(); i--; )
6485  {
6486  result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
6487  if(result >= m_PreferredBlockSize)
6488  {
6489  break;
6490  }
6491  }
6492  return result;
6493 }
6494 
6495 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6496 {
6497  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6498  {
6499  if(m_Blocks[blockIndex] == pBlock)
6500  {
6501  VmaVectorRemove(m_Blocks, blockIndex);
6502  return;
6503  }
6504  }
6505  VMA_ASSERT(0);
6506 }
6507 
6508 void VmaBlockVector::IncrementallySortBlocks()
6509 {
6510  // Bubble sort only until first swap.
6511  for(size_t i = 1; i < m_Blocks.size(); ++i)
6512  {
6513  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6514  {
6515  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6516  return;
6517  }
6518  }
6519 }
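/*
Example for IncrementallySortBlocks() above: with sum-free-sizes
[10, 50, 20] one call swaps 50 and 20, yielding the fully sorted [10, 20, 50].
Performing at most one swap per allocation/free amortizes the sorting cost
while keeping blocks with the least free space near the front, which is the
order the allocation loops prefer.
*/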
6520 
6521 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6522 {
6523  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6524  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6525  allocInfo.allocationSize = blockSize;
6526  VkDeviceMemory mem = VK_NULL_HANDLE;
6527  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6528  if(res < 0)
6529  {
6530  return res;
6531  }
6532 
6533  // New VkDeviceMemory successfully created.
6534 
6535  // Create new block object for it.
6536  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6537  pBlock->Init(
6538  m_MemoryTypeIndex,
6539  mem,
6540  allocInfo.allocationSize);
6541 
6542  m_Blocks.push_back(pBlock);
6543  if(pNewBlockIndex != VMA_NULL)
6544  {
6545  *pNewBlockIndex = m_Blocks.size() - 1;
6546  }
6547 
6548  return VK_SUCCESS;
6549 }
6550 
6551 #if VMA_STATS_STRING_ENABLED
6552 
6553 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
6554 {
6555  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6556 
6557  json.BeginObject();
6558 
6559  if(m_IsCustomPool)
6560  {
6561  json.WriteString("MemoryTypeIndex");
6562  json.WriteNumber(m_MemoryTypeIndex);
6563 
6564  json.WriteString("BlockSize");
6565  json.WriteNumber(m_PreferredBlockSize);
6566 
6567  json.WriteString("BlockCount");
6568  json.BeginObject(true);
6569  if(m_MinBlockCount > 0)
6570  {
6571  json.WriteString("Min");
6572  json.WriteNumber((uint64_t)m_MinBlockCount);
6573  }
6574  if(m_MaxBlockCount < SIZE_MAX)
6575  {
6576  json.WriteString("Max");
6577  json.WriteNumber((uint64_t)m_MaxBlockCount);
6578  }
6579  json.WriteString("Cur");
6580  json.WriteNumber((uint64_t)m_Blocks.size());
6581  json.EndObject();
6582 
6583  if(m_FrameInUseCount > 0)
6584  {
6585  json.WriteString("FrameInUseCount");
6586  json.WriteNumber(m_FrameInUseCount);
6587  }
6588  }
6589  else
6590  {
6591  json.WriteString("PreferredBlockSize");
6592  json.WriteNumber(m_PreferredBlockSize);
6593  }
6594 
6595  json.WriteString("Blocks");
6596  json.BeginArray();
6597  for(size_t i = 0; i < m_Blocks.size(); ++i)
6598  {
6599  m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
6600  }
6601  json.EndArray();
6602 
6603  json.EndObject();
6604 }
6605 
6606 #endif // #if VMA_STATS_STRING_ENABLED
6607 
6608 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6609  VmaAllocator hAllocator,
6610  uint32_t currentFrameIndex)
6611 {
6612  if(m_pDefragmentator == VMA_NULL)
6613  {
6614  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6615  hAllocator,
6616  this,
6617  currentFrameIndex);
6618  }
6619 
6620  return m_pDefragmentator;
6621 }
6622 
6623 VkResult VmaBlockVector::Defragment(
6624  VmaDefragmentationStats* pDefragmentationStats,
6625  VkDeviceSize& maxBytesToMove,
6626  uint32_t& maxAllocationsToMove)
6627 {
6628  if(m_pDefragmentator == VMA_NULL)
6629  {
6630  return VK_SUCCESS;
6631  }
6632 
6633  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6634 
6635  // Defragment.
6636  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
6637 
6638  // Accumulate statistics.
6639  if(pDefragmentationStats != VMA_NULL)
6640  {
6641  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
6642  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
6643  pDefragmentationStats->bytesMoved += bytesMoved;
6644  pDefragmentationStats->allocationsMoved += allocationsMoved;
6645  VMA_ASSERT(bytesMoved <= maxBytesToMove);
6646  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
6647  maxBytesToMove -= bytesMoved;
6648  maxAllocationsToMove -= allocationsMoved;
6649  }
6650 
6651  // Free empty blocks.
6652  m_HasEmptyBlock = false;
6653  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
6654  {
6655  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
6656  if(pBlock->m_Metadata.IsEmpty())
6657  {
6658  if(m_Blocks.size() > m_MinBlockCount)
6659  {
6660  if(pDefragmentationStats != VMA_NULL)
6661  {
6662  ++pDefragmentationStats->deviceMemoryBlocksFreed;
6663  pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
6664  }
6665 
6666  VmaVectorRemove(m_Blocks, blockIndex);
6667  pBlock->Destroy(m_hAllocator);
6668  vma_delete(m_hAllocator, pBlock);
6669  }
6670  else
6671  {
6672  m_HasEmptyBlock = true;
6673  }
6674  }
6675  }
6676 
6677  return result;
6678 }
6679 
6680 void VmaBlockVector::DestroyDefragmentator()
6681 {
6682  if(m_pDefragmentator != VMA_NULL)
6683  {
6684  vma_delete(m_hAllocator, m_pDefragmentator);
6685  m_pDefragmentator = VMA_NULL;
6686  }
6687 }
6688 
6689 void VmaBlockVector::MakePoolAllocationsLost(
6690  uint32_t currentFrameIndex,
6691  size_t* pLostAllocationCount)
6692 {
6693  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6694  size_t lostAllocationCount = 0;
6695  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6696  {
6697  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6698  VMA_ASSERT(pBlock);
6699  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
6700  }
6701  if(pLostAllocationCount != VMA_NULL)
6702  {
6703  *pLostAllocationCount = lostAllocationCount;
6704  }
6705 }
6706 
6707 void VmaBlockVector::AddStats(VmaStats* pStats)
6708 {
6709  const uint32_t memTypeIndex = m_MemoryTypeIndex;
6710  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
6711 
6712  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6713 
6714  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6715  {
6716  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6717  VMA_ASSERT(pBlock);
6718  VMA_HEAVY_ASSERT(pBlock->Validate());
6719  VmaStatInfo allocationStatInfo;
6720  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
6721  VmaAddStatInfo(pStats->total, allocationStatInfo);
6722  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
6723  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
6724  }
6725 }
6726 
6727 ////////////////////////////////////////////////////////////////////////////////
6728 // VmaDefragmentator members definition
6729 
6730 VmaDefragmentator::VmaDefragmentator(
6731  VmaAllocator hAllocator,
6732  VmaBlockVector* pBlockVector,
6733  uint32_t currentFrameIndex) :
6734  m_hAllocator(hAllocator),
6735  m_pBlockVector(pBlockVector),
6736  m_CurrentFrameIndex(currentFrameIndex),
6737  m_BytesMoved(0),
6738  m_AllocationsMoved(0),
6739  m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
6740  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
6741 {
6742 }
6743 
6744 VmaDefragmentator::~VmaDefragmentator()
6745 {
6746  for(size_t i = m_Blocks.size(); i--; )
6747  {
6748  vma_delete(m_hAllocator, m_Blocks[i]);
6749  }
6750 }
6751 
6752 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
6753 {
6754  AllocationInfo allocInfo;
6755  allocInfo.m_hAllocation = hAlloc;
6756  allocInfo.m_pChanged = pChanged;
6757  m_Allocations.push_back(allocInfo);
6758 }
6759 
6760 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
6761 {
6762  // It has already been mapped for defragmentation.
6763  if(m_pMappedDataForDefragmentation)
6764  {
6765  *ppMappedData = m_pMappedDataForDefragmentation;
6766  return VK_SUCCESS;
6767  }
6768 
6769  // The block is already mapped - reuse the existing mapping.
6770  if(m_pBlock->m_Mapping.GetMappedData())
6771  {
6772  *ppMappedData = m_pBlock->m_Mapping.GetMappedData();
6773  return VK_SUCCESS;
6774  }
6775 
6776  // Map on first usage.
6777  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
6778  *ppMappedData = m_pMappedDataForDefragmentation;
6779  return res;
6780 }
6781 
6782 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
6783 {
6784  if(m_pMappedDataForDefragmentation != VMA_NULL)
6785  {
6786  m_pBlock->Unmap(hAllocator, 1);
6787  }
6788 }
6789 
6790 VkResult VmaDefragmentator::DefragmentRound(
6791  VkDeviceSize maxBytesToMove,
6792  uint32_t maxAllocationsToMove)
6793 {
6794  if(m_Blocks.empty())
6795  {
6796  return VK_SUCCESS;
6797  }
6798 
6799  size_t srcBlockIndex = m_Blocks.size() - 1;
6800  size_t srcAllocIndex = SIZE_MAX;
6801  for(;;)
6802  {
6803  // 1. Find next allocation to move.
6804  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
6805  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
6806  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
6807  {
6808  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
6809  {
6810  // Finished: no more allocations to process.
6811  if(srcBlockIndex == 0)
6812  {
6813  return VK_SUCCESS;
6814  }
6815  else
6816  {
6817  --srcBlockIndex;
6818  srcAllocIndex = SIZE_MAX;
6819  }
6820  }
6821  else
6822  {
6823  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
6824  }
6825  }
6826 
6827  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
6828  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
6829 
6830  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
6831  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
6832  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
6833  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
6834 
6835  // 2. Try to find new place for this allocation in preceding or current block.
6836  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
6837  {
6838  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
6839  VmaAllocationRequest dstAllocRequest;
6840  if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
6841  m_CurrentFrameIndex,
6842  m_pBlockVector->GetFrameInUseCount(),
6843  m_pBlockVector->GetBufferImageGranularity(),
6844  size,
6845  alignment,
6846  suballocType,
6847  false, // canMakeOtherLost
6848  &dstAllocRequest) &&
6849  MoveMakesSense(
6850  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
6851  {
6852  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
6853 
6854  // Reached limit on number of allocations or bytes to move.
6855  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
6856  (m_BytesMoved + size > maxBytesToMove))
6857  {
6858  return VK_INCOMPLETE;
6859  }
6860 
6861  void* pDstMappedData = VMA_NULL;
6862  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
6863  if(res != VK_SUCCESS)
6864  {
6865  return res;
6866  }
6867 
6868  void* pSrcMappedData = VMA_NULL;
6869  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
6870  if(res != VK_SUCCESS)
6871  {
6872  return res;
6873  }
6874 
6875  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
6876  memcpy(
6877  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
6878  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
6879  static_cast<size_t>(size));
6880 
6881  pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
6882  pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);
6883 
6884  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
6885 
6886  if(allocInfo.m_pChanged != VMA_NULL)
6887  {
6888  *allocInfo.m_pChanged = VK_TRUE;
6889  }
6890 
6891  ++m_AllocationsMoved;
6892  m_BytesMoved += size;
6893 
6894  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
6895 
6896  break;
6897  }
6898  }
6899 
6900  // If not processed, this allocInfo remains in pSrcBlockInfo->m_Allocations for the next round.
6901 
6902  if(srcAllocIndex > 0)
6903  {
6904  --srcAllocIndex;
6905  }
6906  else
6907  {
6908  if(srcBlockIndex > 0)
6909  {
6910  --srcBlockIndex;
6911  srcAllocIndex = SIZE_MAX;
6912  }
6913  else
6914  {
6915  return VK_SUCCESS;
6916  }
6917  }
6918  }
6919 }
6920 
6921 VkResult VmaDefragmentator::Defragment(
6922  VkDeviceSize maxBytesToMove,
6923  uint32_t maxAllocationsToMove)
6924 {
6925  if(m_Allocations.empty())
6926  {
6927  return VK_SUCCESS;
6928  }
6929 
6930  // Create block info for each block.
6931  const size_t blockCount = m_pBlockVector->m_Blocks.size();
6932  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6933  {
6934  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
6935  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
6936  m_Blocks.push_back(pBlockInfo);
6937  }
6938 
6939  // Sort them by m_pBlock pointer value.
6940  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
6941 
6942  // Move allocation infos from m_Allocations to the appropriate m_Blocks[blockIndex].m_Allocations.
6943  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
6944  {
6945  AllocationInfo& allocInfo = m_Allocations[blockIndex];
6946  // Now, as we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was not lost.
6947  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
6948  {
6949  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
6950  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
6951  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
6952  {
6953  (*it)->m_Allocations.push_back(allocInfo);
6954  }
6955  else
6956  {
6957  VMA_ASSERT(0);
6958  }
6959  }
6960  }
6961  m_Allocations.clear();
6962 
6963  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6964  {
6965  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
6966  pBlockInfo->CalcHasNonMovableAllocations();
6967  pBlockInfo->SortAllocationsBySizeDescecnding();
6968  }
6969 
6970  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
6971  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
6972 
6973  // Execute defragmentation rounds (the main part).
6974  VkResult result = VK_SUCCESS;
6975  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
6976  {
6977  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
6978  }
6979 
6980  // Unmap blocks that were mapped for defragmentation.
6981  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
6982  {
6983  m_Blocks[blockIndex]->Unmap(m_hAllocator);
6984  }
6985 
6986  return result;
6987 }
6988 
6989 bool VmaDefragmentator::MoveMakesSense(
6990  size_t dstBlockIndex, VkDeviceSize dstOffset,
6991  size_t srcBlockIndex, VkDeviceSize srcOffset)
6992 {
6993  if(dstBlockIndex < srcBlockIndex)
6994  {
6995  return true;
6996  }
6997  if(dstBlockIndex > srcBlockIndex)
6998  {
6999  return false;
7000  }
7001  if(dstOffset < srcOffset)
7002  {
7003  return true;
7004  }
7005  return false;
7006 }
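/*
Truth table for MoveMakesSense() above - allocations only ever move toward
lower block indices, or to lower offsets within the same block, which
guarantees the defragmentation rounds make progress and terminate:

  dstBlockIndex 0, srcBlockIndex 2              -> true
  dstBlockIndex 2, srcBlockIndex 0              -> false
  same block, dstOffset 0,    srcOffset 4096    -> true
  same block, dstOffset 4096, srcOffset 4096    -> false
*/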
7007 
7008 ////////////////////////////////////////////////////////////////////////////////
7009 // VmaAllocator_T
7010 
7011 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
7012  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
7013  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
7014  m_hDevice(pCreateInfo->device),
7015  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
7016  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
7017  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
7018  m_PreferredLargeHeapBlockSize(0),
7019  m_PhysicalDevice(pCreateInfo->physicalDevice),
7020  m_CurrentFrameIndex(0),
7021  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
7022 {
7023  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
7024 
7025  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
7026  memset(&m_MemProps, 0, sizeof(m_MemProps));
7027  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
7028 
7029  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
7030  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
7031 
7032  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7033  {
7034  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
7035  }
7036 
7037  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
7038  {
7039  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
7040  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
7041  }
7042 
7043  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
7044 
7045  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
7046  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
7047 
7048  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
7049  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
7050 
7051  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
7052  {
7053  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
7054  {
7055  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
7056  if(limit != VK_WHOLE_SIZE)
7057  {
7058  m_HeapSizeLimit[heapIndex] = limit;
7059  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
7060  {
7061  m_MemProps.memoryHeaps[heapIndex].size = limit;
7062  }
7063  }
7064  }
7065  }
7066 
7067  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7068  {
7069  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
7070 
7071  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
7072  this,
7073  memTypeIndex,
7074  preferredBlockSize,
7075  0,
7076  SIZE_MAX,
7077  GetBufferImageGranularity(),
7078  pCreateInfo->frameInUseCount,
7079  false); // isCustomPool
7080  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
7081  // because minBlockCount is 0.
7082  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
7083  }
7084 }
7085 
7086 VmaAllocator_T::~VmaAllocator_T()
7087 {
7088  VMA_ASSERT(m_Pools.empty());
7089 
7090  for(size_t i = GetMemoryTypeCount(); i--; )
7091  {
7092  vma_delete(this, m_pDedicatedAllocations[i]);
7093  vma_delete(this, m_pBlockVectors[i]);
7094  }
7095 }
7096 
7097 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
7098 {
7099 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7100  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
7101  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
7102  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
7103  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
7104  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
7105  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
7106  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
7107  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
7108  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
7109  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
7110  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
7111  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
7112  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
7113  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
7114  if(m_UseKhrDedicatedAllocation)
7115  {
7116  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
7117  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
7118  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
7119  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
7120  }
7121 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
7122 
7123 #define VMA_COPY_IF_NOT_NULL(funcName) \
7124  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
7125 
7126  if(pVulkanFunctions != VMA_NULL)
7127  {
7128  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
7129  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
7130  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
7131  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
7132  VMA_COPY_IF_NOT_NULL(vkMapMemory);
7133  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
7134  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
7135  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
7136  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
7137  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
7138  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
7139  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
7140  VMA_COPY_IF_NOT_NULL(vkCreateImage);
7141  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
7142  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
7143  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
7144  }
7145 
7146 #undef VMA_COPY_IF_NOT_NULL
7147 
7148  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
7149  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
7150  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
7151  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
7152  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
7153  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
7154  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
7155  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
7156  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
7157  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
7158  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
7159  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
7160  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
7161  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
7162  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
7163  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
7164  if(m_UseKhrDedicatedAllocation)
7165  {
7166  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
7167  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
7168  }
7169 }
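// NOTE (editorial usage sketch, not part of the library): when compiling with
// VMA_STATIC_VULKAN_FUNCTIONS defined to 0, the pointers checked by the asserts
// above must be supplied through VmaAllocatorCreateInfo::pVulkanFunctions, e.g.
// taken from the statically linked loader as below, or fetched via
// vkGetInstanceProcAddr/vkGetDeviceProcAddr. A valid `physicalDevice` and
// `device` are assumed.
#if 0
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
vulkanFunctions.vkMapMemory = &vkMapMemory;
vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
vulkanFunctions.vkCreateImage = &vkCreateImage;
vulkanFunctions.vkDestroyImage = &vkDestroyImage;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
#endif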
7170 
7171 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7172 {
7173  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7174  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7175  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7176  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7177 }
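// NOTE (editorial): a worked example of the heuristic above, assuming the
// defaults VMA_SMALL_HEAP_MAX_SIZE = 512 MiB and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB:
// - a 256 MiB heap counts as small, so blocks of 256/8 = 32 MiB are preferred;
// - an 8 GiB heap counts as large, so the 256 MiB default (or the user-provided
//   VmaAllocatorCreateInfo::preferredLargeHeapBlockSize) is used.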
7178 
7179 VkResult VmaAllocator_T::AllocateMemoryOfType(
7180  const VkMemoryRequirements& vkMemReq,
7181  bool dedicatedAllocation,
7182  VkBuffer dedicatedBuffer,
7183  VkImage dedicatedImage,
7184  const VmaAllocationCreateInfo& createInfo,
7185  uint32_t memTypeIndex,
7186  VmaSuballocationType suballocType,
7187  VmaAllocation* pAllocation)
7188 {
7189  VMA_ASSERT(pAllocation != VMA_NULL);
7190  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7191 
7192  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7193 
7194  // If memory type is not HOST_VISIBLE, disable MAPPED.
7195  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7196  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7197  {
7198  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7199  }
7200 
7201  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7202  VMA_ASSERT(blockVector);
7203 
7204  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7205  bool preferDedicatedMemory =
7206  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7207  dedicatedAllocation ||
7208  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
7209  vkMemReq.size > preferredBlockSize / 2;
7210 
7211  if(preferDedicatedMemory &&
7212  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7213  finalCreateInfo.pool == VK_NULL_HANDLE)
7214  {
7215  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
7216  }
7217 
7218  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7219  {
7220  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7221  {
7222  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7223  }
7224  else
7225  {
7226  return AllocateDedicatedMemory(
7227  vkMemReq.size,
7228  suballocType,
7229  memTypeIndex,
7230  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7231  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7232  finalCreateInfo.pUserData,
7233  dedicatedBuffer,
7234  dedicatedImage,
7235  pAllocation);
7236  }
7237  }
7238  else
7239  {
7240  VkResult res = blockVector->Allocate(
7241  VK_NULL_HANDLE, // hCurrentPool
7242  m_CurrentFrameIndex.load(),
7243  vkMemReq,
7244  finalCreateInfo,
7245  suballocType,
7246  pAllocation);
7247  if(res == VK_SUCCESS)
7248  {
7249  return res;
7250  }
7251 
7252  // Try dedicated memory as a fallback.
7253  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7254  {
7255  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7256  }
7257  else
7258  {
7259  res = AllocateDedicatedMemory(
7260  vkMemReq.size,
7261  suballocType,
7262  memTypeIndex,
7263  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7264  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7265  finalCreateInfo.pUserData,
7266  dedicatedBuffer,
7267  dedicatedImage,
7268  pAllocation);
7269  if(res == VK_SUCCESS)
7270  {
7271  // Succeeded: AllocateDedicatedMemory already filled *pAllocation, nothing more to do here.
7272  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7273  return VK_SUCCESS;
7274  }
7275  else
7276  {
7277  // Everything failed: Return error code.
7278  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7279  return res;
7280  }
7281  }
7282  }
7283 }
7284 
7285 VkResult VmaAllocator_T::AllocateDedicatedMemory(
7286  VkDeviceSize size,
7287  VmaSuballocationType suballocType,
7288  uint32_t memTypeIndex,
7289  bool map,
7290  bool isUserDataString,
7291  void* pUserData,
7292  VkBuffer dedicatedBuffer,
7293  VkImage dedicatedImage,
7294  VmaAllocation* pAllocation)
7295 {
7296  VMA_ASSERT(pAllocation);
7297 
7298  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
7299  allocInfo.memoryTypeIndex = memTypeIndex;
7300  allocInfo.allocationSize = size;
7301 
7302  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
7303  if(m_UseKhrDedicatedAllocation)
7304  {
7305  if(dedicatedBuffer != VK_NULL_HANDLE)
7306  {
7307  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
7308  dedicatedAllocInfo.buffer = dedicatedBuffer;
7309  allocInfo.pNext = &dedicatedAllocInfo;
7310  }
7311  else if(dedicatedImage != VK_NULL_HANDLE)
7312  {
7313  dedicatedAllocInfo.image = dedicatedImage;
7314  allocInfo.pNext = &dedicatedAllocInfo;
7315  }
7316  }
7317 
7318  // Allocate VkDeviceMemory.
7319  VkDeviceMemory hMemory = VK_NULL_HANDLE;
7320  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
7321  if(res < 0)
7322  {
7323  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7324  return res;
7325  }
7326 
7327  void* pMappedData = VMA_NULL;
7328  if(map)
7329  {
7330  res = (*m_VulkanFunctions.vkMapMemory)(
7331  m_hDevice,
7332  hMemory,
7333  0,
7334  VK_WHOLE_SIZE,
7335  0,
7336  &pMappedData);
7337  if(res < 0)
7338  {
7339  VMA_DEBUG_LOG(" vkMapMemory FAILED");
7340  FreeVulkanMemory(memTypeIndex, size, hMemory);
7341  return res;
7342  }
7343  }
7344 
7345  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
7346  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
7347  (*pAllocation)->SetUserData(this, pUserData);
7348 
7349  // Register it in m_pDedicatedAllocations.
7350  {
7351  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7352  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
7353  VMA_ASSERT(pDedicatedAllocations);
7354  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
7355  }
7356 
7357  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
7358 
7359  return VK_SUCCESS;
7360 }
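// NOTE (editorial usage sketch, not part of the library): the dedicated path
// above is taken automatically for large requests, but can also be forced per
// allocation. A valid `allocator` and a filled `bufferInfo` are assumed.
#if 0
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Force a separate VkDeviceMemory block for this resource:
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer;
VmaAllocation allocation;
vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
#endif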
7361 
7362 void VmaAllocator_T::GetBufferMemoryRequirements(
7363  VkBuffer hBuffer,
7364  VkMemoryRequirements& memReq,
7365  bool& requiresDedicatedAllocation,
7366  bool& prefersDedicatedAllocation) const
7367 {
7368  if(m_UseKhrDedicatedAllocation)
7369  {
7370  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7371  memReqInfo.buffer = hBuffer;
7372 
7373  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7374 
7375  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7376  memReq2.pNext = &memDedicatedReq;
7377 
7378  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7379 
7380  memReq = memReq2.memoryRequirements;
7381  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7382  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7383  }
7384  else
7385  {
7386  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7387  requiresDedicatedAllocation = false;
7388  prefersDedicatedAllocation = false;
7389  }
7390 }
7391 
7392 void VmaAllocator_T::GetImageMemoryRequirements(
7393  VkImage hImage,
7394  VkMemoryRequirements& memReq,
7395  bool& requiresDedicatedAllocation,
7396  bool& prefersDedicatedAllocation) const
7397 {
7398  if(m_UseKhrDedicatedAllocation)
7399  {
7400  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7401  memReqInfo.image = hImage;
7402 
7403  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7404 
7405  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7406  memReq2.pNext = &memDedicatedReq;
7407 
7408  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7409 
7410  memReq = memReq2.memoryRequirements;
7411  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7412  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7413  }
7414  else
7415  {
7416  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7417  requiresDedicatedAllocation = false;
7418  prefersDedicatedAllocation = false;
7419  }
7420 }
7421 
7422 VkResult VmaAllocator_T::AllocateMemory(
7423  const VkMemoryRequirements& vkMemReq,
7424  bool requiresDedicatedAllocation,
7425  bool prefersDedicatedAllocation,
7426  VkBuffer dedicatedBuffer,
7427  VkImage dedicatedImage,
7428  const VmaAllocationCreateInfo& createInfo,
7429  VmaSuballocationType suballocType,
7430  VmaAllocation* pAllocation)
7431 {
7432  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7433  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7434  {
7435  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7436  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7437  }
7438  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7439  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
7440  {
7441  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7442  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7443  }
7444  if(requiresDedicatedAllocation)
7445  {
7446  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7447  {
7448  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7449  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7450  }
7451  if(createInfo.pool != VK_NULL_HANDLE)
7452  {
7453  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7454  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7455  }
7456  }
7457  if((createInfo.pool != VK_NULL_HANDLE) &&
7458  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7459  {
7460  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7461  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7462  }
7463 
7464  if(createInfo.pool != VK_NULL_HANDLE)
7465  {
7466  return createInfo.pool->m_BlockVector.Allocate(
7467  createInfo.pool,
7468  m_CurrentFrameIndex.load(),
7469  vkMemReq,
7470  createInfo,
7471  suballocType,
7472  pAllocation);
7473  }
7474  else
7475  {
7476  // Bit mask of Vulkan memory types acceptable for this allocation.
7477  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7478  uint32_t memTypeIndex = UINT32_MAX;
7479  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7480  if(res == VK_SUCCESS)
7481  {
7482  res = AllocateMemoryOfType(
7483  vkMemReq,
7484  requiresDedicatedAllocation || prefersDedicatedAllocation,
7485  dedicatedBuffer,
7486  dedicatedImage,
7487  createInfo,
7488  memTypeIndex,
7489  suballocType,
7490  pAllocation);
7491  // Succeeded on first try.
7492  if(res == VK_SUCCESS)
7493  {
7494  return res;
7495  }
7496  // Allocation from this memory type failed. Try other compatible memory types.
7497  else
7498  {
7499  for(;;)
7500  {
7501  // Remove old memTypeIndex from list of possibilities.
7502  memoryTypeBits &= ~(1u << memTypeIndex);
7503  // Find alternative memTypeIndex.
7504  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7505  if(res == VK_SUCCESS)
7506  {
7507  res = AllocateMemoryOfType(
7508  vkMemReq,
7509  requiresDedicatedAllocation || prefersDedicatedAllocation,
7510  dedicatedBuffer,
7511  dedicatedImage,
7512  createInfo,
7513  memTypeIndex,
7514  suballocType,
7515  pAllocation);
7516  // Allocation from this alternative memory type succeeded.
7517  if(res == VK_SUCCESS)
7518  {
7519  return res;
7520  }
7521  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7522  }
7523  // No other matching memory type index could be found.
7524  else
7525  {
7526  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7527  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7528  }
7529  }
7530  }
7531  }
7532  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7533  else
7534  return res;
7535  }
7536 }
7537 
7538 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7539 {
7540  VMA_ASSERT(allocation);
7541 
7542  if(allocation->CanBecomeLost() == false ||
7543  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7544  {
7545  switch(allocation->GetType())
7546  {
7547  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7548  {
7549  VmaBlockVector* pBlockVector = VMA_NULL;
7550  VmaPool hPool = allocation->GetPool();
7551  if(hPool != VK_NULL_HANDLE)
7552  {
7553  pBlockVector = &hPool->m_BlockVector;
7554  }
7555  else
7556  {
7557  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7558  pBlockVector = m_pBlockVectors[memTypeIndex];
7559  }
7560  pBlockVector->Free(allocation);
7561  }
7562  break;
7563  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7564  FreeDedicatedMemory(allocation);
7565  break;
7566  default:
7567  VMA_ASSERT(0);
7568  }
7569  }
7570 
7571  allocation->SetUserData(this, VMA_NULL);
7572  vma_delete(this, allocation);
7573 }
7574 
7575 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7576 {
7577  // Initialize.
7578  InitStatInfo(pStats->total);
7579  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7580  InitStatInfo(pStats->memoryType[i]);
7581  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7582  InitStatInfo(pStats->memoryHeap[i]);
7583 
7584  // Process default pools.
7585  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7586  {
7587  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7588  VMA_ASSERT(pBlockVector);
7589  pBlockVector->AddStats(pStats);
7590  }
7591 
7592  // Process custom pools.
7593  {
7594  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7595  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7596  {
7597  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7598  }
7599  }
7600 
7601  // Process dedicated allocations.
7602  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7603  {
7604  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7605  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7606  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7607  VMA_ASSERT(pDedicatedAllocVector);
7608  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7609  {
7610  VmaStatInfo allocationStatInfo;
7611  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7612  VmaAddStatInfo(pStats->total, allocationStatInfo);
7613  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7614  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7615  }
7616  }
7617 
7618  // Postprocess.
7619  VmaPostprocessCalcStatInfo(pStats->total);
7620  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7621  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7622  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7623  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7624 }
7625 
7626 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7627 
7628 VkResult VmaAllocator_T::Defragment(
7629  VmaAllocation* pAllocations,
7630  size_t allocationCount,
7631  VkBool32* pAllocationsChanged,
7632  const VmaDefragmentationInfo* pDefragmentationInfo,
7633  VmaDefragmentationStats* pDefragmentationStats)
7634 {
7635  if(pAllocationsChanged != VMA_NULL)
7636  {
7637  memset(pAllocationsChanged, 0, allocationCount * sizeof(*pAllocationsChanged));
7638  }
7639  if(pDefragmentationStats != VMA_NULL)
7640  {
7641  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
7642  }
7643 
7644  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
7645 
7646  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
7647 
7648  const size_t poolCount = m_Pools.size();
7649 
7650  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
7651  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
7652  {
7653  VmaAllocation hAlloc = pAllocations[allocIndex];
7654  VMA_ASSERT(hAlloc);
7655  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
7656  // DedicatedAlloc cannot be defragmented.
7657  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
7658  // Only HOST_VISIBLE memory types can be defragmented.
7659  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
7660  // Lost allocation cannot be defragmented.
7661  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
7662  {
7663  VmaBlockVector* pAllocBlockVector = VMA_NULL;
7664 
7665  const VmaPool hAllocPool = hAlloc->GetPool();
7666  // This allocation belongs to custom pool.
7667  if(hAllocPool != VK_NULL_HANDLE)
7668  {
7669  pAllocBlockVector = &hAllocPool->GetBlockVector();
7670  }
7671  // This allocation belongs to general pool.
7672  else
7673  {
7674  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
7675  }
7676 
7677  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
7678 
7679  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
7680  &pAllocationsChanged[allocIndex] : VMA_NULL;
7681  pDefragmentator->AddAllocation(hAlloc, pChanged);
7682  }
7683  }
7684 
7685  VkResult result = VK_SUCCESS;
7686 
7687  // ======== Main processing.
7688 
7689  VkDeviceSize maxBytesToMove = VK_WHOLE_SIZE;
7690  uint32_t maxAllocationsToMove = UINT32_MAX;
7691  if(pDefragmentationInfo != VMA_NULL)
7692  {
7693  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
7694  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
7695  }
7696 
7697  // Process standard memory.
7698  for(uint32_t memTypeIndex = 0;
7699  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
7700  ++memTypeIndex)
7701  {
7702  // Only HOST_VISIBLE memory types can be defragmented.
7703  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7704  {
7705  result = m_pBlockVectors[memTypeIndex]->Defragment(
7706  pDefragmentationStats,
7707  maxBytesToMove,
7708  maxAllocationsToMove);
7709  }
7710  }
7711 
7712  // Process custom pools.
7713  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
7714  {
7715  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
7716  pDefragmentationStats,
7717  maxBytesToMove,
7718  maxAllocationsToMove);
7719  }
7720 
7721  // ======== Destroy defragmentators.
7722 
7723  // Process custom pools.
7724  for(size_t poolIndex = poolCount; poolIndex--; )
7725  {
7726  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
7727  }
7728 
7729  // Process standard memory.
7730  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
7731  {
7732  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
7733  {
7734  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
7735  }
7736  }
7737 
7738  return result;
7739 }
7740 
7741 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
7742 {
7743  if(hAllocation->CanBecomeLost())
7744  {
7745  /*
7746  Warning: This is a carefully designed algorithm.
7747  Do not modify unless you really know what you're doing :)
7748  */
7749  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7750  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7751  for(;;)
7752  {
7753  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7754  {
7755  pAllocationInfo->memoryType = UINT32_MAX;
7756  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
7757  pAllocationInfo->offset = 0;
7758  pAllocationInfo->size = hAllocation->GetSize();
7759  pAllocationInfo->pMappedData = VMA_NULL;
7760  pAllocationInfo->pUserData = hAllocation->GetUserData();
7761  return;
7762  }
7763  else if(localLastUseFrameIndex == localCurrFrameIndex)
7764  {
7765  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7766  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7767  pAllocationInfo->offset = hAllocation->GetOffset();
7768  pAllocationInfo->size = hAllocation->GetSize();
7769  pAllocationInfo->pMappedData = VMA_NULL;
7770  pAllocationInfo->pUserData = hAllocation->GetUserData();
7771  return;
7772  }
7773  else // Last use time earlier than current time.
7774  {
7775  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7776  {
7777  localLastUseFrameIndex = localCurrFrameIndex;
7778  }
7779  }
7780  }
7781  }
7782  else
7783  {
7784  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
7785  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
7786  pAllocationInfo->offset = hAllocation->GetOffset();
7787  pAllocationInfo->size = hAllocation->GetSize();
7788  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
7789  pAllocationInfo->pUserData = hAllocation->GetUserData();
7790  }
7791 }
7792 
7793 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
7794 {
7795  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
7796  if(hAllocation->CanBecomeLost())
7797  {
7798  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
7799  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
7800  for(;;)
7801  {
7802  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
7803  {
7804  return false;
7805  }
7806  else if(localLastUseFrameIndex == localCurrFrameIndex)
7807  {
7808  return true;
7809  }
7810  else // Last use time earlier than current time.
7811  {
7812  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
7813  {
7814  localLastUseFrameIndex = localCurrFrameIndex;
7815  }
7816  }
7817  }
7818  }
7819  else
7820  {
7821  return true;
7822  }
7823 }
7824 
7825 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
7826 {
7827  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
7828 
7829  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
7830 
7831  if(newCreateInfo.maxBlockCount == 0)
7832  {
7833  newCreateInfo.maxBlockCount = SIZE_MAX;
7834  }
7835  if(newCreateInfo.blockSize == 0)
7836  {
7837  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
7838  }
7839 
7840  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
7841 
7842  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
7843  if(res != VK_SUCCESS)
7844  {
7845  vma_delete(this, *pPool);
7846  *pPool = VMA_NULL;
7847  return res;
7848  }
7849 
7850  // Add to m_Pools.
7851  {
7852  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7853  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
7854  }
7855 
7856  return VK_SUCCESS;
7857 }
7858 
7859 void VmaAllocator_T::DestroyPool(VmaPool pool)
7860 {
7861  // Remove from m_Pools.
7862  {
7863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7864  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
7865  VMA_ASSERT(success && "Pool not found in Allocator.");
7866  }
7867 
7868  vma_delete(this, pool);
7869 }
7870 
7871 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
7872 {
7873  pool->m_BlockVector.GetPoolStats(pPoolStats);
7874 }
7875 
7876 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
7877 {
7878  m_CurrentFrameIndex.store(frameIndex);
7879 }
7880 
7881 void VmaAllocator_T::MakePoolAllocationsLost(
7882  VmaPool hPool,
7883  size_t* pLostAllocationCount)
7884 {
7885  hPool->m_BlockVector.MakePoolAllocationsLost(
7886  m_CurrentFrameIndex.load(),
7887  pLostAllocationCount);
7888 }
7889 
7890 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
7891 {
7892  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
7893  (*pAllocation)->InitLost();
7894 }
7895 
7896 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
7897 {
7898  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
7899 
7900  VkResult res;
7901  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7902  {
7903  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7904  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
7905  {
7906  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7907  if(res == VK_SUCCESS)
7908  {
7909  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
7910  }
7911  }
7912  else
7913  {
7914  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
7915  }
7916  }
7917  else
7918  {
7919  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
7920  }
7921 
7922  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
7923  {
7924  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
7925  }
7926 
7927  return res;
7928 }
7929 
7930 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
7931 {
7932  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
7933  {
7934  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
7935  }
7936 
7937  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
7938 
7939  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
7940  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
7941  {
7942  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
7943  m_HeapSizeLimit[heapIndex] += size;
7944  }
7945 }
7946 
7947 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
7948 {
7949  if(hAllocation->CanBecomeLost())
7950  {
7951  return VK_ERROR_MEMORY_MAP_FAILED;
7952  }
7953 
7954  switch(hAllocation->GetType())
7955  {
7956  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7957  {
7958  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7959  char *pBytes = VMA_NULL;
7960  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
7961  if(res == VK_SUCCESS)
7962  {
7963  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
7964  hAllocation->BlockAllocMap();
7965  }
7966  return res;
7967  }
7968  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7969  return hAllocation->DedicatedAllocMap(this, ppData);
7970  default:
7971  VMA_ASSERT(0);
7972  return VK_ERROR_MEMORY_MAP_FAILED;
7973  }
7974 }
7975 
7976 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
7977 {
7978  switch(hAllocation->GetType())
7979  {
7980  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7981  {
7982  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
7983  hAllocation->BlockAllocUnmap();
7984  pBlock->Unmap(this, 1);
7985  }
7986  break;
7987  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7988  hAllocation->DedicatedAllocUnmap(this);
7989  break;
7990  default:
7991  VMA_ASSERT(0);
7992  }
7993 }
7994 
7995 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
7996 {
7997  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
7998 
7999  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
8000  {
8001  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8002  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
8003  VMA_ASSERT(pDedicatedAllocations);
8004  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
8005  VMA_ASSERT(success);
8006  }
8007 
8008  VkDeviceMemory hMemory = allocation->GetMemory();
8009 
8010  if(allocation->GetMappedData() != VMA_NULL)
8011  {
8012  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
8013  }
8014 
8015  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
8016 
8017  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
8018 }
8019 
8020 #if VMA_STATS_STRING_ENABLED
8021 
8022 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
8023 {
8024  bool dedicatedAllocationsStarted = false;
8025  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8026  {
8027  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
8028  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
8029  VMA_ASSERT(pDedicatedAllocVector);
8030  if(pDedicatedAllocVector->empty() == false)
8031  {
8032  if(dedicatedAllocationsStarted == false)
8033  {
8034  dedicatedAllocationsStarted = true;
8035  json.WriteString("DedicatedAllocations");
8036  json.BeginObject();
8037  }
8038 
8039  json.BeginString("Type ");
8040  json.ContinueString(memTypeIndex);
8041  json.EndString();
8042 
8043  json.BeginArray();
8044 
8045  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
8046  {
8047  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
8048  json.BeginObject(true);
8049 
8050  json.WriteString("Type");
8051  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);
8052 
8053  json.WriteString("Size");
8054  json.WriteNumber(hAlloc->GetSize());
8055 
8056  const void* pUserData = hAlloc->GetUserData();
8057  if(pUserData != VMA_NULL)
8058  {
8059  json.WriteString("UserData");
8060  if(hAlloc->IsUserDataString())
8061  {
8062  json.WriteString((const char*)pUserData);
8063  }
8064  else
8065  {
8066  json.BeginString();
8067  json.ContinueString_Pointer(pUserData);
8068  json.EndString();
8069  }
8070  }
8071 
8072  json.EndObject();
8073  }
8074 
8075  json.EndArray();
8076  }
8077  }
8078  if(dedicatedAllocationsStarted)
8079  {
8080  json.EndObject();
8081  }
8082 
8083  {
8084  bool allocationsStarted = false;
8085  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
8086  {
8087  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
8088  {
8089  if(allocationsStarted == false)
8090  {
8091  allocationsStarted = true;
8092  json.WriteString("DefaultPools");
8093  json.BeginObject();
8094  }
8095 
8096  json.BeginString("Type ");
8097  json.ContinueString(memTypeIndex);
8098  json.EndString();
8099 
8100  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
8101  }
8102  }
8103  if(allocationsStarted)
8104  {
8105  json.EndObject();
8106  }
8107  }
8108 
8109  {
8110  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8111  const size_t poolCount = m_Pools.size();
8112  if(poolCount > 0)
8113  {
8114  json.WriteString("Pools");
8115  json.BeginArray();
8116  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
8117  {
8118  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
8119  }
8120  json.EndArray();
8121  }
8122  }
8123 }
8124 
8125 #endif // #if VMA_STATS_STRING_ENABLED
8126 
8127 static VkResult AllocateMemoryForImage(
8128  VmaAllocator allocator,
8129  VkImage image,
8130  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8131  VmaSuballocationType suballocType,
8132  VmaAllocation* pAllocation)
8133 {
8134  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8135 
8136  VkMemoryRequirements vkMemReq = {};
8137  bool requiresDedicatedAllocation = false;
8138  bool prefersDedicatedAllocation = false;
8139  allocator->GetImageMemoryRequirements(image, vkMemReq,
8140  requiresDedicatedAllocation, prefersDedicatedAllocation);
8141 
8142  return allocator->AllocateMemory(
8143  vkMemReq,
8144  requiresDedicatedAllocation,
8145  prefersDedicatedAllocation,
8146  VK_NULL_HANDLE, // dedicatedBuffer
8147  image, // dedicatedImage
8148  *pAllocationCreateInfo,
8149  suballocType,
8150  pAllocation);
8151 }
8152 
8154 // Public interface
8155 
8156 VkResult vmaCreateAllocator(
8157  const VmaAllocatorCreateInfo* pCreateInfo,
8158  VmaAllocator* pAllocator)
8159 {
8160  VMA_ASSERT(pCreateInfo && pAllocator);
8161  VMA_DEBUG_LOG("vmaCreateAllocator");
8162  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8163  return VK_SUCCESS;
8164 }
8165 
8166 void vmaDestroyAllocator(
8167  VmaAllocator allocator)
8168 {
8169  if(allocator != VK_NULL_HANDLE)
8170  {
8171  VMA_DEBUG_LOG("vmaDestroyAllocator");
8172  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
8173  vma_delete(&allocationCallbacks, allocator);
8174  }
8175 }
8176 
8177 void vmaGetPhysicalDeviceProperties(
8178  VmaAllocator allocator,
8179  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8180 {
8181  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8182  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8183 }
8184 
8185 void vmaGetMemoryProperties(
8186  VmaAllocator allocator,
8187  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8188 {
8189  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8190  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8191 }
8192 
8193 void vmaGetMemoryTypeProperties(
8194  VmaAllocator allocator,
8195  uint32_t memoryTypeIndex,
8196  VkMemoryPropertyFlags* pFlags)
8197 {
8198  VMA_ASSERT(allocator && pFlags);
8199  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8200  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8201 }
8202 
8203 void vmaSetCurrentFrameIndex(
8204  VmaAllocator allocator,
8205  uint32_t frameIndex)
8206 {
8207  VMA_ASSERT(allocator);
8208  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8209 
8210  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8211 
8212  allocator->SetCurrentFrameIndex(frameIndex);
8213 }
8214 
8215 void vmaCalculateStats(
8216  VmaAllocator allocator,
8217  VmaStats* pStats)
8218 {
8219  VMA_ASSERT(allocator && pStats);
8220  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8221  allocator->CalculateStats(pStats);
8222 }
8223 
8224 #if VMA_STATS_STRING_ENABLED
8225 
8226 void vmaBuildStatsString(
8227  VmaAllocator allocator,
8228  char** ppStatsString,
8229  VkBool32 detailedMap)
8230 {
8231  VMA_ASSERT(allocator && ppStatsString);
8232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8233 
8234  VmaStringBuilder sb(allocator);
8235  {
8236  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
8237  json.BeginObject();
8238 
8239  VmaStats stats;
8240  allocator->CalculateStats(&stats);
8241 
8242  json.WriteString("Total");
8243  VmaPrintStatInfo(json, stats.total);
8244 
8245  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
8246  {
8247  json.BeginString("Heap ");
8248  json.ContinueString(heapIndex);
8249  json.EndString();
8250  json.BeginObject();
8251 
8252  json.WriteString("Size");
8253  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
8254 
8255  json.WriteString("Flags");
8256  json.BeginArray(true);
8257  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
8258  {
8259  json.WriteString("DEVICE_LOCAL");
8260  }
8261  json.EndArray();
8262 
8263  if(stats.memoryHeap[heapIndex].blockCount > 0)
8264  {
8265  json.WriteString("Stats");
8266  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
8267  }
8268 
8269  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
8270  {
8271  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
8272  {
8273  json.BeginString("Type ");
8274  json.ContinueString(typeIndex);
8275  json.EndString();
8276 
8277  json.BeginObject();
8278 
8279  json.WriteString("Flags");
8280  json.BeginArray(true);
8281  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
8282  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
8283  {
8284  json.WriteString("DEVICE_LOCAL");
8285  }
8286  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8287  {
8288  json.WriteString("HOST_VISIBLE");
8289  }
8290  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
8291  {
8292  json.WriteString("HOST_COHERENT");
8293  }
8294  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
8295  {
8296  json.WriteString("HOST_CACHED");
8297  }
8298  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
8299  {
8300  json.WriteString("LAZILY_ALLOCATED");
8301  }
8302  json.EndArray();
8303 
8304  if(stats.memoryType[typeIndex].blockCount > 0)
8305  {
8306  json.WriteString("Stats");
8307  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
8308  }
8309 
8310  json.EndObject();
8311  }
8312  }
8313 
8314  json.EndObject();
8315  }
8316  if(detailedMap == VK_TRUE)
8317  {
8318  allocator->PrintDetailedMap(json);
8319  }
8320 
8321  json.EndObject();
8322  }
8323 
8324  const size_t len = sb.GetLength();
8325  char* const pChars = vma_new_array(allocator, char, len + 1);
8326  if(len > 0)
8327  {
8328  memcpy(pChars, sb.GetData(), len);
8329  }
8330  pChars[len] = '\0';
8331  *ppStatsString = pChars;
8332 }
8333 
8334 void vmaFreeStatsString(
8335  VmaAllocator allocator,
8336  char* pStatsString)
8337 {
8338  if(pStatsString != VMA_NULL)
8339  {
8340  VMA_ASSERT(allocator);
8341  size_t len = strlen(pStatsString);
8342  vma_delete_array(allocator, pStatsString, len + 1);
8343  }
8344 }
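// NOTE (editorial usage sketch, not part of the library): typical pairing of
// vmaBuildStatsString() and vmaFreeStatsString(). A valid `allocator` is
// assumed; printf requires <cstdio>.
#if 0
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
printf("%s\n", statsString); // JSON document describing heaps, types and pools
vmaFreeStatsString(allocator, statsString);
#endif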
8345 
8346 #endif // #if VMA_STATS_STRING_ENABLED
8347 
8348 /*
8349 This function is not protected by any mutex because it just reads immutable data.
8350 */
8351 VkResult vmaFindMemoryTypeIndex(
8352  VmaAllocator allocator,
8353  uint32_t memoryTypeBits,
8354  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8355  uint32_t* pMemoryTypeIndex)
8356 {
8357  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8358  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8359  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8360 
8361  if(pAllocationCreateInfo->memoryTypeBits != 0)
8362  {
8363  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8364  }
8365 
8366  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8367  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8368 
8369  // Convert usage to requiredFlags and preferredFlags.
8370  switch(pAllocationCreateInfo->usage)
8371  {
8372  case VMA_MEMORY_USAGE_UNKNOWN:
8373  break;
8374  case VMA_MEMORY_USAGE_GPU_ONLY:
8375  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8376  break;
8377  case VMA_MEMORY_USAGE_CPU_ONLY:
8378  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8379  break;
8380  case VMA_MEMORY_USAGE_CPU_TO_GPU:
8381  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8382  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8383  break;
8384  case VMA_MEMORY_USAGE_GPU_TO_CPU:
8385  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8386  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8387  break;
8388  default:
8389  break;
8390  }
8391 
8392  *pMemoryTypeIndex = UINT32_MAX;
8393  uint32_t minCost = UINT32_MAX;
8394  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8395  memTypeIndex < allocator->GetMemoryTypeCount();
8396  ++memTypeIndex, memTypeBit <<= 1)
8397  {
8398  // This memory type is acceptable according to memoryTypeBits bitmask.
8399  if((memTypeBit & memoryTypeBits) != 0)
8400  {
8401  const VkMemoryPropertyFlags currFlags =
8402  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8403  // This memory type contains requiredFlags.
8404  if((requiredFlags & ~currFlags) == 0)
8405  {
8406  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8407  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8408  // Remember memory type with lowest cost.
8409  if(currCost < minCost)
8410  {
8411  *pMemoryTypeIndex = memTypeIndex;
8412  if(currCost == 0)
8413  {
8414  return VK_SUCCESS;
8415  }
8416  minCost = currCost;
8417  }
8418  }
8419  }
8420  }
8421  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8422 }
8423 
8424 VkResult vmaFindMemoryTypeIndexForBufferInfo(
8425  VmaAllocator allocator,
8426  const VkBufferCreateInfo* pBufferCreateInfo,
8427  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8428  uint32_t* pMemoryTypeIndex)
8429 {
8430  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8431  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8432  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8433  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8434 
8435  const VkDevice hDev = allocator->m_hDevice;
8436  VkBuffer hBuffer = VK_NULL_HANDLE;
8437  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8438  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8439  if(res == VK_SUCCESS)
8440  {
8441  VkMemoryRequirements memReq = {};
8442  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8443  hDev, hBuffer, &memReq);
8444 
8445  res = vmaFindMemoryTypeIndex(
8446  allocator,
8447  memReq.memoryTypeBits,
8448  pAllocationCreateInfo,
8449  pMemoryTypeIndex);
8450 
8451  allocator->GetVulkanFunctions().vkDestroyBuffer(
8452  hDev, hBuffer, allocator->GetAllocationCallbacks());
8453  }
8454  return res;
8455 }
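// NOTE (editorial usage sketch, not part of the library): unlike
// vmaFindMemoryTypeIndex(), this variant derives memoryTypeBits itself by
// creating (and immediately destroying) a temporary buffer, as the function
// above shows. Useful e.g. for choosing VmaPoolCreateInfo::memoryTypeIndex.
// The size and usage values here are assumptions of this sketch.
#if 0
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 1024; // representative size; only memoryTypeBits matter
bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex;
vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
#endif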
8456 
8457 VkResult vmaFindMemoryTypeIndexForImageInfo(
8458  VmaAllocator allocator,
8459  const VkImageCreateInfo* pImageCreateInfo,
8460  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8461  uint32_t* pMemoryTypeIndex)
8462 {
8463  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8464  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8465  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8466  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8467 
8468  const VkDevice hDev = allocator->m_hDevice;
8469  VkImage hImage = VK_NULL_HANDLE;
8470  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8471  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8472  if(res == VK_SUCCESS)
8473  {
8474  VkMemoryRequirements memReq = {};
8475  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8476  hDev, hImage, &memReq);
8477 
8478  res = vmaFindMemoryTypeIndex(
8479  allocator,
8480  memReq.memoryTypeBits,
8481  pAllocationCreateInfo,
8482  pMemoryTypeIndex);
8483 
8484  allocator->GetVulkanFunctions().vkDestroyImage(
8485  hDev, hImage, allocator->GetAllocationCallbacks());
8486  }
8487  return res;
8488 }
8489 
8490 VkResult vmaCreatePool(
8491  VmaAllocator allocator,
8492  const VmaPoolCreateInfo* pCreateInfo,
8493  VmaPool* pPool)
8494 {
8495  VMA_ASSERT(allocator && pCreateInfo && pPool);
8496 
8497  VMA_DEBUG_LOG("vmaCreatePool");
8498 
8499  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8500 
8501  return allocator->CreatePool(pCreateInfo, pPool);
8502 }
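// NOTE (editorial usage sketch, not part of the library): creating and
// destroying a custom pool. `memTypeIndex` could come from the
// vmaFindMemoryTypeIndexForBufferInfo() sketch above; the block size and count
// are arbitrary values for illustration.
#if 0
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 0 would mean a default, calculated size
poolCreateInfo.maxBlockCount = 2;                // 0 would mean no limit (SIZE_MAX)

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool); // all allocations from the pool must be freed first
#endif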
8503 
8504 void vmaDestroyPool(
8505  VmaAllocator allocator,
8506  VmaPool pool)
8507 {
8508  VMA_ASSERT(allocator);
8509 
8510  if(pool == VK_NULL_HANDLE)
8511  {
8512  return;
8513  }
8514 
8515  VMA_DEBUG_LOG("vmaDestroyPool");
8516 
8517  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8518 
8519  allocator->DestroyPool(pool);
8520 }
8521 
8522 void vmaGetPoolStats(
8523  VmaAllocator allocator,
8524  VmaPool pool,
8525  VmaPoolStats* pPoolStats)
8526 {
8527  VMA_ASSERT(allocator && pool && pPoolStats);
8528 
8529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8530 
8531  allocator->GetPoolStats(pool, pPoolStats);
8532 }
8533 
8534 void vmaMakePoolAllocationsLost(
8535  VmaAllocator allocator,
8536  VmaPool pool,
8537  size_t* pLostAllocationCount)
8538 {
8539  VMA_ASSERT(allocator && pool);
8540 
8541  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8542 
8543  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8544 }
8545 
8546 VkResult vmaAllocateMemory(
8547  VmaAllocator allocator,
8548  const VkMemoryRequirements* pVkMemoryRequirements,
8549  const VmaAllocationCreateInfo* pCreateInfo,
8550  VmaAllocation* pAllocation,
8551  VmaAllocationInfo* pAllocationInfo)
8552 {
8553  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
8554 
8555  VMA_DEBUG_LOG("vmaAllocateMemory");
8556 
8557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8558 
8559  VkResult result = allocator->AllocateMemory(
8560  *pVkMemoryRequirements,
8561  false, // requiresDedicatedAllocation
8562  false, // prefersDedicatedAllocation
8563  VK_NULL_HANDLE, // dedicatedBuffer
8564  VK_NULL_HANDLE, // dedicatedImage
8565  *pCreateInfo,
8566  VMA_SUBALLOCATION_TYPE_UNKNOWN,
8567  pAllocation);
8568 
8569  if(pAllocationInfo && result == VK_SUCCESS)
8570  {
8571  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8572  }
8573 
8574  return result;
8575 }
8576 
8577 VkResult vmaAllocateMemoryForBuffer(
8578  VmaAllocator allocator,
8579  VkBuffer buffer,
8580  const VmaAllocationCreateInfo* pCreateInfo,
8581  VmaAllocation* pAllocation,
8582  VmaAllocationInfo* pAllocationInfo)
8583 {
8584  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8585 
8586  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8587 
8588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8589 
8590  VkMemoryRequirements vkMemReq = {};
8591  bool requiresDedicatedAllocation = false;
8592  bool prefersDedicatedAllocation = false;
8593  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
8594  requiresDedicatedAllocation,
8595  prefersDedicatedAllocation);
8596 
8597  VkResult result = allocator->AllocateMemory(
8598  vkMemReq,
8599  requiresDedicatedAllocation,
8600  prefersDedicatedAllocation,
8601  buffer, // dedicatedBuffer
8602  VK_NULL_HANDLE, // dedicatedImage
8603  *pCreateInfo,
8604  VMA_SUBALLOCATION_TYPE_BUFFER,
8605  pAllocation);
8606 
8607  if(pAllocationInfo && result == VK_SUCCESS)
8608  {
8609  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8610  }
8611 
8612  return result;
8613 }
8614 
8615 VkResult vmaAllocateMemoryForImage(
8616  VmaAllocator allocator,
8617  VkImage image,
8618  const VmaAllocationCreateInfo* pCreateInfo,
8619  VmaAllocation* pAllocation,
8620  VmaAllocationInfo* pAllocationInfo)
8621 {
8622  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8623 
8624  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
8625 
8626  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8627 
8628  VkResult result = AllocateMemoryForImage(
8629  allocator,
8630  image,
8631  pCreateInfo,
8632  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
8633  pAllocation);
8634 
8635  if(pAllocationInfo && result == VK_SUCCESS)
8636  {
8637  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
8638  }
8639 
8640  return result;
8641 }
8642 
8643 void vmaFreeMemory(
8644  VmaAllocator allocator,
8645  VmaAllocation allocation)
8646 {
8647  VMA_ASSERT(allocator && allocation);
8648 
8649  VMA_DEBUG_LOG("vmaFreeMemory");
8650 
8651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8652 
8653  allocator->FreeMemory(allocation);
8654 }
8655 
8656 void vmaGetAllocationInfo(
8657  VmaAllocator allocator,
8658  VmaAllocation allocation,
8659  VmaAllocationInfo* pAllocationInfo)
8660 {
8661  VMA_ASSERT(allocator && allocation && pAllocationInfo);
8662 
8663  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8664 
8665  allocator->GetAllocationInfo(allocation, pAllocationInfo);
8666 }
8667 
8668 bool vmaTouchAllocation(
8669  VmaAllocator allocator,
8670  VmaAllocation allocation)
8671 {
8672  VMA_ASSERT(allocator && allocation);
8673 
8674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8675 
8676  return allocator->TouchAllocation(allocation);
8677 }
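// NOTE (editorial usage sketch, not part of the library): vmaTouchAllocation()
// is the cheap way to both test and refresh an allocation created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT each frame, instead of calling
// vmaGetAllocationInfo() and inspecting its members.
#if 0
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(!vmaTouchAllocation(allocator, allocation))
{
    // Allocation became lost: free it and recreate the resource.
}
#endif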
8678 
8679 void vmaSetAllocationUserData(
8680  VmaAllocator allocator,
8681  VmaAllocation allocation,
8682  void* pUserData)
8683 {
8684  VMA_ASSERT(allocator && allocation);
8685 
8686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8687 
8688  allocation->SetUserData(allocator, pUserData);
8689 }
8690 
8691 void vmaCreateLostAllocation(
8692  VmaAllocator allocator,
8693  VmaAllocation* pAllocation)
8694 {
8695  VMA_ASSERT(allocator && pAllocation);
8696 
8697  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8698 
8699  allocator->CreateLostAllocation(pAllocation);
8700 }
8701 
8702 VkResult vmaMapMemory(
8703  VmaAllocator allocator,
8704  VmaAllocation allocation,
8705  void** ppData)
8706 {
8707  VMA_ASSERT(allocator && allocation && ppData);
8708 
8709  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8710 
8711  return allocator->Map(allocation, ppData);
8712 }
8713 
8714 void vmaUnmapMemory(
8715  VmaAllocator allocator,
8716  VmaAllocation allocation)
8717 {
8718  VMA_ASSERT(allocator && allocation);
8719 
8720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8721 
8722  allocator->Unmap(allocation);
8723 }
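// NOTE (editorial usage sketch, not part of the library): Map/Unmap are
// reference-counted per block, so nested mappings of allocations from the same
// VkDeviceMemory are fine. Assumes a HOST_VISIBLE allocation that cannot become
// lost; `srcData`/`srcSize` are assumptions of this sketch (memcpy is <cstring>).
#if 0
void* pData = nullptr;
if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
}
#endif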
8724 
8725 VkResult vmaDefragment(
8726  VmaAllocator allocator,
8727  VmaAllocation* pAllocations,
8728  size_t allocationCount,
8729  VkBool32* pAllocationsChanged,
8730  const VmaDefragmentationInfo *pDefragmentationInfo,
8731  VmaDefragmentationStats* pDefragmentationStats)
8732 {
8733  VMA_ASSERT(allocator && pAllocations);
8734 
8735  VMA_DEBUG_LOG("vmaDefragment");
8736 
8737  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8738 
8739  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
8740 }
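// NOTE (editorial usage sketch, not part of the library): a typical
// defragmentation pass over previously collected HOST_VISIBLE allocations.
// Afterwards, resources whose pAllocationsChanged entry is VK_TRUE must be
// destroyed, recreated and rebound, because their memory/offset changed.
// `allocations` is assumed to be a std::vector<VmaAllocation> (<vector>).
#if 0
std::vector<VkBool32> changed(allocations.size());

VmaDefragmentationInfo defragInfo = {};
defragInfo.maxBytesToMove = VK_WHOLE_SIZE;    // no byte limit
defragInfo.maxAllocationsToMove = UINT32_MAX; // no count limit

VmaDefragmentationStats stats = {};
VkResult res = vmaDefragment(allocator, allocations.data(), allocations.size(),
    changed.data(), &defragInfo, &stats);
// stats.bytesMoved, stats.bytesFreed, stats.deviceMemoryBlocksFreed summarize the pass.
#endif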
8741 
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);
        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = (*allocator->GetVulkanFunctions().vkBindBufferMemory)(
                allocator->m_hDevice,
                *pBuffer,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}

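// Usage sketch (illustrative): the recommended one-call path. Creates the
// buffer, allocates memory for it, and binds them together; on failure the
// unwinding code above ensures nothing is leaked. Assumes a valid `allocator`.
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkBuffer buf;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
//     // ... use the buffer, then:
//     vmaDestroyBuffer(allocator, buf, alloc);
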
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    if(buffer != VK_NULL_HANDLE)
    {
        VMA_ASSERT(allocator);

        VMA_DEBUG_LOG("vmaDestroyBuffer");

        VMA_DEBUG_GLOBAL_MUTEX_LOCK

        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());

        allocator->FreeMemory(allocation);
    }
}

VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = (*allocator->GetVulkanFunctions().vkBindImageMemory)(
                allocator->m_hDevice,
                *pImage,
                (*pAllocation)->GetMemory(),
                (*pAllocation)->GetOffset());
            if(res >= 0)
            {
                // All steps succeeded.
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }
                return VK_SUCCESS;
            }
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}

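// Usage sketch (illustrative): the same one-call pattern for images. Note the
// suballocation type above depends on tiling, so linear and optimal resources
// are kept apart inside memory blocks. Assumes a valid `allocator`.
//
//     VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//     imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//     imgCreateInfo.extent = { 1024, 1024, 1 };
//     imgCreateInfo.mipLevels = 1;
//     imgCreateInfo.arrayLayers = 1;
//     imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//     imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//     imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VkImage img;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, NULL);
//     // ... use the image, then:
//     vmaDestroyImage(allocator, img, alloc);
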
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    if(image != VK_NULL_HANDLE)
    {
        VMA_ASSERT(allocator);

        VMA_DEBUG_LOG("vmaDestroyImage");

        VMA_DEBUG_GLOBAL_MUTEX_LOCK

        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());

        allocator->FreeMemory(allocation);
    }
}

#endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties  Definition: vk_mem_alloc.h:896
Set this flag if the allocation should have its own memory block.  Definition: vk_mem_alloc.h:1150
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)  Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice  Vulkan physical device.  Definition: vk_mem_alloc.h:921
@@ -72,12 +72,12 @@ $(function() {
struct VmaStats VmaStats  General statistics from current state of Allocator.  Definition: vk_mem_alloc.h:1107
PFN_vkMapMemory vkMapMemory  Definition: vk_mem_alloc.h:900
-VkDeviceMemory deviceMemory  Handle to Vulkan memory object.  Definition: vk_mem_alloc.h:1418
+VkDeviceMemory deviceMemory  Handle to Vulkan memory object.  Definition: vk_mem_alloc.h:1456
VmaAllocatorCreateFlags flags  Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.  Definition: vk_mem_alloc.h:918
-uint32_t maxAllocationsToMove  Maximum number of allocations that can be moved to different place.  Definition: vk_mem_alloc.h:1584
-Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...  Definition: vk_mem_alloc.h:1288
+uint32_t maxAllocationsToMove  Maximum number of allocations that can be moved to different place.  Definition: vk_mem_alloc.h:1631
+Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...  Definition: vk_mem_alloc.h:1326
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)  Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
-VkDeviceSize size  Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.  Definition: vk_mem_alloc.h:1342
+VkDeviceSize size  Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.  Definition: vk_mem_alloc.h:1380
Definition: vk_mem_alloc.h:1187
VkFlags VmaAllocatorCreateFlags  Definition: vk_mem_alloc.h:889
VkMemoryPropertyFlags preferredFlags  Flags that preferably should be set in a memory type chosen for an allocation.  Definition: vk_mem_alloc.h:1225
@@ -93,34 +93,34 @@ $(function() {
PFN_vkBindImageMemory vkBindImageMemory  Definition: vk_mem_alloc.h:903
VkDeviceSize unusedBytes  Total number of bytes occupied by unused ranges.  Definition: vk_mem_alloc.h:1047
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR  Definition: vk_mem_alloc.h:911
-Statistics returned by function vmaDefragment().  Definition: vk_mem_alloc.h:1588
+Statistics returned by function vmaDefragment().  Definition: vk_mem_alloc.h:1635
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)  Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount  Maximum number of additional frames that are in use at the same time as current frame.  Definition: vk_mem_alloc.h:947
VmaStatInfo total  Definition: vk_mem_alloc.h:1057
-uint32_t deviceMemoryBlocksFreed  Number of empty VkDeviceMemory objects that have been released to the system.  Definition: vk_mem_alloc.h:1596
+uint32_t deviceMemoryBlocksFreed  Number of empty VkDeviceMemory objects that have been released to the system.  Definition: vk_mem_alloc.h:1643
VmaAllocationCreateFlags flags  Use VmaAllocationCreateFlagBits enum.  Definition: vk_mem_alloc.h:1209
-VkDeviceSize maxBytesToMove  Maximum total number of bytes that can be copied while moving allocations to different places...  Definition: vk_mem_alloc.h:1579
+VkDeviceSize maxBytesToMove  Maximum total number of bytes that can be copied while moving allocations to different places...  Definition: vk_mem_alloc.h:1626
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements  Definition: vk_mem_alloc.h:904
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)  Callback function called after successful vkAllocateMemory.  Definition: vk_mem_alloc.h:831
VkDevice device  Vulkan device.  Definition: vk_mem_alloc.h:924
-Describes parameter of created VmaPool.  Definition: vk_mem_alloc.h:1296
-Definition: vk_mem_alloc.h:1290
-VkDeviceSize size  Size of this allocation, in bytes.  Definition: vk_mem_alloc.h:1428
+Describes parameter of created VmaPool.  Definition: vk_mem_alloc.h:1334
+Definition: vk_mem_alloc.h:1328
+VkDeviceSize size  Size of this allocation, in bytes.  Definition: vk_mem_alloc.h:1466
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)  Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory  Definition: vk_mem_alloc.h:901
void * pUserData  Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...  Definition: vk_mem_alloc.h:1246
-size_t minBlockCount  Minimum number of blocks to be always allocated in this pool, even if they stay empty.  Definition: vk_mem_alloc.h:1312
-size_t allocationCount  Number of VmaAllocation objects created from this pool that were not destroyed or lost...  Definition: vk_mem_alloc.h:1348
+size_t minBlockCount  Minimum number of blocks to be always allocated in this pool, even if they stay empty.  Definition: vk_mem_alloc.h:1350
+size_t allocationCount  Number of VmaAllocation objects created from this pool that were not destroyed or lost...  Definition: vk_mem_alloc.h:1386
struct VmaVulkanFunctions VmaVulkanFunctions  Pointers to some Vulkan functions - a subset used by the library.  Definition: vk_mem_alloc.h:887
-uint32_t memoryTypeIndex  Vulkan memory type index to allocate this pool from.  Definition: vk_mem_alloc.h:1299
-VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
+uint32_t memoryTypeIndex  Vulkan memory type index to allocate this pool from.  Definition: vk_mem_alloc.h:1337
+VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)  Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VmaMemoryUsage  Definition: vk_mem_alloc.h:1085
struct VmaAllocationInfo VmaAllocationInfo  Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
-Optional configuration parameters to be passed to function vmaDefragment().  Definition: vk_mem_alloc.h:1574
+Optional configuration parameters to be passed to function vmaDefragment().  Definition: vk_mem_alloc.h:1621
struct VmaPoolCreateInfo VmaPoolCreateInfo  Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)  Destroys VmaPool object and frees Vulkan device memory.
-VkDeviceSize bytesFreed  Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...  Definition: vk_mem_alloc.h:1592
+VkDeviceSize bytesFreed  Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...  Definition: vk_mem_alloc.h:1639
Definition: vk_mem_alloc.h:1124
uint32_t memoryTypeBits  Bitmask containing one bit set for every memory type acceptable for this allocation.  Definition: vk_mem_alloc.h:1233
PFN_vkBindBufferMemory vkBindBufferMemory  Definition: vk_mem_alloc.h:902
@@ -133,30 +133,32 @@ $(function() {
VmaAllocatorCreateFlagBits  Flags for created VmaAllocator.  Definition: vk_mem_alloc.h:858
struct VmaStatInfo VmaStatInfo  Calculated statistics of memory usage in entire allocator.
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...  Definition: vk_mem_alloc.h:863
-uint32_t allocationsMoved  Number of allocations that have been moved to different places.  Definition: vk_mem_alloc.h:1594
+uint32_t allocationsMoved  Number of allocations that have been moved to different places.  Definition: vk_mem_alloc.h:1641
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)  Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags  Flags that must be set in a Memory Type chosen for an allocation.  Definition: vk_mem_alloc.h:1220
-VkDeviceSize unusedRangeSizeMax  Size of the largest continuous free memory region.  Definition: vk_mem_alloc.h:1358
+VkDeviceSize unusedRangeSizeMax  Size of the largest continuous free memory region.  Definition: vk_mem_alloc.h:1396
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)  Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties  Definition: vk_mem_alloc.h:897
Calculated statistics of memory usage in entire allocator.  Definition: vk_mem_alloc.h:1036
-VkDeviceSize blockSize  Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.  Definition: vk_mem_alloc.h:1307
+VkDeviceSize blockSize  Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.  Definition: vk_mem_alloc.h:1345
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.  Definition: vk_mem_alloc.h:850
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:1194
VkDeviceSize unusedRangeSizeMin  Definition: vk_mem_alloc.h:1049
PFN_vmaFreeDeviceMemoryFunction pfnFree  Optional, can be null.  Definition: vk_mem_alloc.h:854
-VmaPoolCreateFlags flags  Use combination of VmaPoolCreateFlagBits.  Definition: vk_mem_alloc.h:1302
+bool vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)  TODO finish documentation...
+VmaPoolCreateFlags flags  Use combination of VmaPoolCreateFlagBits.  Definition: vk_mem_alloc.h:1340
Definition: vk_mem_alloc.h:1133
struct VmaPoolStats VmaPoolStats  Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)  Function similar to vmaCreateBuffer().
VmaMemoryUsage usage  Intended usage of memory.  Definition: vk_mem_alloc.h:1215
Definition: vk_mem_alloc.h:1206
+VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)  Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount  Number of VkDeviceMemory Vulkan memory blocks allocated.  Definition: vk_mem_alloc.h:1039
PFN_vkFreeMemory vkFreeMemory  Definition: vk_mem_alloc.h:899
-size_t maxBlockCount  Maximum number of blocks that can be allocated in this pool.  Definition: vk_mem_alloc.h:1320
+size_t maxBlockCount  Maximum number of blocks that can be allocated in this pool.  Definition: vk_mem_alloc.h:1358
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks  Informative callbacks for vkAllocateMemory, vkFreeMemory.  Definition: vk_mem_alloc.h:933
-size_t unusedRangeCount  Number of continuous memory ranges in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:1351
+size_t unusedRangeCount  Number of continuous memory ranges in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:1389
VkFlags VmaAllocationCreateFlags  Definition: vk_mem_alloc.h:1204
VmaPool pool  Pool that this allocation should be created in.  Definition: vk_mem_alloc.h:1239
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
@@ -164,52 +166,53 @@ $(function() {
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]  Definition: vk_mem_alloc.h:1055
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...  Definition: vk_mem_alloc.h:1174
VkDeviceSize allocationSizeMin  Definition: vk_mem_alloc.h:1048
+VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)  Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage  Definition: vk_mem_alloc.h:908
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate  Optional, can be null.  Definition: vk_mem_alloc.h:852
PFN_vkDestroyBuffer vkDestroyBuffer  Definition: vk_mem_alloc.h:907
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)  Maps memory represented by given allocation and returns pointer to it.
-uint32_t frameInUseCount  Maximum number of additional frames that are in use at the same time as current frame.  Definition: vk_mem_alloc.h:1334
+uint32_t frameInUseCount  Maximum number of additional frames that are in use at the same time as current frame.  Definition: vk_mem_alloc.h:1372
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)  Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo  Description of an Allocator to be created.
-void * pUserData  Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...  Definition: vk_mem_alloc.h:1442
+void * pUserData  Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...  Definition: vk_mem_alloc.h:1480
VkDeviceSize preferredLargeHeapBlockSize  Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...  Definition: vk_mem_alloc.h:927
VkDeviceSize allocationSizeAvg  Definition: vk_mem_alloc.h:1048
VkDeviceSize usedBytes  Total number of bytes occupied by all allocations.  Definition: vk_mem_alloc.h:1045
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks  Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
-Describes parameter of existing VmaPool.  Definition: vk_mem_alloc.h:1339
-VkDeviceSize offset  Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.  Definition: vk_mem_alloc.h:1423
+Describes parameter of existing VmaPool.  Definition: vk_mem_alloc.h:1377
+VkDeviceSize offset  Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.  Definition: vk_mem_alloc.h:1461
Definition: vk_mem_alloc.h:1202
-VkDeviceSize bytesMoved  Total number of bytes that have been copied while moving allocations to different places...  Definition: vk_mem_alloc.h:1590
+VkDeviceSize bytesMoved  Total number of bytes that have been copied while moving allocations to different places...  Definition: vk_mem_alloc.h:1637
Pointers to some Vulkan functions - a subset used by the library.  Definition: vk_mem_alloc.h:895
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)  Creates Allocator object.
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR  Definition: vk_mem_alloc.h:910
uint32_t unusedRangeCount  Number of free ranges of memory between allocations.  Definition: vk_mem_alloc.h:1043
Definition: vk_mem_alloc.h:1090
-VkFlags VmaPoolCreateFlags  Definition: vk_mem_alloc.h:1292
+VkFlags VmaPoolCreateFlags  Definition: vk_mem_alloc.h:1330
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
uint32_t allocationCount  Number of VmaAllocation allocation objects allocated.  Definition: vk_mem_alloc.h:1041
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements  Definition: vk_mem_alloc.h:905
PFN_vkDestroyImage vkDestroyImage  Definition: vk_mem_alloc.h:909
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...  Definition: vk_mem_alloc.h:1161
Definition: vk_mem_alloc.h:1117
-void * pMappedData  Pointer to the beginning of this allocation as mapped data.  Definition: vk_mem_alloc.h:1437
+void * pMappedData  Pointer to the beginning of this allocation as mapped data.  Definition: vk_mem_alloc.h:1475
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)  Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.  Definition: vk_mem_alloc.h:885
struct VmaDefragmentationStats VmaDefragmentationStats  Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory  Definition: vk_mem_alloc.h:898
-Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().  Definition: vk_mem_alloc.h:1404
+Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().  Definition: vk_mem_alloc.h:1442
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)  General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)  Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
-VmaPoolCreateFlagBits  Flags to be passed as VmaPoolCreateInfo::flags.  Definition: vk_mem_alloc.h:1270
+VmaPoolCreateFlagBits  Flags to be passed as VmaPoolCreateInfo::flags.  Definition: vk_mem_alloc.h:1308
VkDeviceSize unusedRangeSizeAvg  Definition: vk_mem_alloc.h:1049
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]  Definition: vk_mem_alloc.h:1056
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)  Destroys Vulkan buffer and frees allocated memory.
-VkDeviceSize unusedSize  Total number of bytes in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:1345
+VkDeviceSize unusedSize  Total number of bytes in the pool not used by any VmaAllocation.  Definition: vk_mem_alloc.h:1383
VkDeviceSize unusedRangeSizeMax  Definition: vk_mem_alloc.h:1049
-uint32_t memoryType  Memory type index that this allocation was allocated from.  Definition: vk_mem_alloc.h:1409
+uint32_t memoryType  Memory type index that this allocation was allocated from.  Definition: vk_mem_alloc.h:1447