Renames in the library and fixes in tests for the new defragmentation

Adam Sawicki 2019-12-23 16:38:31 +01:00
parent a52012de37
commit c467e28f2f
2 changed files with 110 additions and 73 deletions
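In short: the incremental defragmentation API drops the "step" terminology in favor of "pass". VmaDefragmentationStepInfo becomes VmaDefragmentationPassInfo, VmaDefragmentationStepMoveInfo becomes VmaDefragmentationPassMoveInfo, vmaDefragmentationStepBegin() becomes vmaBeginDefragmentationPass() (with the VmaDefragmentationContext parameter moved ahead of the info pointer), and vmaDefragmentationStepEnd() becomes vmaEndDefragmentationPass(). vkCmdCopyImage is removed from VmaVulkanFunctions, and the tests now carry replacement resources in new AllocInfo members (m_NewBuffer, m_NewImage) instead of a local newHandles vector, destroying the old handles after each pass.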

src/Tests.cpp

@@ -696,6 +696,10 @@ struct AllocInfo
         VkImageCreateInfo m_ImageInfo;
     };
 
+    // After defragmentation.
+    VkBuffer m_NewBuffer = VK_NULL_HANDLE;
+    VkImage m_NewImage = VK_NULL_HANDLE;
+
     void CreateBuffer(
         const VkBufferCreateInfo& bufCreateInfo,
         const VmaAllocationCreateInfo& allocCreateInfo);
@@ -729,11 +733,13 @@ void AllocInfo::Destroy()
 {
     if(m_Image)
     {
+        assert(!m_Buffer);
         vkDestroyImage(g_hDevice, m_Image, g_Allocs);
         m_Image = VK_NULL_HANDLE;
     }
     if(m_Buffer)
     {
+        assert(!m_Image);
         vkDestroyBuffer(g_hDevice, m_Buffer, g_Allocs);
         m_Buffer = VK_NULL_HANDLE;
     }
@@ -922,7 +928,7 @@ static void UploadGpuData(const AllocInfo* allocInfo, size_t allocInfoCount)
             TEST(currAllocInfo.m_ImageInfo.format == VK_FORMAT_R8G8B8A8_UNORM && "Only RGBA8 images are currently supported.");
             TEST(currAllocInfo.m_ImageInfo.mipLevels == 1 && "Only single mip images are currently supported.");
 
-            const VkDeviceSize size = currAllocInfo.m_ImageInfo.extent.width * currAllocInfo.m_ImageInfo.extent.height * sizeof(uint32_t);
+            const VkDeviceSize size = (VkDeviceSize)currAllocInfo.m_ImageInfo.extent.width * currAllocInfo.m_ImageInfo.extent.height * sizeof(uint32_t);
 
             VkBuffer stagingBuf = VK_NULL_HANDLE;
             void* stagingBufMappedPtr = nullptr;
@@ -1850,7 +1856,7 @@ static void TestDefragmentationGpu()
     g_MemoryAliasingWarningEnabled = true;
 }
 
-static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
+static void ProcessDefragmentationStepInfo(VmaDefragmentationPassInfo &stepInfo)
 {
     std::vector<VkImageMemoryBarrier> beginImageBarriers;
     std::vector<VkImageMemoryBarrier> finalizeImageBarriers;
@@ -1866,9 +1872,7 @@ static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
     VkMemoryBarrier beginMemoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
     VkMemoryBarrier finalizeMemoryBarrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
 
-    std::vector<void *> newHandles;
-
-    for(uint32_t i = 0; i < stepInfo.moveCount; ++ i)
+    for(uint32_t i = 0; i < stepInfo.moveCount; ++i)
     {
         VmaAllocationInfo info;
         vmaGetAllocationInfo(g_hAllocator, stepInfo.pMoves[i].allocation, &info);
@@ -1883,7 +1887,7 @@ static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
             TEST(result >= VK_SUCCESS);
 
             vkBindImageMemory(g_hDevice, newImage, stepInfo.pMoves[i].memory, stepInfo.pMoves[i].offset);
-            newHandles.push_back(newImage);
+            allocInfo->m_NewImage = newImage;
 
             // Keep track of our pipeline stages that we need to wait/signal on
             beginSrcStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
@@ -1937,7 +1941,7 @@ static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
             TEST(result >= VK_SUCCESS);
 
             vkBindBufferMemory(g_hDevice, newBuffer, stepInfo.pMoves[i].memory, stepInfo.pMoves[i].offset);
-            newHandles.push_back(newBuffer);
+            allocInfo->m_NewBuffer = newBuffer;
 
             // Keep track of our pipeline stages that we need to wait/signal on
             beginSrcStageMask |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
@@ -2006,13 +2010,8 @@ static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
             vkCmdCopyImage(
                 g_hTemporaryCommandBuffer,
                 allocInfo->m_Image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                (VkImage)newHandles[i], VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                allocInfo->m_NewImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                 (uint32_t)imageCopies.size(), imageCopies.data());
 
             imageCopies.clear();
-
-            // Update our alloc info with the new resource to be used
-            allocInfo->m_Image = (VkImage)newHandles[i];
         }
         else if(allocInfo->m_Buffer)
         {
@@ -2022,16 +2021,11 @@ static void ProcessDefragmentationStepInfo(VmaDefragmentationStepInfo &stepInfo)
                 allocInfo->m_BufferInfo.size };
 
             vkCmdCopyBuffer(g_hTemporaryCommandBuffer,
-                allocInfo->m_Buffer, (VkBuffer)newHandles[i],
+                allocInfo->m_Buffer, allocInfo->m_NewBuffer,
                 1, &region);
-
-            // Update our alloc info with the new resource to be used
-            allocInfo->m_Buffer = (VkBuffer)newHandles[i];
         }
     }
 
     if(!finalizeImageBarriers.empty() || wantsMemoryBarrier)
     {
         const uint32_t memoryBarrierCount = wantsMemoryBarrier ? 1 : 0;
@@ -2056,7 +2050,7 @@ static void TestDefragmentationIncrementalBasic()
     const VkDeviceSize bufSizeMin = 5ull * 1024 * 1024;
    const VkDeviceSize bufSizeMax = 10ull * 1024 * 1024;
     const VkDeviceSize totalSize = 3ull * 256 * 1024 * 1024;
-    const size_t imageCount = (size_t)(totalSize / (imageSizes[0] * imageSizes[0] * 4)) / 2;
+    const size_t imageCount = totalSize / ((size_t)imageSizes[0] * imageSizes[0] * 4) / 2;
     const size_t bufCount = (size_t)(totalSize / bufSizeMin) / 2;
     const size_t percentToLeave = 30;
     RandomNumberGenerator rand = { 234522 };
@@ -2142,9 +2136,6 @@ static void TestDefragmentationIncrementalBasic()
 
     for(size_t i = 0; i < allocCount; ++i)
     {
-        VmaAllocationInfo allocInfo = {};
-        vmaGetAllocationInfo(g_hAllocator, allocations[i].m_Allocation, &allocInfo);
-
         allocationPtrs.push_back(allocations[i].m_Allocation);
     }
 
@@ -2164,23 +2155,49 @@ static void TestDefragmentationIncrementalBasic()
     res = VK_NOT_READY;
 
-    std::vector<VmaDefragmentationStepMoveInfo> moveInfo;
+    std::vector<VmaDefragmentationPassMoveInfo> moveInfo;
     moveInfo.resize(movableAllocCount);
 
     while(res == VK_NOT_READY)
     {
-        VmaDefragmentationStepInfo stepInfo = {};
+        VmaDefragmentationPassInfo stepInfo = {};
         stepInfo.pMoves = moveInfo.data();
         stepInfo.moveCount = (uint32_t)moveInfo.size();
 
-        res = vmaDefragmentationStepBegin(g_hAllocator, &stepInfo, ctx);
+        res = vmaBeginDefragmentationPass(g_hAllocator, ctx, &stepInfo);
         TEST(res >= VK_SUCCESS);
 
         BeginSingleTimeCommands();
-        std::vector<void*> newHandles;
         ProcessDefragmentationStepInfo(stepInfo);
         EndSingleTimeCommands();
 
-        res = vmaDefragmentationStepEnd(g_hAllocator, ctx);
+        res = vmaEndDefragmentationPass(g_hAllocator, ctx);
+
+        // Destroy old buffers/images and replace them with new handles.
+        for(size_t i = 0; i < stepInfo.moveCount; ++i)
+        {
+            VmaAllocation const alloc = stepInfo.pMoves[i].allocation;
+            VmaAllocationInfo vmaAllocInfo;
+            vmaGetAllocationInfo(g_hAllocator, alloc, &vmaAllocInfo);
+            AllocInfo* allocInfo = (AllocInfo*)vmaAllocInfo.pUserData;
+            if(allocInfo->m_Buffer)
+            {
+                assert(allocInfo->m_NewBuffer && !allocInfo->m_Image && !allocInfo->m_NewImage);
+                vkDestroyBuffer(g_hDevice, allocInfo->m_Buffer, g_Allocs);
+                allocInfo->m_Buffer = allocInfo->m_NewBuffer;
+                allocInfo->m_NewBuffer = VK_NULL_HANDLE;
+            }
+            else if(allocInfo->m_Image)
+            {
+                assert(allocInfo->m_NewImage && !allocInfo->m_Buffer && !allocInfo->m_NewBuffer);
+                vkDestroyImage(g_hDevice, allocInfo->m_Image, g_Allocs);
+                allocInfo->m_Image = allocInfo->m_NewImage;
+                allocInfo->m_NewImage = VK_NULL_HANDLE;
+            }
+            else
+                assert(0);
+        }
     }
 
     TEST(res >= VK_SUCCESS);
@@ -2199,7 +2216,7 @@ static void TestDefragmentationIncrementalBasic()
     swprintf_s(fileName, L"GPU_defragmentation_incremental_basic_B_after.json");
     SaveAllocatorStatsToFile(fileName);
 
-    // Destroy all remaining buffers.
+    // Destroy all remaining buffers and images.
     for(size_t i = allocations.size(); i--; )
     {
         allocations[i].Destroy();
@@ -2343,18 +2360,18 @@ void TestDefragmentationIncrementalComplex()
     res = VK_NOT_READY;
 
-    std::vector<VmaDefragmentationStepMoveInfo> moveInfo;
+    std::vector<VmaDefragmentationPassMoveInfo> moveInfo;
     moveInfo.resize(movableAllocCount);
 
     MakeAdditionalAllocation();
 
     while(res == VK_NOT_READY)
     {
-        VmaDefragmentationStepInfo stepInfo = {};
+        VmaDefragmentationPassInfo stepInfo = {};
         stepInfo.pMoves = moveInfo.data();
         stepInfo.moveCount = (uint32_t)moveInfo.size();
 
-        res = vmaDefragmentationStepBegin(g_hAllocator, &stepInfo, ctx);
+        res = vmaBeginDefragmentationPass(g_hAllocator, ctx, &stepInfo);
         TEST(res >= VK_SUCCESS);
 
         MakeAdditionalAllocation();
@@ -2363,7 +2380,32 @@ void TestDefragmentationIncrementalComplex()
         ProcessDefragmentationStepInfo(stepInfo);
         EndSingleTimeCommands();
 
-        res = vmaDefragmentationStepEnd(g_hAllocator, ctx);
+        res = vmaEndDefragmentationPass(g_hAllocator, ctx);
+
+        // Destroy old buffers/images and replace them with new handles.
+        for(size_t i = 0; i < stepInfo.moveCount; ++i)
+        {
+            VmaAllocation const alloc = stepInfo.pMoves[i].allocation;
+            VmaAllocationInfo vmaAllocInfo;
+            vmaGetAllocationInfo(g_hAllocator, alloc, &vmaAllocInfo);
+            AllocInfo* allocInfo = (AllocInfo*)vmaAllocInfo.pUserData;
+            if(allocInfo->m_Buffer)
+            {
+                assert(allocInfo->m_NewBuffer && !allocInfo->m_Image && !allocInfo->m_NewImage);
+                vkDestroyBuffer(g_hDevice, allocInfo->m_Buffer, g_Allocs);
+                allocInfo->m_Buffer = allocInfo->m_NewBuffer;
+                allocInfo->m_NewBuffer = VK_NULL_HANDLE;
+            }
+            else if(allocInfo->m_Image)
+            {
+                assert(allocInfo->m_NewImage && !allocInfo->m_Buffer && !allocInfo->m_NewBuffer);
+                vkDestroyImage(g_hDevice, allocInfo->m_Image, g_Allocs);
+                allocInfo->m_Image = allocInfo->m_NewImage;
+                allocInfo->m_NewImage = VK_NULL_HANDLE;
+            }
+            else
+                assert(0);
+        }
 
         MakeAdditionalAllocation();
     }
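Both tests now share the same per-pass structure. A condensed sketch of that loop, assembled from the hunks above (g_hAllocator, g_hDevice, g_Allocs, ctx, movableAllocCount, and the single-time-command helpers are the fixtures already defined in this file):

    std::vector<VmaDefragmentationPassMoveInfo> moveInfo(movableAllocCount);
    VkResult res = VK_NOT_READY;
    while(res == VK_NOT_READY)
    {
        VmaDefragmentationPassInfo stepInfo = {};
        stepInfo.pMoves = moveInfo.data();
        stepInfo.moveCount = (uint32_t)moveInfo.size();

        // Begin the pass; on return, stepInfo lists the allocations to move.
        res = vmaBeginDefragmentationPass(g_hAllocator, ctx, &stepInfo);
        TEST(res >= VK_SUCCESS);

        // Create the new resources, bind them to the destination memory,
        // and record the copy commands (fills m_NewBuffer / m_NewImage).
        BeginSingleTimeCommands();
        ProcessDefragmentationStepInfo(stepInfo);
        EndSingleTimeCommands();

        // The loop continues as long as the context reports VK_NOT_READY.
        res = vmaEndDefragmentationPass(g_hAllocator, ctx);

        // Retire the old handle of every moved resource and adopt the new one.
        for(size_t i = 0; i < stepInfo.moveCount; ++i)
        {
            VmaAllocationInfo info;
            vmaGetAllocationInfo(g_hAllocator, stepInfo.pMoves[i].allocation, &info);
            AllocInfo* allocInfo = (AllocInfo*)info.pUserData;
            if(allocInfo->m_Buffer)
            {
                vkDestroyBuffer(g_hDevice, allocInfo->m_Buffer, g_Allocs);
                allocInfo->m_Buffer = allocInfo->m_NewBuffer;
                allocInfo->m_NewBuffer = VK_NULL_HANDLE;
            }
            else if(allocInfo->m_Image)
            {
                vkDestroyImage(g_hDevice, allocInfo->m_Image, g_Allocs);
                allocInfo->m_Image = allocInfo->m_NewImage;
                allocInfo->m_NewImage = VK_NULL_HANDLE;
            }
        }
    }
    TEST(res >= VK_SUCCESS);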

src/vk_mem_alloc.h

@@ -1952,7 +1952,6 @@ typedef struct VmaVulkanFunctions {
     PFN_vkCreateImage vkCreateImage;
     PFN_vkDestroyImage vkDestroyImage;
     PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
-    PFN_vkCmdCopyImage vkCmdCopyImage;
 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
     PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
     PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
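A note on the removal above: with the pass-based API the application records the copy commands itself — the updated tests call vkCmdCopyBuffer and vkCmdCopyImage on their own command buffer — so the library no longer needs a vkCmdCopyImage entry point of its own; the matching import, copy, and assert lines disappear further down.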
@@ -3193,20 +3192,20 @@ typedef struct VmaDefragmentationInfo2 {
     VkCommandBuffer commandBuffer;
 } VmaDefragmentationInfo2;
 
-typedef struct VmaDefragmentationStepMoveInfo {
+typedef struct VmaDefragmentationPassMoveInfo {
     VmaAllocation allocation;
     VkDeviceMemory memory;
     VkDeviceSize offset;
-} VmaDefragmentationStepMoveInfo;
+} VmaDefragmentationPassMoveInfo;
 
 /** \brief Parameters for incremental defragmentation steps.
 
-To be used with function vmaDefragmentationStepBegin().
+To be used with function vmaBeginDefragmentationPass().
 */
-typedef struct VmaDefragmentationStepInfo {
+typedef struct VmaDefragmentationPassInfo {
     uint32_t moveCount;
-    VmaDefragmentationStepMoveInfo* pMoves;
-} VmaDefragmentationStepInfo;
+    VmaDefragmentationPassMoveInfo* pMoves;
+} VmaDefragmentationPassInfo;
 
 /** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
@@ -3281,12 +3280,12 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
     VmaAllocator allocator,
     VmaDefragmentationContext context);
 
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationStepBegin(
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
     VmaAllocator allocator,
-    VmaDefragmentationStepInfo* pInfo,
-    VmaDefragmentationContext context
+    VmaDefragmentationContext context,
+    VmaDefragmentationPassInfo* pInfo
 );
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationStepEnd(
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
     VmaAllocator allocator,
     VmaDefragmentationContext context
 );
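Note that the renamed entry point also reorders its arguments: the context now comes before the pass-info pointer. A minimal calling sequence under the new names (allocator, ctx, and a fixed-size move buffer are assumed to be set up already, as in the tests):

    VmaDefragmentationPassMoveInfo moves[16] = {};
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = 16;
    passInfo.pMoves = moves;

    VkResult res = VK_NOT_READY;
    while(res == VK_NOT_READY)
    {
        res = vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
        // ... recreate the resources listed in passInfo.pMoves and copy their contents ...
        res = vmaEndDefragmentationPass(allocator, ctx);
    }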
@@ -3733,10 +3732,10 @@ void *aligned_alloc(size_t alignment, size_t size)
     VmaRWMutex() { InitializeSRWLock(&m_Lock); }
     void LockRead() { AcquireSRWLockShared(&m_Lock); }
     void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-    bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock); }
+    bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
     void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
     void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-    bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock); }
+    bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
 private:
     SRWLOCK m_Lock;
 };
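The added != FALSE comparisons only make the conversion from the Win32 BOOLEAN return value to bool explicit; behavior is unchanged, and presumably this silences compiler warnings about the implicit conversion.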
@@ -6357,7 +6356,7 @@ public:
 
     uint32_t ProcessDefragmentations(
         class VmaBlockVectorDefragmentationContext *pCtx,
-        VmaDefragmentationStepMoveInfo* pMove, uint32_t maxMoves);
+        VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
 
     void CommitDefragmentations(
         class VmaBlockVectorDefragmentationContext *pCtx,
@@ -6395,8 +6394,6 @@ private:
 
     VkDeviceSize CalcMaxBlockSize() const;
 
-    static VkImageAspectFlags ImageAspectMaskForFormat(VkFormat format);
-
     // Finds and removes given block from vector.
     void Remove(VmaDeviceMemoryBlock* pBlock);
@@ -6896,8 +6893,8 @@ public:
         VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
         VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
 
-    VkResult DefragmentStepBegin(VmaDefragmentationStepInfo* pInfo);
-    VkResult DefragmentStepEnd();
+    VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
+    VkResult DefragmentPassEnd();
 
 private:
     const VmaAllocator m_hAllocator;
@@ -7249,10 +7246,10 @@ public:
     VkResult DefragmentationEnd(
         VmaDefragmentationContext context);
 
-    VkResult DefragmentationStepBegin(
-        VmaDefragmentationStepInfo* pInfo,
+    VkResult DefragmentationPassBegin(
+        VmaDefragmentationPassInfo* pInfo,
         VmaDefragmentationContext context);
-    VkResult DefragmentationStepEnd(
+    VkResult DefragmentationPassEnd(
         VmaDefragmentationContext context);
 
     void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
@@ -13016,7 +13013,7 @@ void VmaBlockVector::DefragmentationEnd(
 
 uint32_t VmaBlockVector::ProcessDefragmentations(
     class VmaBlockVectorDefragmentationContext *pCtx,
-    VmaDefragmentationStepMoveInfo* pMove, uint32_t maxMoves)
+    VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
 {
     VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
 
@@ -14123,9 +14120,9 @@ VkResult VmaDefragmentationContext_T::Defragment(
     return res;
 }
 
-VkResult VmaDefragmentationContext_T::DefragmentStepBegin(VmaDefragmentationStepInfo* pInfo)
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
 {
-    VmaDefragmentationStepMoveInfo* pCurrentMove = pInfo->pMoves;
+    VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
     uint32_t movesLeft = pInfo->moveCount;
 
     // Process default pools.
@@ -14197,7 +14194,7 @@ VkResult VmaDefragmentationContext_T::DefragmentStepBegin(VmaDefragmentationStep
     return VK_SUCCESS;
 }
 
-VkResult VmaDefragmentationContext_T::DefragmentStepEnd()
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
 {
     VkResult res = VK_SUCCESS;
 
@@ -15065,7 +15062,6 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
     m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
     m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
     m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
-    m_VulkanFunctions.vkCmdCopyImage = (PFN_vkCmdCopyImage)vkCmdCopyImage;
 #if VMA_VULKAN_VERSION >= 1001000
     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
     {
@@ -15132,7 +15128,6 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
     VMA_COPY_IF_NOT_NULL(vkCreateImage);
     VMA_COPY_IF_NOT_NULL(vkDestroyImage);
     VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
-    VMA_COPY_IF_NOT_NULL(vkCmdCopyImage);
 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
     VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
     VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
@@ -15167,7 +15162,6 @@ void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunc
     VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
     VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
     VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
-    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyImage != VMA_NULL);
 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
     if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
     {
@@ -15905,16 +15899,16 @@ VkResult VmaAllocator_T::DefragmentationEnd(
     return VK_SUCCESS;
 }
 
-VkResult VmaAllocator_T::DefragmentationStepBegin(
-    VmaDefragmentationStepInfo* pInfo,
+VkResult VmaAllocator_T::DefragmentationPassBegin(
+    VmaDefragmentationPassInfo* pInfo,
     VmaDefragmentationContext context)
 {
-    return context->DefragmentStepBegin(pInfo);
+    return context->DefragmentPassBegin(pInfo);
 }
 
-VkResult VmaAllocator_T::DefragmentationStepEnd(
+VkResult VmaAllocator_T::DefragmentationPassEnd(
     VmaDefragmentationContext context)
 {
-    return context->DefragmentStepEnd();
+    return context->DefragmentPassEnd();
 }
 
@@ -17736,16 +17730,17 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
     }
 }
 
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationStepBegin(
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
     VmaAllocator allocator,
-    VmaDefragmentationStepInfo* pInfo,
-    VmaDefragmentationContext context)
+    VmaDefragmentationContext context,
+    VmaDefragmentationPassInfo* pInfo
+)
 {
     VMA_ASSERT(allocator);
     VMA_ASSERT(pInfo);
     VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves));
 
-    VMA_DEBUG_LOG("vmaDefragmentationStepBegin");
+    VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
 
     VMA_DEBUG_GLOBAL_MUTEX_LOCK
 
@@ -17755,21 +17750,21 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationStepBegin(
         return VK_SUCCESS;
     }
 
-    return allocator->DefragmentationStepBegin(pInfo, context);
+    return allocator->DefragmentationPassBegin(pInfo, context);
 }
 
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationStepEnd(
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
     VmaAllocator allocator,
     VmaDefragmentationContext context)
 {
     VMA_ASSERT(allocator);
 
-    VMA_DEBUG_LOG("vmaDefragmentationStepEnd");
+    VMA_DEBUG_LOG("vmaEndDefragmentationPass");
 
     VMA_DEBUG_GLOBAL_MUTEX_LOCK
 
     if(context == VK_NULL_HANDLE)
         return VK_SUCCESS;
 
-    return allocator->DefragmentationStepEnd(context);
+    return allocator->DefragmentationPassEnd(context);
 }
 
 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(