Further refactoring of defragmentation classes.

Author: Adam Sawicki, 2018-10-18 13:11:00 +02:00
parent 2dcfcf8b63
commit 29b04041f7

@@ -5778,17 +5778,27 @@ class VmaBlockVectorDefragmentationContext
 {
     VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
 public:
-    VkResult res;
-    VmaPool hCustomPool; // Null if not from custom pool.
-    VmaBlockVector* pBlockVector; // Redundant, for convenience not to fetch from m_hAllocator.
-    VmaDefragmentationAlgorithm* pAlgorithm; // Owner of this object.
     VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

-    VmaBlockVectorDefragmentationContext(VmaAllocator hAllocator);
+    VmaBlockVectorDefragmentationContext(
+        VmaAllocator hAllocator,
+        VmaPool hCustomPool, // Optional.
+        VmaBlockVector* pBlockVector,
+        uint32_t currFrameIndex);
     ~VmaBlockVectorDefragmentationContext();

+    VmaPool GetCustomPool() const { return m_hCustomPool; }
+    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
+    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
+
 private:
     const VmaAllocator m_hAllocator;
+    // Null if not from custom pool.
+    const VmaPool m_hCustomPool;
+    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
+    VmaBlockVector* const m_pBlockVector;
+    // Owner of this object.
+    VmaDefragmentationAlgorithm* m_pAlgorithm;
 };

 struct VmaDefragmentationContext_T
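
The net effect of this hunk is that everything the block-vector context needs is now supplied at construction time, and the remaining state is private behind getters. A minimal usage sketch, mirroring the call sites further down in this commit; the variables in scope (hAllocator, hCustomPool, pBlockVector, currFrameIndex, hAlloc, pChanged) are assumed for illustration:

    // Create a per-block-vector context; pass VK_NULL_HANDLE as hCustomPool for a default pool.
    VmaBlockVectorDefragmentationContext* pCtx = vma_new(hAllocator, VmaBlockVectorDefragmentationContext)(
        hAllocator,
        hCustomPool,
        pBlockVector,
        currFrameIndex);

    // The constructor already created the owned algorithm, so it is usable immediately.
    pCtx->GetAlgorithm()->AddAllocation(hAlloc, pChanged);

    // Destroying the context also destroys the algorithm it owns.
    vma_delete(hAllocator, pCtx);
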
@@ -5796,9 +5806,6 @@ struct VmaDefragmentationContext_T
 private:
     VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
 public:
-    VmaBlockVectorDefragmentationContext* defaultPoolContexts[VK_MAX_MEMORY_TYPES];
-    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > customPoolContexts;
-
     VmaDefragmentationContext_T(VmaAllocator hAllocator, uint32_t currFrameIndex);
     ~VmaDefragmentationContext_T();

@@ -5809,9 +5816,9 @@ public:
     /*
     Returns:
-    VK_SUCCESS if succeeded and object can be destroyed immediately.
-    VK_NOT_READY if succeeded but the object must remain alive until vmaDefragmentationEnd.
-    Negative value if error occured and object can be destroyed immediately.
+    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
+    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
+    - Negative value if error occurred and object can be destroyed immediately.
     */
     VkResult Defragment(
         VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
@@ -5821,6 +5828,10 @@ public:
 private:
     const VmaAllocator m_hAllocator;
     const uint32_t m_CurrFrameIndex;
+    // Owner of these objects.
+    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
+    // Owner of these objects.
+    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
 };

 #if VMA_RECORDING_ENABLED
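
For context, the return contract documented in the comment above is what the defragmentation context reports back to the public API; vmaDefragmentationEnd() is named in the comment itself, while the exact parameter list of Defragment() is truncated in this diff and only assumed here. A hedged sketch of how a caller would interpret the three cases:

    // Hypothetical caller; Defragment()'s remaining parameters are elided.
    VkResult res = pDefragCtx->Defragment(/* byte/allocation limits, command buffer, stats... */);
    if(res == VK_SUCCESS)
    {
        // Succeeded; the context object can be destroyed immediately.
    }
    else if(res == VK_NOT_READY)
    {
        // Succeeded, but work (typically GPU copies recorded into the command buffer) is still
        // pending; keep the context alive until vmaDefragmentationEnd() is called.
    }
    else
    {
        // res < 0: an error occurred; the context can be destroyed immediately.
    }
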
@@ -11448,13 +11459,13 @@ VkResult VmaBlockVector::Defragment(
     const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
     VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
         VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
-    res = pDefragCtx->pAlgorithm->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
+    res = pDefragCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

     // Accumulate statistics.
     if(pDefragmentationStats != VMA_NULL)
     {
-        const VkDeviceSize bytesMoved = pDefragCtx->pAlgorithm->GetBytesMoved();
-        const uint32_t allocationsMoved = pDefragCtx->pAlgorithm->GetAllocationsMoved();
+        const VkDeviceSize bytesMoved = pDefragCtx->GetAlgorithm()->GetBytesMoved();
+        const uint32_t allocationsMoved = pDefragCtx->GetAlgorithm()->GetAllocationsMoved();
         pDefragmentationStats->bytesMoved += bytesMoved;
         pDefragmentationStats->allocationsMoved += allocationsMoved;
         VMA_ASSERT(bytesMoved <= maxBytesToMove);
@@ -11797,41 +11808,46 @@ bool VmaDefragmentationAlgorithm::MoveMakesSense(
 ////////////////////////////////////////////////////////////////////////////////
 // VmaBlockVectorDefragmentationContext

-VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(VmaAllocator hAllocator) :
-    res(VK_SUCCESS),
-    hCustomPool(VK_NULL_HANDLE),
-    pBlockVector(VMA_NULL),
-    pAlgorithm(VMA_NULL),
+VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+    VmaAllocator hAllocator,
+    VmaPool hCustomPool,
+    VmaBlockVector* pBlockVector,
+    uint32_t currFrameIndex) :
     blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
-    m_hAllocator(hAllocator)
+    m_hAllocator(hAllocator),
+    m_hCustomPool(hCustomPool),
+    m_pBlockVector(pBlockVector),
+    m_pAlgorithm(VMA_NULL)
 {
+    m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm)(
+        m_hAllocator, m_pBlockVector, currFrameIndex);
 }

 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
 {
-    vma_delete(m_hAllocator, pAlgorithm);
+    vma_delete(m_hAllocator, m_pAlgorithm);
 }

 ////////////////////////////////////////////////////////////////////////////////
 // VmaDefragmentationContext

 VmaDefragmentationContext_T::VmaDefragmentationContext_T(VmaAllocator hAllocator, uint32_t currFrameIndex) :
-    customPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks())),
     m_hAllocator(hAllocator),
-    m_CurrFrameIndex(currFrameIndex)
+    m_CurrFrameIndex(currFrameIndex),
+    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
 {
-    memset(defaultPoolContexts, 0, sizeof(defaultPoolContexts));
+    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
 }

 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
 {
-    for(size_t i = customPoolContexts.size(); i--; )
+    for(size_t i = m_CustomPoolContexts.size(); i--; )
     {
-        vma_delete(m_hAllocator, customPoolContexts[i]);
+        vma_delete(m_hAllocator, m_CustomPoolContexts[i]);
     }
     for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
     {
-        vma_delete(m_hAllocator, defaultPoolContexts[i]);
+        vma_delete(m_hAllocator, m_DefaultPoolContexts[i]);
     }
 }

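
After this hunk the ownership chain is strictly top-down: VmaDefragmentationContext_T owns every per-block-vector context it creates, and each VmaBlockVectorDefragmentationContext creates its VmaDefragmentationAlgorithm in the constructor and deletes it in the destructor. A compressed lifetime sketch; the AddAllocations()/Defragment() parameter lists are abbreviated and the stack allocation is used only for illustration:

    {
        VmaDefragmentationContext_T ctx(hAllocator, currFrameIndex);

        // Creates VmaBlockVectorDefragmentationContext objects (and their algorithms) on demand,
        // one per touched default memory type or custom pool.
        ctx.AddAllocations(/* allocations to defragment... */);

        ctx.Defragment(/* limits, command buffer, stats... */);
    }
    // ~VmaDefragmentationContext_T() deletes every pool context it created;
    // each ~VmaBlockVectorDefragmentationContext() then deletes its own algorithm.
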
@@ -11859,21 +11875,22 @@ void VmaDefragmentationContext_T::AddAllocations(
                 // Pools with algorithm other than default are not defragmented.
                 if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                 {
-                    for(size_t i = customPoolContexts.size(); i--; )
+                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                     {
-                        if(customPoolContexts[i]->hCustomPool == hAllocPool)
+                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                         {
-                            pBlockVectorDefragCtx = customPoolContexts[i];
+                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                             break;
                         }
                     }
                     if(!pBlockVectorDefragCtx)
                     {
                         pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-                            m_hAllocator);
-                        pBlockVectorDefragCtx->hCustomPool = hAllocPool;
-                        pBlockVectorDefragCtx->pBlockVector = &hAllocPool->m_BlockVector;
-                        customPoolContexts.push_back(pBlockVectorDefragCtx);
+                            m_hAllocator,
+                            hAllocPool,
+                            &hAllocPool->m_BlockVector,
+                            m_CurrFrameIndex);
+                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                     }
                 }
             }
@@ -11881,26 +11898,23 @@ void VmaDefragmentationContext_T::AddAllocations(
             else
             {
                 const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
-                pBlockVectorDefragCtx = defaultPoolContexts[memTypeIndex];
+                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                 if(!pBlockVectorDefragCtx)
                 {
                     pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
-                        m_hAllocator);
-                    pBlockVectorDefragCtx->pBlockVector = m_hAllocator->m_pBlockVectors[memTypeIndex];
-                    defaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+                        m_hAllocator,
+                        VMA_NULL, // hCustomPool
+                        m_hAllocator->m_pBlockVectors[memTypeIndex],
+                        m_CurrFrameIndex);
+                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                 }
             }

             if(pBlockVectorDefragCtx)
             {
-                if(!pBlockVectorDefragCtx->pAlgorithm)
-                {
-                    pBlockVectorDefragCtx->pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm)(
-                        m_hAllocator, pBlockVectorDefragCtx->pBlockVector, m_CurrFrameIndex);
-                }
                 VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                     &pAllocationsChanged[allocIndex] : VMA_NULL;
-                pBlockVectorDefragCtx->pAlgorithm->AddAllocation(hAlloc, pChanged);
+                pBlockVectorDefragCtx->GetAlgorithm()->AddAllocation(hAlloc, pChanged);
             }
         }
     }
@@ -11929,11 +11943,11 @@ VkResult VmaDefragmentationContext_T::Defragment(
         memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
         ++memTypeIndex)
     {
-        if(defaultPoolContexts[memTypeIndex])
+        if(m_DefaultPoolContexts[memTypeIndex])
         {
-            VMA_ASSERT(defaultPoolContexts[memTypeIndex]->pBlockVector);
-            VkResult localRes = defaultPoolContexts[memTypeIndex]->pBlockVector->Defragment(
-                defaultPoolContexts[memTypeIndex],
+            VMA_ASSERT(m_DefaultPoolContexts[memTypeIndex]->GetBlockVector());
+            VkResult localRes = m_DefaultPoolContexts[memTypeIndex]->GetBlockVector()->Defragment(
+                m_DefaultPoolContexts[memTypeIndex],
                 pStats,
                 maxCpuBytesToMove, maxCpuAllocationsToMove,
                 maxGpuBytesToMove, maxGpuAllocationsToMove,
@@ -11946,13 +11960,13 @@ VkResult VmaDefragmentationContext_T::Defragment(
     }

     // Process custom pools.
-    for(size_t customCtxIndex = 0, customCtxCount = customPoolContexts.size();
+    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
         customCtxIndex < customCtxCount && res >= VK_SUCCESS;
         ++customCtxIndex)
     {
-        VMA_ASSERT(customPoolContexts[customCtxIndex]->pBlockVector);
-        VkResult localRes = customPoolContexts[customCtxIndex]->pBlockVector->Defragment(
-            customPoolContexts[customCtxIndex],
+        VMA_ASSERT(m_CustomPoolContexts[customCtxIndex]->GetBlockVector());
+        VkResult localRes = m_CustomPoolContexts[customCtxIndex]->GetBlockVector()->Defragment(
+            m_CustomPoolContexts[customCtxIndex],
             pStats,
             maxCpuBytesToMove, maxCpuAllocationsToMove,
             maxGpuBytesToMove, maxGpuAllocationsToMove,