forked from eden-emu/eden
Compare commits
6 commits

| Author | SHA1 | Date |
|---|---|---|
| | 546dcd5aef | |
| | d3510b1397 | |
| | d09899722d | |
| | 0929bfc156 | |
| | 1ffa98a40d | |
| | 38d18af8ba | |

6 changed files with 314 additions and 120 deletions

Changed paths:
- src/common
- src/video_core/renderer_vulkan
@@ -13,11 +13,21 @@ public:
     explicit FreeRegionManager() = default;
     ~FreeRegionManager() = default;

+    // Clear all free regions
+    void Clear() {
+        std::scoped_lock lk(m_mutex);
+        m_free_regions.clear();
+    }
+
     void SetAddressSpace(void* start, size_t size) {
         this->FreeBlock(start, size);
     }

     std::pair<void*, size_t> FreeBlock(void* block_ptr, size_t size) {
+        if (block_ptr == nullptr || size == 0) {
+            return {nullptr, 0};
+        }
+
         std::scoped_lock lk(m_mutex);

         // Check to see if we are adjacent to any regions.
@@ -41,6 +51,11 @@ public:
     }

     void AllocateBlock(void* block_ptr, size_t size) {
+        // Skip if pointer is null or size is zero
+        if (block_ptr == nullptr || size == 0) {
+            return;
+        }
+
         std::scoped_lock lk(m_mutex);

         auto address = reinterpret_cast<uintptr_t>(block_ptr);
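The hunk above (apparently the free-region bookkeeping of the host memory mapper under src/common) adds a Clear() helper and guards against null or empty blocks. For background, the manager tracks which parts of the reserved address space are free and merges a freed block with any adjacent free region. Below is a minimal, self-contained sketch of that pattern, assuming a simplified std::map container rather than whatever interval container the real class uses; it is an illustration, not the project's implementation.

```cpp
#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>
#include <utility>

class FreeRegionSketch {
public:
    // Forget every tracked region, e.g. before tearing down the address space.
    void Clear() {
        std::scoped_lock lk(m_mutex);
        m_regions.clear();
    }

    // Return a block to the free list, merging with neighbours when adjacent.
    // Null or empty blocks are ignored, mirroring the guards added in the diff.
    std::pair<void*, size_t> FreeBlock(void* ptr, size_t size) {
        if (ptr == nullptr || size == 0) {
            return {nullptr, 0};
        }
        std::scoped_lock lk(m_mutex);
        auto begin = reinterpret_cast<uintptr_t>(ptr);
        auto end = begin + size;
        // Absorb a region that ends where this one begins, or starts where it ends.
        for (auto it = m_regions.begin(); it != m_regions.end();) {
            if (it->second == begin) {      // left neighbour
                begin = it->first;
                it = m_regions.erase(it);
            } else if (it->first == end) {  // right neighbour
                end = it->second;
                it = m_regions.erase(it);
            } else {
                ++it;
            }
        }
        m_regions.emplace(begin, end);
        return {reinterpret_cast<void*>(begin), end - begin};
    }

private:
    std::mutex m_mutex;
    std::map<uintptr_t, uintptr_t> m_regions; // start address -> end address
};
```

The null/zero guards carried over from the diff keep degenerate regions out of the container when a request has been clamped to nothing.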
@@ -491,6 +491,12 @@ public:
         // Intersect the range with our address space.
         AdjustMap(&virtual_offset, &length);

+        // Skip if length is zero after adjustment
+        if (length == 0) {
+            LOG_DEBUG(HW_Memory, "Skipping zero-length mapping at virtual_offset={}", virtual_offset);
+            return;
+        }
+
         // We are removing a placeholder.
         free_manager.AllocateBlock(virtual_base + virtual_offset, length);
@@ -520,14 +526,22 @@ public:
         // Intersect the range with our address space.
         AdjustMap(&virtual_offset, &length);

+        // Skip if length is zero after adjustment
+        if (length == 0) {
+            return;
+        }
+
         // Merge with any adjacent placeholder mappings.
         auto [merged_pointer, merged_size] =
             free_manager.FreeBlock(virtual_base + virtual_offset, length);

+        // Only attempt to mmap if we have a valid pointer and size
+        if (merged_pointer != nullptr && merged_size > 0) {
             void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
             ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
+        }
     }

     void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
         // Intersect the range with our address space.
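The change above only calls mmap when the merged region is non-empty. On this POSIX path the merged free range is turned back into an inaccessible placeholder with PROT_NONE and MAP_FIXED so the virtual addresses stay reserved; calling mmap with a null address under MAP_FIXED, or with a zero length, is invalid. A hedged sketch of that re-reservation step follows; the function name is illustrative and not part of the codebase.

```cpp
#include <cassert>
#include <cstddef>
#include <sys/mman.h>

// Re-reserve a merged free range as an inaccessible placeholder mapping.
void ReservePlaceholder(void* merged_pointer, size_t merged_size) {
    if (merged_pointer == nullptr || merged_size == 0) {
        return; // nothing was merged; an mmap here would fail (or map at address 0)
    }
    void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    assert(ret != MAP_FAILED);
}
```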
@@ -576,19 +590,26 @@ public:
 private:
     /// Release all resources in the object
     void Release() {
+        // Make sure we release resources in the correct order
+        // First clear the free region manager to avoid any dangling references
+        free_manager.Clear();
+
         if (virtual_map_base != MAP_FAILED) {
             int ret = munmap(virtual_map_base, virtual_size);
             ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
+            virtual_map_base = reinterpret_cast<u8*>(MAP_FAILED);
         }

         if (backing_base != MAP_FAILED) {
             int ret = munmap(backing_base, backing_size);
             ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
+            backing_base = reinterpret_cast<u8*>(MAP_FAILED);
         }

         if (fd != -1) {
             int ret = close(fd);
             ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
+            fd = -1;
         }
     }
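Besides clearing the free-region manager first, the hunk resets each handle to its sentinel value (MAP_FAILED, -1) after releasing it, which makes Release() safe to call more than once. A small stand-alone illustration of the same pattern with a file descriptor, assuming a POSIX host:

```cpp
#include <cassert>
#include <unistd.h>

struct FdHolder {
    int fd = -1;

    void Release() {
        if (fd != -1) {
            [[maybe_unused]] int ret = close(fd);
            assert(ret == 0);
            fd = -1; // sentinel: a second Release(), or the destructor, is a no-op
        }
    }

    ~FdHolder() {
        Release();
    }
};
```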
@@ -686,8 +707,10 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     ASSERT(virtual_offset + length <= virtual_size);
     ASSERT(host_offset + length <= backing_size);
     if (length == 0 || !virtual_base || !impl) {
+        LOG_ERROR(HW_Memory, "Invalid mapping operation: virtual_base or impl is null");
         return;
     }
+    LOG_INFO(HW_Memory, "Mapping memory: virtual_offset={}, host_offset={}, length={}", virtual_offset, host_offset, length);
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
@@ -696,8 +719,10 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap)
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
+        LOG_ERROR(HW_Memory, "Invalid unmapping operation: virtual_base or impl is null");
         return;
     }
+    LOG_INFO(HW_Memory, "Unmapping memory: virtual_offset={}, length={}", virtual_offset, length);
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
@@ -110,27 +110,16 @@ try
     , device_memory(device_memory_)
     , gpu(gpu_)
    , library(OpenLibrary(context.get()))
-    ,
-      // Create raw Vulkan instance first
-      instance(CreateInstance(*library,
+    , dld()
+    // Initialize resources in the same order as they are declared in the header
+    , instance(CreateInstance(*library,
                               dld,
                               VK_API_VERSION_1_1,
                               render_window.GetWindowInfo().type,
                               Settings::values.renderer_debug.GetValue()))
-    ,
-      // Now create RAII wrappers for the resources in the correct order
-      managed_instance(MakeManagedInstance(instance, dld))
-    ,
-      // Create debug messenger if debug is enabled
-      debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance)
+    , debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance)
                                                       : vk::DebugUtilsMessenger{})
-    , managed_debug_messenger(Settings::values.renderer_debug
-                                  ? MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld)
-                                  : ManagedDebugUtilsMessenger{})
-    ,
-      // Create surface
-      surface(CreateSurface(instance, render_window.GetWindowInfo()))
-    , managed_surface(MakeManagedSurface(surface, instance, dld))
+    , surface(CreateSurface(instance, render_window.GetWindowInfo()))
     , device(CreateDevice(instance, dld, *surface))
     , memory_allocator(device)
     , state_tracker()
@@ -172,22 +161,19 @@ try
                       scheduler,
                       PresentFiltersForAppletCapture)
     , rasterizer(render_window, gpu, device_memory, device, memory_allocator, state_tracker, scheduler)
-    , applet_frame() {
+    , turbo_mode()
+    , applet_frame()
+    , managed_instance(MakeManagedInstance(instance, dld))
+    , managed_debug_messenger(Settings::values.renderer_debug
+                                  ? MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld)
+                                  : ManagedDebugUtilsMessenger{})
+    , managed_surface(MakeManagedSurface(surface, instance, dld)) {

     if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) {
         turbo_mode.emplace(instance, dld);
         scheduler.RegisterOnSubmit([this] { turbo_mode->QueueSubmitted(); });
     }

-#ifndef ANDROID
-    // Release ownership from the old instance and surface
-    instance.release();
-    surface.release();
-    if (Settings::values.renderer_debug) {
-        debug_messenger.release();
-    }
-#endif
-
     Report();
 } catch (const vk::Exception& exception) {
     LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
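The two constructor hunks reorder the initializer list so it follows the declaration order in the header. That matters because C++ always constructs non-static data members in declaration order, no matter how the mem-initializer list is written, so a list in a different order only obscures the real initialization (and later destruction) sequence. A standalone illustration of this rule, unrelated to the project's own types:

```cpp
#include <iostream>

struct Noisy {
    const char* name;
    explicit Noisy(const char* n) : name(n) { std::cout << "construct " << name << '\n'; }
    ~Noisy() { std::cout << "destroy " << name << '\n'; }
};

struct Widget {
    Noisy first;
    Noisy second;

    // The list below names "second" first, but construction still runs in
    // declaration order: first, then second. Most compilers warn about this
    // (-Wreorder). Destruction runs in reverse: second, then first.
    Widget() : second("second"), first("first") {}
};

int main() {
    Widget w;
    // Output:
    //   construct first
    //   construct second
    //   destroy second
    //   destroy first
}
```

Moving the managed_* wrappers to the end of the initializer list therefore only has an effect because the members themselves were moved to the end of the class, as shown in the header hunk below.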
@@ -76,32 +76,37 @@ private:
     std::shared_ptr<Common::DynamicLibrary> library;
     vk::InstanceDispatch dld;

-    // Keep original handles for compatibility with existing code
+    // Order of member variables determines destruction order (reverse of declaration)
+    // Critical Vulkan resources should be declared in proper dependency order
+
+    // Base Vulkan instance, debugging, and surface
     vk::Instance instance;
-    // RAII wrapper for instance
-    ManagedInstance managed_instance;
-
     vk::DebugUtilsMessenger debug_messenger;
-    // RAII wrapper for debug messenger
-    ManagedDebugUtilsMessenger managed_debug_messenger;
-
     vk::SurfaceKHR surface;
-    // RAII wrapper for surface
-    ManagedSurface managed_surface;

+    // Device and core resources
     Device device;
     MemoryAllocator memory_allocator;
     StateTracker state_tracker;
     Scheduler scheduler;
     Swapchain swapchain;
     PresentManager present_manager;

+    // Rendering components
     BlitScreen blit_swapchain;
     BlitScreen blit_capture;
     BlitScreen blit_applet;
     RasterizerVulkan rasterizer;
-    std::optional<TurboMode> turbo_mode;

+    // Optional components
+    std::optional<TurboMode> turbo_mode;
     Frame applet_frame;
+
+    // RAII wrappers - must be destroyed before their raw handles
+    // so they are declared after to be destroyed first
+    ManagedInstance managed_instance;
+    ManagedDebugUtilsMessenger managed_debug_messenger;
+    ManagedSurface managed_surface;
 };

 } // namespace Vulkan
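Declaring the RAII wrappers after the raw handles they guard means the wrappers are destroyed first, while the raw members are still alive. A minimal, self-contained illustration of why declaration order alone controls this, using placeholder types rather than the project's ManagedInstance and friends:

```cpp
#include <iostream>

struct RawHandle {
    bool alive = true;
    void Destroy() {
        alive = false;
        std::cout << "raw handle destroyed\n";
    }
    ~RawHandle() { std::cout << "raw handle member gone (alive=" << alive << ")\n"; }
};

// Guard that destroys the raw handle it refers to when it goes out of scope.
struct Guard {
    RawHandle* handle;
    explicit Guard(RawHandle& h) : handle(&h) {}
    ~Guard() { handle->Destroy(); }
};

struct Renderer {
    RawHandle instance;               // declared first  -> destroyed last
    Guard managed_instance{instance}; // declared last   -> destroyed first, while instance is valid
};

int main() {
    Renderer r;
    // Prints "raw handle destroyed" before "raw handle member gone (alive=0)".
}
```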
@@ -1529,6 +1529,58 @@ void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
     if (is_rescaled) {
         ScaleDown(true);
     }
+
+    // Handle MSAA upload if necessary
+    if (info.num_samples > 1 && runtime->CanUploadMSAA()) {
+        // Only use MSAA copy pass for color formats
+        // Depth/stencil formats need special handling
+        if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
+            // Create a temporary non-MSAA image to upload the data first
+            ImageInfo temp_info = info;
+            temp_info.num_samples = 1;
+
+            // Create image with same usage flags as the target image to avoid validation errors
+            VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info);
+            image_ci.usage = original_image.UsageFlags();
+            vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci);
+
+            // Upload to the temporary non-MSAA image
+            scheduler->RequestOutsideRenderPassOperationContext();
+            auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
+            const VkBuffer src_buffer = buffer;
+            const VkImage temp_vk_image = *temp_image;
+            const VkImageAspectFlags vk_aspect_mask = aspect_mask;
+            scheduler->Record([src_buffer, temp_vk_image, vk_aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
+                CopyBufferToImage(cmdbuf, src_buffer, temp_vk_image, vk_aspect_mask, false, vk_copies);
+            });
+
+            // Now use MSAACopyPass to convert from non-MSAA to MSAA
+            std::vector<VideoCommon::ImageCopy> image_copies;
+            for (const auto& copy : copies) {
+                VideoCommon::ImageCopy image_copy;
+                image_copy.src_offset = {0, 0, 0}; // Use zero offset for source
+                image_copy.dst_offset = copy.image_offset;
+                image_copy.src_subresource = copy.image_subresource;
+                image_copy.dst_subresource = copy.image_subresource;
+                image_copy.extent = copy.image_extent;
+                image_copies.push_back(image_copy);
+            }
+
+            // Create a wrapper Image for the temporary image
+            Image temp_wrapper(*runtime, temp_info, 0, 0);
+            temp_wrapper.original_image = std::move(temp_image);
+            temp_wrapper.current_image = &Image::original_image;
+            temp_wrapper.aspect_mask = aspect_mask;
+            temp_wrapper.initialized = true;
+
+            // Use MSAACopyPass to convert from non-MSAA to MSAA
+            runtime->msaa_copy_pass->CopyImage(*this, temp_wrapper, image_copies, false);
+            std::exchange(initialized, true);
+            return;
+        }
+        // For depth/stencil formats, fall back to regular upload
+    } else {
+        // Regular non-MSAA upload
         scheduler->RequestOutsideRenderPassOperationContext();
         auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
         const VkBuffer src_buffer = buffer;
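The temporary single-sample image in the upload path exists because Vulkan's vkCmdCopyBufferToImage requires the destination image to have a sample count of 1, so data bound for an MSAA image has to be staged through a 1-sample image and then expanded on the GPU (here via the runtime's MSAA copy pass). A hedged sketch of deriving such a staging-image description from the target's; the helper name is illustrative and the extra usage bit is an assumption, not taken from the diff:

```cpp
#include <vulkan/vulkan_core.h>

// Build a create-info for a 1-sample staging image that mirrors an MSAA target.
VkImageCreateInfo MakeSingleSampleStagingInfo(const VkImageCreateInfo& msaa_target) {
    VkImageCreateInfo staging = msaa_target;           // same format, extent and usage as the target
    staging.samples = VK_SAMPLE_COUNT_1_BIT;           // buffer-to-image copies require 1 sample
    staging.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;  // assumption: it receives the buffer copy
    return staging;
}
```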
@@ -1539,6 +1591,8 @@ void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
                        vk_copies](vk::CommandBuffer cmdbuf) {
         CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, is_initialized, vk_copies);
     });
+    }

     if (is_rescaled) {
         ScaleUp();
     }
@@ -1565,6 +1619,114 @@ void Image::DownloadMemory(std::span<VkBuffer> buffers_span, std::span<size_t> o
     if (is_rescaled) {
         ScaleDown();
     }
+
+    // Handle MSAA download if necessary
+    if (info.num_samples > 1 && runtime->msaa_copy_pass) {
+        // Only use MSAA copy pass for color formats
+        // Depth/stencil formats need special handling
+        if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
+            // Create a temporary non-MSAA image to download the data
+            ImageInfo temp_info = info;
+            temp_info.num_samples = 1;
+
+            // Create image with same usage flags as the target image to avoid validation errors
+            VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info);
+            image_ci.usage = original_image.UsageFlags();
+            vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci);
+
+            // Create a wrapper Image for the temporary image
+            Image temp_wrapper(*runtime, temp_info, 0, 0);
+            temp_wrapper.original_image = std::move(temp_image);
+            temp_wrapper.current_image = &Image::original_image;
+            temp_wrapper.aspect_mask = aspect_mask;
+            temp_wrapper.initialized = true;
+
+            // Convert from MSAA to non-MSAA using MSAACopyPass
+            std::vector<VideoCommon::ImageCopy> image_copies;
+            for (const auto& copy : copies) {
+                VideoCommon::ImageCopy image_copy;
+                image_copy.src_offset = copy.image_offset;
+                image_copy.dst_offset = copy.image_offset;
+                image_copy.src_subresource = copy.image_subresource;
+                image_copy.dst_subresource = copy.image_subresource;
+                image_copy.extent = copy.image_extent;
+                image_copies.push_back(image_copy);
+            }
+
+            // Use MSAACopyPass to convert from MSAA to non-MSAA
+            runtime->msaa_copy_pass->CopyImage(temp_wrapper, *this, image_copies, true);
+
+            // Now download from the non-MSAA image
+            boost::container::small_vector<VkBuffer, 8> buffers_vector{};
+            boost::container::small_vector<boost::container::small_vector<VkBufferImageCopy, 16>, 8>
+                vk_copies;
+            for (size_t index = 0; index < buffers_span.size(); index++) {
+                buffers_vector.emplace_back(buffers_span[index]);
+                vk_copies.emplace_back(
+                    TransformBufferImageCopies(copies, offsets_span[index], aspect_mask));
+            }
+
+            scheduler->RequestOutsideRenderPassOperationContext();
+            scheduler->Record([buffers = std::move(buffers_vector), image = *temp_wrapper.original_image,
+                               aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
+                const VkImageMemoryBarrier read_barrier{
+                    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+                    .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                    .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+                    .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .image = image,
+                    .subresourceRange{
+                        .aspectMask = aspect_mask_,
+                        .baseMipLevel = 0,
+                        .levelCount = VK_REMAINING_MIP_LEVELS,
+                        .baseArrayLayer = 0,
+                        .layerCount = VK_REMAINING_ARRAY_LAYERS,
+                    },
+                };
+                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                       0, read_barrier);
+
+                for (size_t index = 0; index < buffers.size(); index++) {
+                    cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index],
+                                             vk_copies[index]);
+                }
+
+                const VkMemoryBarrier memory_write_barrier{
+                    .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+                    .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+                };
+                const VkImageMemoryBarrier image_write_barrier{
+                    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = 0,
+                    .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+                    .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                    .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .image = image,
+                    .subresourceRange{
+                        .aspectMask = aspect_mask_,
+                        .baseMipLevel = 0,
+                        .levelCount = VK_REMAINING_MIP_LEVELS,
+                        .baseArrayLayer = 0,
+                        .layerCount = VK_REMAINING_ARRAY_LAYERS,
+                    },
+                };
+                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                       0, memory_write_barrier, nullptr, image_write_barrier);
+            });
+            return;
+        }
+        // For depth/stencil formats, fall back to regular download
+    } else {
+        // Regular non-MSAA download
         boost::container::small_vector<VkBuffer, 8> buffers_vector{};
         boost::container::small_vector<boost::container::small_vector<VkBufferImageCopy, 16>, 8>
             vk_copies;
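The download path mirrors the upload path: vkCmdCopyImageToBuffer likewise requires a single-sampled source image, so the MSAA image is first copied into the 1-sample wrapper and only then read back, bracketed by layout transitions into TRANSFER_SRC_OPTIMAL and back to GENERAL. A rough, illustrative sketch of that bracket using the raw Vulkan C API; the function name, single-subresource region, and the fixed GENERAL layout are assumptions, not the project's wrappers:

```cpp
#include <vulkan/vulkan_core.h>

// Record an image-to-buffer read-back with the surrounding layout transitions.
void RecordImageReadback(VkCommandBuffer cmdbuf, VkImage image, VkBuffer buffer,
                         const VkBufferImageCopy& region) {
    // Transition the image so the transfer stage may read it.
    const VkImageMemoryBarrier to_src{
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
        .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
        .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &to_src);

    vkCmdCopyImageToBuffer(cmdbuf, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1, &region);

    // Return the image to GENERAL so later passes can use it again.
    VkImageMemoryBarrier back_to_general = to_src;
    back_to_general.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    back_to_general.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
    back_to_general.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    back_to_general.newLayout = VK_IMAGE_LAYOUT_GENERAL;
    vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                         &back_to_general);
}
```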
@@ -1629,6 +1791,8 @@ void Image::DownloadMemory(std::span<VkBuffer> buffers_span, std::span<size_t> o
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                0, memory_write_barrier, nullptr, image_write_barrier);
     });
+    }

     if (is_rescaled) {
         ScaleUp(true);
     }
@@ -82,8 +82,7 @@ public:
     }

     bool CanUploadMSAA() const noexcept {
-        // TODO: Implement buffer to MSAA uploads
-        return false;
+        return msaa_copy_pass.operator bool();
     }

     void AccelerateImageUpload(Image&, const StagingBufferRef&,