forked from eden-emu/eden
commit 0d034ba584
11 changed files with 93 additions and 29 deletions
@@ -37,7 +37,8 @@ enum class BooleanSetting(override val key: String) : AbstractBooleanSetting {
    SHOW_APP_RAM_USAGE("show_app_ram_usage"),
    SHOW_SYSTEM_RAM_USAGE("show_system_ram_usage"),
    SHOW_BAT_TEMPERATURE("show_bat_temperature"),
-   OVERLAY_BACKGROUND("overlay_background"),;
+   OVERLAY_BACKGROUND("overlay_background"),
+   USE_LRU_CACHE("use_lru_cache"),;

    external fun isFrameSkippingEnabled(): Boolean
    external fun isFrameInterpolationEnabled(): Boolean
@@ -119,6 +119,13 @@ abstract class SettingsItem(
        // List of all general
        val settingsItems = HashMap<String, SettingsItem>().apply {
            put(StringInputSetting(StringSetting.DEVICE_NAME, titleId = R.string.device_name))
+           put(
+               SwitchSetting(
+                   BooleanSetting.USE_LRU_CACHE,
+                   titleId = R.string.use_lru_cache,
+                   descriptionId = R.string.use_lru_cache_description
+               )
+           )
            put(
                SwitchSetting(
                    BooleanSetting.RENDERER_USE_SPEED_LIMIT,
@@ -251,6 +251,7 @@ class SettingsFragmentPresenter(
            add(IntSetting.RENDERER_ASTC_DECODE_METHOD.key)
            add(IntSetting.RENDERER_ASTC_RECOMPRESSION.key)
            add(IntSetting.RENDERER_VRAM_USAGE_MODE.key)
+           add(BooleanSetting.USE_LRU_CACHE.key)
        }
    }
@@ -472,6 +473,13 @@ class SettingsFragmentPresenter(
                    descriptionId = R.string.frame_skipping_description
                )
            )
+           add(
+               SwitchSetting(
+                   BooleanSetting.USE_LRU_CACHE,
+                   titleId = R.string.use_lru_cache,
+                   descriptionId = R.string.use_lru_cache_description
+               )
+           )
            add(ByteSetting.RENDERER_DYNA_STATE.key)
            add(
                SwitchSetting(
@@ -49,6 +49,10 @@ struct Values {
    Settings::SwitchableSetting<std::string, false> driver_path{linkage, "", "driver_path",
                                                                Settings::Category::GpuDriver};

+   // LRU Cache
+   Settings::SwitchableSetting<bool> use_lru_cache{linkage, true, "use_lru_cache",
+                                                   Settings::Category::System};
+
    Settings::Setting<s32> theme{linkage, 0, "theme", Settings::Category::Android};
    Settings::Setting<s32> theme_mode{linkage, -1, "theme_mode", Settings::Category::Android};
    Settings::Setting<bool> black_backgrounds{linkage, false, "black_backgrounds",
@@ -657,6 +657,10 @@
    <string name="resolution_three">3X (2160p/3240p) (Slow)</string>
    <string name="resolution_four">4X (2880p/4320p) (Slow)</string>

+   <!-- LRU Cache -->
+   <string name="use_lru_cache">Enable LRU Cache</string>
+   <string name="use_lru_cache_description">Enable or disable the Least Recently Used (LRU) cache for improved performance</string>
+
    <!-- Renderer VSync -->
    <string name="renderer_vsync_immediate">Immediate (Off)</string>
    <string name="renderer_vsync_mailbox">Mailbox</string>
@@ -218,6 +218,7 @@ struct Values {
    // Memory
    SwitchableSetting<bool> use_gpu_memory_manager{linkage, false, "Use GPU Memory Manager", Category::Core};
    SwitchableSetting<bool> enable_memory_snapshots{linkage, false, "Enable Memory Snapshots", Category::Core};
+   SwitchableSetting<bool> lru_cache_enabled{linkage, true, "use_lru_cache", Category::System};

    // Cpu
    SwitchableSetting<CpuBackend, true> cpu_backend{linkage,
@@ -3,16 +3,26 @@
#include <list>
#include <unordered_map>
#include <optional>
+#include "common/logging/log.h"

template<typename KeyType, typename ValueType>
class LRUCache {
private:
+   bool enabled = true;
    size_t capacity;
    std::list<KeyType> cache_list;
    std::unordered_map<KeyType, std::pair<typename std::list<KeyType>::iterator, ValueType>> cache_map;

public:
-   explicit LRUCache(size_t capacity) : capacity(capacity) {
+   explicit LRUCache(size_t capacity, bool enabled = true) : enabled(enabled), capacity(capacity) {
        cache_map.reserve(capacity);
+       LOG_WARNING(Core, "LRU Cache initialized with state: {}", enabled ? "enabled" : "disabled");
    }

    // Returns pointer to value if found, nullptr otherwise
    ValueType* get(const KeyType& key) {
+       if (!enabled) return nullptr;
+
        auto it = cache_map.find(key);
        if (it == cache_map.end()) {
            return nullptr;
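The cache-hit path of get() falls outside this hunk. A typical completion, assuming the usual list-splice promotion idiom (a sketch, not necessarily the author's exact code):

        }

        // Hit: move the key to the front of the usage list (most recently used)
        // and return a pointer to the value stored alongside the list iterator.
        cache_list.splice(cache_list.begin(), cache_list, it->second.first);
        return &(it->second.second);
    }

Splicing the existing list node avoids reallocating it, so the iterator stored in cache_map stays valid.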
@@ -25,12 +35,16 @@ public:

    // Returns pointer to value if found (without promoting it), nullptr otherwise
    ValueType* peek(const KeyType& key) const {
+       if (!enabled) return nullptr;
+
        auto it = cache_map.find(key);
        return it != cache_map.end() ? &(it->second.second) : nullptr;
    }

    // Inserts or updates a key-value pair
    void put(const KeyType& key, const ValueType& value) {
+       if (!enabled) return;
+
        auto it = cache_map.find(key);

        if (it != cache_map.end()) {
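Only the existing-key check and (in the next hunk) the final map insertion of put() are visible here. A typical completion of the elided middle, assuming the usual update-then-evict flow (a sketch, not the verified original):

            // Existing key: refresh the stored value and promote it to most recently used.
            it->second.second = value;
            cache_list.splice(cache_list.begin(), cache_list, it->second.first);
            return;
        }

        // New key: evict the least recently used entry if the cache is at capacity...
        if (cache_map.size() >= capacity) {
            cache_map.erase(cache_list.back());
            cache_list.pop_back();
        }

        // ...then push the key to the front; the next hunk's
        // "cache_map[key] = {cache_list.begin(), value};" records it in the map.
        cache_list.push_front(key);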
@@ -52,6 +66,20 @@ public:
        cache_map[key] = {cache_list.begin(), value};
    }

+   // Enable or disable the LRU cache
+   void setEnabled(bool state) {
+       enabled = state;
+       LOG_WARNING(Core, "LRU Cache state changed to: {}", state ? "enabled" : "disabled");
+       if (!enabled) {
+           clear();
+       }
+   }
+
+   // Check if the cache is enabled
+   bool isEnabled() const {
+       return enabled;
+   }
+
    // Attempts to get value, returns std::nullopt if not found
    std::optional<ValueType> try_get(const KeyType& key) {
        auto* val = get(key);
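The tail of try_get() is cut off by the hunk boundary. A plausible completion (sketch, not necessarily the original) simply wraps the pointer returned by get() into an std::optional:

        if (val != nullptr) {
            return *val;       // hit: copy the value into the optional
        }
        return std::nullopt;   // miss, or cache disabled
    }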
@@ -60,16 +88,18 @@ public:

    // Checks if key exists in cache
    bool contains(const KeyType& key) const {
+       if (!enabled) return false;
        return cache_map.find(key) != cache_map.end();
    }

    // Removes a key from the cache if it exists
    bool erase(const KeyType& key) {
+       if (!enabled) return false;
+
        auto it = cache_map.find(key);
        if (it == cache_map.end()) {
            return false;
        }

        cache_list.erase(it->second.first);
        cache_map.erase(it);
        return true;
@@ -83,7 +113,7 @@ public:

    // Returns current number of elements in cache
    size_t size() const {
-       return cache_map.size();
+       return enabled ? cache_map.size() : 0;
    }

    // Returns maximum capacity of cache
@@ -93,6 +123,8 @@ public:

    // Resizes the cache, evicting LRU items if new capacity is smaller
    void resize(size_t new_capacity) {
+       if (!enabled) return;
+
        capacity = new_capacity;
        while (cache_map.size() > capacity) {
            auto last = cache_list.back();
@@ -101,9 +133,4 @@ public:
        }
        cache_map.reserve(capacity);
    }
-
-private:
-   size_t capacity;
-   std::list<KeyType> cache_list;
-   std::unordered_map<KeyType, std::pair<typename std::list<KeyType>::iterator, ValueType>> cache_map;
};
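Taken together, the header describes a capacity-bounded cache that degrades to a pass-through no-op when disabled. A minimal usage sketch; the include path, key/value types, and the eviction-on-put behaviour assumed here are illustrative rather than taken verbatim from the diff:

    #include <cstdint>
    #include <iostream>
    #include <string>

    #include "common/lru_cache.h"  // assumed location of the LRUCache header above

    int main() {
        // Two-entry cache, enabled; mirrors LRUCache<uintptr_t, PatchTextAddress> in the patcher.
        LRUCache<std::uint64_t, std::string> cache(2, true);

        cache.put(0x1000, "patch A");
        cache.put(0x2000, "patch B");
        cache.get(0x1000);             // promotes 0x1000 to most recently used
        cache.put(0x3000, "patch C");  // evicts 0x2000, the least recently used key

        std::cout << "contains 0x2000: " << cache.contains(0x2000) << '\n';  // 0
        if (auto* hit = cache.peek(0x1000)) {  // peek reads without changing recency
            std::cout << "0x1000 -> " << *hit << '\n';
        }

        cache.setEnabled(false);       // clears the cache; every later call is a no-op
        std::cout << "size while disabled: " << cache.size() << '\n';        // 0
        return 0;
    }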
@@ -23,6 +23,8 @@ constexpr size_t MaxRelativeBranch = 128_MiB;
constexpr u32 ModuleCodeIndex = 0x24 / sizeof(u32);

Patcher::Patcher() : c(m_patch_instructions) {
+   LOG_WARNING(Core_ARM, "Patcher initialized with LRU cache {}",
+               patch_cache.isEnabled() ? "enabled" : "disabled");
    // The first word of the patch section is always a branch to the first instruction of the
    // module.
    c.dw(0);
@@ -10,6 +10,7 @@
#include <oaknut/oaknut.hpp>

#include "common/common_types.h"
+#include "common/settings.h"
#include "core/hle/kernel/code_set.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/physical_memory.h"
@@ -62,19 +63,28 @@ private:

private:
    static constexpr size_t CACHE_SIZE = 4096; // Cache size for patch entries
-   LRUCache<uintptr_t, PatchTextAddress> patch_cache{CACHE_SIZE};
+   LRUCache<uintptr_t, PatchTextAddress> patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};

    void BranchToPatch(uintptr_t module_dest) {
-       // Try to get existing patch entry from cache
-       if (auto* cached_patch = patch_cache.get(module_dest)) {
-           curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
-           return;
-       }
+       if (patch_cache.isEnabled()) {
+           LOG_DEBUG(Core_ARM, "LRU cache lookup for address {:#x}", module_dest);
+           // Try to get existing patch entry from cache
+           if (auto* cached_patch = patch_cache.get(module_dest)) {
+               LOG_DEBUG(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
+               curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
+               return;
+           }
+           LOG_DEBUG(Core_ARM, "LRU cache miss for address {:#x}, creating new patch", module_dest);

-       // If not in cache, create new entry and cache it
-       const auto patch_addr = c.offset();
-       curr_patch->m_branch_to_patch_relocations.push_back({patch_addr, module_dest});
-       patch_cache.put(module_dest, patch_addr);
+           // If not in cache, create new entry and cache it
+           const auto patch_addr = c.offset();
+           curr_patch->m_branch_to_patch_relocations.push_back({patch_addr, module_dest});
+           patch_cache.put(module_dest, patch_addr);
+       } else {
+           LOG_DEBUG(Core_ARM, "LRU cache disabled - creating direct patch for address {:#x}", module_dest);
+           // LRU disabled - use pre-LRU approach
+           curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), module_dest});
+       }
    }

    void BranchToModule(uintptr_t module_dest) {
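The new BranchToPatch memoizes the patch offset recorded for each module destination: a hit reuses the cached offset in the relocation, a miss records the current offset and caches it, and the whole lookup is skipped when the setting is off. A self-contained toy model of that memoization pattern (the names and the fixed offset increment are illustrative; this is not eden's actual Patcher):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
        const bool lru_enabled = true;  // in eden this comes from Settings::values.lru_cache_enabled
        std::unordered_map<std::uint64_t, std::uint64_t> patch_cache;  // module dest -> patch offset
        std::vector<std::pair<std::uint64_t, std::uint64_t>> relocations;
        std::uint64_t offset = 0;
        int cache_hits = 0;

        const std::uint64_t dests[] = {0x100, 0x200, 0x100, 0x100, 0x200};
        for (const auto dest : dests) {
            offset += 4;  // stand-in for c.offset() advancing as instructions are emitted
            if (lru_enabled) {
                if (auto it = patch_cache.find(dest); it != patch_cache.end()) {
                    relocations.emplace_back(offset, it->second);  // hit: reuse cached patch offset
                    ++cache_hits;
                    continue;
                }
                relocations.emplace_back(offset, dest);  // miss: record and cache the offset
                patch_cache.emplace(dest, offset);
            } else {
                relocations.emplace_back(offset, dest);  // pre-LRU path: no lookup at all
            }
        }
        std::cout << "relocations: " << relocations.size() << ", cache hits: " << cache_hits << '\n';
        return 0;
    }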
@@ -243,10 +243,10 @@ public:
    u32 GetPointerBufferSize() const {
        return m_pointer_buffer_size;
    }

    void SetPointerBufferSize(u32 size) {
        m_pointer_buffer_size = size;
    }

    Result Terminate();
@@ -714,33 +714,33 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
        if (Settings::values.dyna_state.GetValue() == 0) {
            must_emulate_scaled_formats = true;
            LOG_INFO(Render_Vulkan, "Dynamic state is disabled (dyna_state = 0), forcing scaled format emulation ON");

            // Remove all dynamic state 1-2 extensions and features
            RemoveExtensionFeature(extensions.custom_border_color, features.custom_border_color,
                                   VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);

            RemoveExtensionFeature(extensions.extended_dynamic_state, features.extended_dynamic_state,
                                   VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);

            RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
                                   VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);

            RemoveExtensionFeature(extensions.vertex_input_dynamic_state, features.vertex_input_dynamic_state,
                                   VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);

            // Disable extended dynamic state 3 features
            features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
            features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
            features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable = false;

            dynamic_state3_blending = false;
            dynamic_state3_enables = false;

            LOG_INFO(Render_Vulkan, "Dynamic state extensions and features have been fully disabled");
        } else {
            must_emulate_scaled_formats = false;
            LOG_INFO(Render_Vulkan, "Dynamic state is enabled (dyna_state = 1-3), disabling scaled format emulation");
        }
    }

    logical = vk::Device::Create(physical, queue_cis, ExtensionListForVulkan(loaded_extensions),
                                 first_next, dld);
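The gate is all-or-nothing: dyna_state == 0 forces scaled-format emulation and strips every dynamic-state 1-3 extension and feature before the logical device is created; any other value leaves them intact. A compact stand-alone model of that decision, using plain structs as stand-ins for the real Vulkan feature structs (illustrative only):

    #include <iostream>

    // Illustrative stand-ins for the extension/feature toggles touched above.
    struct DynamicStateFeatures {
        bool extended_dynamic_state = true;
        bool extended_dynamic_state2 = true;
        bool extended_dynamic_state3_blending = true;
        bool vertex_input_dynamic_state = true;
    };

    // Mirrors the branch in Device::Device(): dyna_state == 0 disables everything
    // and forces scaled-format emulation; any other value keeps the features.
    static bool ApplyDynaStateGate(int dyna_state, DynamicStateFeatures& features) {
        if (dyna_state == 0) {
            features = DynamicStateFeatures{false, false, false, false};
            return true;   // must_emulate_scaled_formats
        }
        return false;
    }

    int main() {
        DynamicStateFeatures features{};
        const bool emulate_scaled = ApplyDynaStateGate(0, features);
        std::cout << "emulate scaled formats: " << emulate_scaled
                  << ", extended_dynamic_state: " << features.extended_dynamic_state << '\n';
        return 0;
    }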