diff --git a/src/android/app/src/main/res/values/strings.xml b/src/android/app/src/main/res/values/strings.xml
index 5d8276ab71..fe46ecf310 100644
--- a/src/android/app/src/main/res/values/strings.xml
+++ b/src/android/app/src/main/res/values/strings.xml
@@ -95,7 +95,7 @@
Synchronize Core Speed
Synchronize the core tick speed to the maximum speed percentage to improve performance without altering the game\'s actual speed.
Enable LRU Cache
- Enable or disable the Least Recently Used (LRU) cache, increasing performance by saving CPU process usage. Some games have issues with it, notably TotK 1.2.1, so disable if the game doesn\'t boot or crashes randomly.
+ Enable or disable the Least Recently Used (LRU) cache, which can improve performance by reducing CPU usage. Some games may have issues with this setting, so disable it if a game doesn\'t boot or crashes randomly.
Fast CPU Time
Forces the emulated CPU to run at a higher clock, reducing certain FPS limiters. This option is hacky and may cause issues, and weaker CPUs may see reduced performance.
Custom CPU Ticks
diff --git a/src/core/arm/nce/patcher.h b/src/core/arm/nce/patcher.h
index 53a923138c..7f54608e3f 100644
--- a/src/core/arm/nce/patcher.h
+++ b/src/core/arm/nce/patcher.h
@@ -16,6 +16,24 @@
#include "core/hle/kernel/physical_memory.h"
#include "lru_cache.h"
#include <array>
+using ModuleID = std::array<u8, 0x20>; // NSO build ID
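+// Key for the patch LRU cache: the owning module's build ID plus the patched offset within that module.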
+struct PatchCacheKey {
+ ModuleID module_id;
+ uintptr_t offset;
+ bool operator==(const PatchCacheKey&) const = default;
+};
+
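+// Hash support so PatchCacheKey can be used as a key in the cache's underlying map.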
+template <>
+struct std::hash<PatchCacheKey> {
+ size_t operator()(const PatchCacheKey& key) const {
+ // XOR each byte of the build ID into the hash, then mix in the offset
+ size_t hash = 0;
+ for (size_t i = 0; i < key.module_id.size(); ++i) {
+ hash ^= static_cast<size_t>(key.module_id[i]) << ((i % sizeof(size_t)) * 8);
+ }
+ return hash ^ std::hash<uintptr_t>{}(key.offset);
+ }
+};
namespace Core::NCE {
@@ -31,13 +49,15 @@ using EntryTrampolines = std::unordered_map<ModuleTextAddress, PatchTextAddress>
class Patcher {
public:
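+ // Records the build ID of the module being patched; used to key cached patch entries.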
+ void SetModuleID(const ModuleID& id) {
+ module_id = id;
+ }
Patcher(const Patcher&) = delete;
Patcher& operator=(const Patcher&) = delete;
Patcher(Patcher&& other) noexcept;
Patcher& operator=(Patcher&&) noexcept = delete;
explicit Patcher();
~Patcher();
-
bool PatchText(const Kernel::PhysicalMemory& program_image,
const Kernel::CodeSet::Segment& code);
bool RelocateAndCopy(Common::ProcessAddress load_base, const Kernel::CodeSet::Segment& code,
@@ -50,7 +70,7 @@ public:
private:
using ModuleDestLabel = uintptr_t;
-
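+ // Build ID of the module currently being patched (set via SetModuleID).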
+ ModuleID module_id{};
struct Trampoline {
ptrdiff_t patch_offset;
uintptr_t module_offset;
@@ -68,26 +88,25 @@ private:
private:
static constexpr size_t CACHE_SIZE = 16384; // Cache size for patch entries
- LRUCache<uintptr_t, uintptr_t> patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};
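+ // Cache of previously generated patch offsets, keyed by module build ID and module offset.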
+ LRUCache<PatchCacheKey, uintptr_t> patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};
void BranchToPatch(uintptr_t module_dest) {
if (patch_cache.isEnabled()) {
- LOG_DEBUG(Core_ARM, "LRU cache lookup for address {:#x}", module_dest);
+ PatchCacheKey key{module_id, module_dest};
+ LOG_DEBUG(Core_ARM, "LRU cache lookup for module={}, offset={:#x}", fmt::ptr(module_id.data()), module_dest);
// Try to get existing patch entry from cache
- if (auto* cached_patch = patch_cache.get(module_dest)) {
- LOG_WARNING(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
+ if (auto* cached_patch = patch_cache.get(key)) {
+ LOG_WARNING(Core_ARM, "LRU cache hit for module offset {:#x}", module_dest);
curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
return;
}
- LOG_DEBUG(Core_ARM, "LRU cache miss for address {:#x}, creating new patch", module_dest);
-
- // If not in cache, create new entry and cache it
+ LOG_DEBUG(Core_ARM, "LRU cache miss for module offset {:#x}, creating new patch", module_dest);
+ // Not in cache: create and store
const auto patch_addr = c.offset();
curr_patch->m_branch_to_patch_relocations.push_back({patch_addr, module_dest});
- patch_cache.put(module_dest, patch_addr);
+ patch_cache.put(key, patch_addr);
} else {
- LOG_DEBUG(Core_ARM, "LRU cache disabled - creating direct patch for address {:#x}", module_dest);
- // LRU disabled - use pre-LRU approach
+ LOG_DEBUG(Core_ARM, "LRU cache disabled - direct patch for offset {:#x}", module_dest);
curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), module_dest});
}
}
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 583b7e9270..2cd62df072 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -166,9 +166,12 @@ std::optional AppLoader_NSO::LoadModule(Kernel::KProcess& process, Core::
const auto& code = codeset.CodeSegment();
auto* patch = patches ? &patches->operator[](patch_index) : nullptr;
if (patch && !load_into_process) {
+ // Set the module ID using the build_id from the NSO header
+ patch->SetModuleID(nso_header.build_id);
// Patch SVCs and MRS calls in the guest code
while (!patch->PatchText(program_image, code)) {
patch = &patches->emplace_back();
+ patch->SetModuleID(nso_header.build_id); // If a new patcher is created for a large module, it needs the build_id as well
}
} else if (patch) {
// Relocate code patch and copy to the program_image.