WIP: DO-NOT-MERGE: NCE experiments: Fix build errors
parent 777b674edf
commit 27ceda2c6c
2 changed files with 20 additions and 24 deletions
@@ -382,23 +382,23 @@ void ArmNce::SignalInterrupt(Kernel::KThread* thread) {
     }
 }
 
+const std::size_t CACHE_PAGE_SIZE = 4096;
+
 void ArmNce::ClearInstructionCache() {
-#if defined(__GNUC__) || defined(__clang__)
-    const size_t PAGE_SIZE = 4096;
-    void* start = (void*)((uintptr_t)__builtin_return_address(0) & ~(PAGE_SIZE - 1));
-    void* end = (void*)((uintptr_t)start + PAGE_SIZE * 2); // Clear two pages for better coverage
-
-    // Prefetch next likely pages
-    __builtin_prefetch((void*)((uintptr_t)end), 1, 3);
-    __builtin___clear_cache(static_cast<char*>(start), static_cast<char*>(end));
-#endif
-
-#ifdef __aarch64__
-    // Ensure all previous memory operations complete
-    asm volatile("dmb ish" ::: "memory");
-    asm volatile("dsb ish" ::: "memory");
-    asm volatile("isb" ::: "memory");
-#endif
+#if defined(__GNUC__) || defined(__clang__)
+    void* start = (void*)((uintptr_t)__builtin_return_address(0) & ~(CACHE_PAGE_SIZE - 1));
+    void* end =
+        (void*)((uintptr_t)start + CACHE_PAGE_SIZE * 2); // Clear two pages for better coverage
+    // Prefetch next likely pages
+    __builtin_prefetch((void*)((uintptr_t)end), 1, 3);
+    __builtin___clear_cache(static_cast<char*>(start), static_cast<char*>(end));
+#endif
+#ifdef __aarch64__
+    // Ensure all previous memory operations complete
+    asm volatile("dmb ish" ::: "memory");
+    asm volatile("dsb ish" ::: "memory");
+    asm volatile("isb" ::: "memory");
+#endif
 }
 
 void ArmNce::InvalidateCacheRange(u64 addr, std::size_t size) {
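The hunk above is essentially a rename plus whitespace cleanup: the function-local const size_t PAGE_SIZE becomes a file-scope CACHE_PAGE_SIZE while the clear_cache/barrier structure stays the same. My reading (not stated in the commit) is that PAGE_SIZE already arrives as a macro from some libc headers, Bionic in particular, so the local constant fails to compile. A minimal standalone sketch of the resulting pattern, with hypothetical names (kCachePageSize, ClearICacheAround) and assuming 4 KiB pages; it is not the project's actual code:

#include <cstddef>
#include <cstdint>

namespace {
constexpr std::size_t kCachePageSize = 4096; // assumed 4 KiB pages, as in the diff

void ClearICacheAround(void* hint) {
#if defined(__GNUC__) || defined(__clang__)
    // Round the hint down to a page boundary and flush two pages, mirroring the diff.
    auto* start = reinterpret_cast<char*>(reinterpret_cast<std::uintptr_t>(hint) &
                                          ~static_cast<std::uintptr_t>(kCachePageSize - 1));
    auto* end = start + kCachePageSize * 2;
    __builtin_prefetch(end, 1, 3); // hint the pages likely touched next, as the diff does
    __builtin___clear_cache(start, end);
#endif
#if defined(__aarch64__)
    // Make the preceding cache maintenance visible before any further instruction fetches.
    asm volatile("dmb ish" ::: "memory");
    asm volatile("dsb ish" ::: "memory");
    asm volatile("isb" ::: "memory");
#endif
}
} // namespace

int main() {
    // Two page-aligned pages of ordinary data, so the whole flushed range is mapped.
    alignas(4096) static char scratch[2 * 4096] = {};
    ClearICacheAround(scratch);
    return 0;
}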
@@ -408,6 +408,8 @@ bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t sc
     bool signed_ = false;
     size_t regsize = 0;
 
+    const size_t datasize = 8 << scale;
+
     if (opc.Bit<1>() == 0) {
         memop = opc.Bit<0>() ? MemOp::Load : MemOp::Store;
         regsize = size == 0b11 ? 64 : 32;
@@ -427,7 +429,6 @@ bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t sc
         return false;
     }
-
     // Use aligned access where possible
     alignas(8) u64 address;
     if (Rn == Reg::SP) {
         address = this->GetSp();
@@ -435,21 +436,18 @@ bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t sc
         address = this->GetReg(Rn);
     }
 
     // Pre-index addressing
     if (!postindex) {
         address += offset;
     }
 
     // Alignment optimization for common cases
-    const bool is_aligned = (address % 8) == 0;
+    //const bool is_aligned = (address % 8) == 0;
 
     // Enhanced prefetching for loads with aligned addresses
     if (memop == MemOp::Load) {
         const size_t CACHE_LINE_SIZE = 64;
         if ((address % 16) == 0) {
             __builtin_prefetch((void*)address, 0, 3);
             __builtin_prefetch((void*)(address + CACHE_LINE_SIZE), 0, 3);
-            if (datasize >= 32) {
+            if (datasize >= 32) { // Now datasize is in scope
                 __builtin_prefetch((void*)(address + CACHE_LINE_SIZE * 2), 0, 2);
             }
         } else if ((address % 8) == 0) {
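The prefetch block in this hunk is why datasize had to move: it reads datasize before the old declaration further down in the function, which the next hunk removes, while the first hunk in this file now declares it at the top. As a side note, a standalone sketch of the same aligned-address prefetch heuristic, with illustrative names only (PrefetchForLoad, kCacheLineSize); the 8-byte-aligned branch body falls outside this hunk, so its content here is a guess:

#include <cstddef>
#include <cstdint>

// Sketch of the heuristic used above: prefetch the current and next cache line for
// 16-byte-aligned load addresses, plus one more line for accesses of 32 bits or wider.
inline void PrefetchForLoad(std::uintptr_t address, std::size_t datasize_bits) {
    constexpr std::size_t kCacheLineSize = 64; // assumed, as in the diff
    if ((address % 16) == 0) {
        __builtin_prefetch(reinterpret_cast<void*>(address), 0, 3);
        __builtin_prefetch(reinterpret_cast<void*>(address + kCacheLineSize), 0, 3);
        if (datasize_bits >= 32) {
            __builtin_prefetch(reinterpret_cast<void*>(address + kCacheLineSize * 2), 0, 2);
        }
    } else if ((address % 8) == 0) {
        // Original branch body not shown in this hunk; a single lower-locality
        // prefetch is one plausible form.
        __builtin_prefetch(reinterpret_cast<void*>(address), 0, 2);
    }
}

// Usage: PrefetchForLoad(reinterpret_cast<std::uintptr_t>(ptr), 64);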
@@ -457,7 +455,6 @@ bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t sc
         }
     }
 
-    const size_t datasize = 8 << scale;
     switch (memop) {
     case MemOp::Store: {
         u64 data = this->GetReg(Rt);
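This removal, together with the const size_t datasize = 8 << scale; added near the top of RegisterImmediate earlier in the diff, appears to be the actual build fix for this file: datasize used to be declared here, after the prefetch block had already referenced it. A contrived reproduction of that pattern, not the project's code:

#include <cstddef>

std::size_t Broken(std::size_t scale) {
    // error: 'datasize' was not declared in this scope (declaration only comes later)
    // if (datasize >= 32) { /* prefetch more */ }
    const std::size_t datasize = 8 << scale;
    return datasize;
}

std::size_t Fixed(std::size_t scale) {
    const std::size_t datasize = 8 << scale; // declared before first use
    if (datasize >= 32) { /* prefetch more */ }
    return datasize;
}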
@@ -483,7 +480,6 @@ bool InterpreterVisitor::RegisterImmediate(bool wback, bool postindex, size_t sc
     if (postindex) {
         address += offset;
     }
-
     if (Rn == Reg::SP) {
         this->SetSp(address);
     } else {