From 7d0f2e43b6ae0172fe5ad2da6d6f7cf8afb383fb Mon Sep 17 00:00:00 2001
From: "Unknown W. Brackets"
Date: Sun, 24 Sep 2023 10:16:10 -0700
Subject: [PATCH] irjit: Fix safety of kernel bit memory addresses.

---
 Core/MIPS/ARM64/Arm64IRCompLoadStore.cpp | 17 +++++++++++------
 Core/MIPS/RiscV/RiscVCompLoadStore.cpp   | 15 +++++++++++++--
 Core/MIPS/x86/X64IRCompLoadStore.cpp     | 24 +++++++++++++++---------
 3 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/Core/MIPS/ARM64/Arm64IRCompLoadStore.cpp b/Core/MIPS/ARM64/Arm64IRCompLoadStore.cpp
index 42a966d4371a..d0fde9f6f2fc 100644
--- a/Core/MIPS/ARM64/Arm64IRCompLoadStore.cpp
+++ b/Core/MIPS/ARM64/Arm64IRCompLoadStore.cpp
@@ -80,7 +80,12 @@ Arm64JitBackend::LoadStoreArg Arm64JitBackend::PrepareSrc1Address(IRInst inst) {
 	// If it's about to be clobbered, don't waste time pointerifying. Use displacement.
 	bool clobbersSrc1 = !readsFromSrc1 && regs_.IsGPRClobbered(inst.src1);
 
-	int32_t imm = (int32_t)inst.constant;
+	int64_t imm = (int32_t)inst.constant;
+	// It can't be this negative, must be a constant address with the top bit set.
+	if ((imm & 0xC0000000) == 0x80000000) {
+		imm = (uint64_t)(uint32_t)inst.constant;
+	}
+
 	LoadStoreArg addrArg;
 	if (inst.src1 == MIPS_REG_ZERO) {
 		// The constant gets applied later.
@@ -100,7 +105,7 @@ Arm64JitBackend::LoadStoreArg Arm64JitBackend::PrepareSrc1Address(IRInst inst) {
 
 		// Since we can't modify src1, let's just use a temp reg while copying.
 		if (!addrArg.useRegisterOffset) {
-			ADDI2R(SCRATCH1, regs_.MapGPR(inst.src1), (s64)imm, SCRATCH2);
+			ADDI2R(SCRATCH1, regs_.MapGPR(inst.src1), imm, SCRATCH2);
 #ifdef MASKED_PSP_MEMORY
 			ANDI2R(SCRATCH1, SCRATCH1, Memory::MEMVIEW32_MASK, SCRATCH2);
 #endif
@@ -114,7 +119,7 @@ Arm64JitBackend::LoadStoreArg Arm64JitBackend::PrepareSrc1Address(IRInst inst) {
 			// The offset gets set later.
 			addrArg.base = regs_.MapGPRAsPointer(inst.src1);
 		} else {
-			ADDI2R(SCRATCH1, regs_.MapGPR(inst.src1), (s64)imm, SCRATCH2);
+			ADDI2R(SCRATCH1, regs_.MapGPR(inst.src1), imm, SCRATCH2);
 #ifdef MASKED_PSP_MEMORY
 			ANDI2R(SCRATCH1, SCRATCH1, Memory::MEMVIEW32_MASK, SCRATCH2);
 #endif
@@ -137,15 +142,15 @@ Arm64JitBackend::LoadStoreArg Arm64JitBackend::PrepareSrc1Address(IRInst inst) {
 	int scale = IROpToByteWidth(inst.op);
 	if (imm > 0 && (imm & (scale - 1)) == 0 && imm <= 0xFFF * scale) {
 		// Okay great, use the LDR/STR form.
-		addrArg.immOffset = imm;
+		addrArg.immOffset = (int)imm;
 		addrArg.useUnscaled = false;
 	} else if (imm >= -256 && imm < 256) {
 		// An unscaled offset (LDUR/STUR) should work fine for this range.
-		addrArg.immOffset = imm;
+		addrArg.immOffset = (int)imm;
 		addrArg.useUnscaled = true;
 	} else {
 		// No luck, we'll need to load into a register.
-		MOVI2R(SCRATCH1, (s64)imm);
+		MOVI2R(SCRATCH1, imm);
 		addrArg.regOffset = SCRATCH1;
 		addrArg.useRegisterOffset = true;
 		addrArg.signExtendRegOffset = true;
diff --git a/Core/MIPS/RiscV/RiscVCompLoadStore.cpp b/Core/MIPS/RiscV/RiscVCompLoadStore.cpp
index 80b149ca02f3..9db1ebb657c6 100644
--- a/Core/MIPS/RiscV/RiscVCompLoadStore.cpp
+++ b/Core/MIPS/RiscV/RiscVCompLoadStore.cpp
@@ -59,8 +59,19 @@ int32_t RiscVJitBackend::AdjustForAddressOffset(RiscVGen::RiscVReg *reg, int32_t
 		if (constant > 0)
 			constant &= Memory::MEMVIEW32_MASK;
 #endif
-		LI(SCRATCH2, constant);
-		ADD(SCRATCH1, *reg, SCRATCH2);
+		// It can't be this negative, must be a constant with top bit set.
+		if ((constant & 0xC0000000) == 0x80000000) {
+			if (cpu_info.RiscV_Zba) {
+				LI(SCRATCH2, constant);
+				ADD_UW(SCRATCH1, SCRATCH2, *reg);
+			} else {
+				LI(SCRATCH2, (uint32_t)constant);
+				ADD(SCRATCH1, *reg, SCRATCH2);
+			}
+		} else {
+			LI(SCRATCH2, constant);
+			ADD(SCRATCH1, *reg, SCRATCH2);
+		}
 		*reg = SCRATCH1;
 		return 0;
 	}
diff --git a/Core/MIPS/x86/X64IRCompLoadStore.cpp b/Core/MIPS/x86/X64IRCompLoadStore.cpp
index d033832bf3f0..9b3eea1341d4 100644
--- a/Core/MIPS/x86/X64IRCompLoadStore.cpp
+++ b/Core/MIPS/x86/X64IRCompLoadStore.cpp
@@ -45,35 +45,41 @@ Gen::OpArg X64JitBackend::PrepareSrc1Address(IRInst inst) {
 	// If it's about to be clobbered, don't waste time pointerifying. Use displacement.
 	bool clobbersSrc1 = !readsFromSrc1 && regs_.IsGPRClobbered(inst.src1);
 
+	int32_t disp = (int32_t)inst.constant;
+	// It can't be this negative, must be a constant address with the top bit set.
+	if ((disp & 0xC0000000) == 0x80000000) {
+		disp = inst.constant & 0x7FFFFFFF;
+	}
+
 #ifdef MASKED_PSP_MEMORY
-	if (inst.constant > 0)
-		inst.constant &= Memory::MEMVIEW32_MASK;
+	if (disp > 0)
+		disp &= Memory::MEMVIEW32_MASK;
 #endif
 
 	OpArg addrArg;
 	if (inst.src1 == MIPS_REG_ZERO) {
 #ifdef MASKED_PSP_MEMORY
-		inst.constant &= Memory::MEMVIEW32_MASK;
+		disp &= Memory::MEMVIEW32_MASK;
 #endif
 #if PPSSPP_ARCH(AMD64)
-		addrArg = MDisp(MEMBASEREG, inst.constant & 0x7FFFFFFF);
+		addrArg = MDisp(MEMBASEREG, disp & 0x7FFFFFFF);
 #else
-		addrArg = M(Memory::base + inst.constant);
+		addrArg = M(Memory::base + disp);
 #endif
 	} else if ((jo.cachePointers || src1IsPointer) && !readsFromSrc1 && (!clobbersSrc1 || src1IsPointer)) {
 		X64Reg src1 = regs_.MapGPRAsPointer(inst.src1);
-		addrArg = MDisp(src1, (int)inst.constant);
+		addrArg = MDisp(src1, disp);
 	} else {
 		regs_.MapGPR(inst.src1);
 #ifdef MASKED_PSP_MEMORY
-		LEA(PTRBITS, SCRATCH1, MDisp(regs_.RX(inst.src1), (int)inst.constant));
+		LEA(PTRBITS, SCRATCH1, MDisp(regs_.RX(inst.src1), disp));
 		AND(PTRBITS, R(SCRATCH1), Imm32(Memory::MEMVIEW32_MASK));
 		addrArg = MDisp(SCRATCH1, (intptr_t)Memory::base);
 #else
#if PPSSPP_ARCH(AMD64)
-		addrArg = MComplex(MEMBASEREG, regs_.RX(inst.src1), SCALE_1, (int)inst.constant);
+		addrArg = MComplex(MEMBASEREG, regs_.RX(inst.src1), SCALE_1, disp);
 #else
-		addrArg = MDisp(regs_.RX(inst.src1), Memory::base + inst.constant);
+		addrArg = MDisp(regs_.RX(inst.src1), Memory::base + disp);
 #endif
 #endif
 	}
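
Context for the change above: PSP guest addresses with the top bit set (the kernel/uncached 0x8xxxxxxx range) become negative when the 32-bit constant is read as signed, and once that value widens to 64 bits the computed host address lands far below the emulated memory base instead of inside the 32-bit guest window. The sketch below is a minimal standalone illustration, not PPSSPP code; memBase is an arbitrary stand-in for Memory::base and the guest address is just an example constant.

#include <cstdint>
#include <cstdio>

int main() {
	const uint64_t memBase = 0x100000000ULL;  // illustrative stand-in for Memory::base
	const uint32_t guestAddr = 0x88000000u;   // constant address with the top bit set

	// Naive handling: the 32-bit constant is read as signed and then widened.
	int64_t imm = (int32_t)guestAddr;              // -2013265920
	uint64_t badHost = memBase + (uint64_t)imm;    // wraps to 0x88000000, below memBase

	// Detection used by the patch: a value this negative must really be a
	// constant address with the top bit set, so reinterpret it as unsigned.
	if ((imm & 0xC0000000) == 0x80000000) {
		imm = (uint64_t)guestAddr;                 // 0x88000000, a safe positive offset
	}
	uint64_t goodHost = memBase + (uint64_t)imm;   // memBase + 0x88000000

	printf("sign-extended displacement -> host %#llx\n", (unsigned long long)badHost);
	printf("zero-extended displacement -> host %#llx\n", (unsigned long long)goodHost);
	return 0;
}

Each backend applies the same idea in the form its addressing allows: the ARM64 path widens imm to int64_t and zero-extends detected constants before ADDI2R/MOVI2R; the RISC-V path prefers ADD_UW when the Zba extension is available, since that instruction zero-extends the low 32 bits of the constant register, and otherwise loads the constant as an unsigned value; the x86 path masks the displacement to 0x7FFFFFFF, matching the base-relative window it already used for the MIPS_REG_ZERO case.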