Permalink
Browse files

JitAsmCommon: Amend member variable names for CommonAsmRoutinesBase

  • Loading branch information...
lioncash committed May 30, 2018
1 parent 24e0b4b commit f5f4c10fd10ceb6d7662a580d5472c5a77229726
@@ -493,7 +493,7 @@ void Jit64::WriteBLRExit()
MOV(32, R(RSCRATCH), PPCSTATE(pc));
MOV(32, R(RSCRATCH2), Imm32(js.downcountAmount));
CMP(64, R(RSCRATCH), MDisp(RSP, 8));
J_CC(CC_NE, asm_routines.dispatcherMispredictedBLR);
J_CC(CC_NE, asm_routines.dispatcher_mispredicted_blr);
SUB(32, PPCSTATE(downcount), R(RSCRATCH2));
RET();
}
@@ -536,13 +536,13 @@ void Jit64::WriteExternalExceptionExit()
void Jit64::Run()
{
  // Enter the JIT dispatcher loop emitted by Jit64AsmRoutineManager::Generate().
  // (Diff-scrape residue left both the pre- and post-rename declaration of
  // pExecAddr; only the post-commit snake_case member access is kept.)
  CompiledCode pExecAddr = (CompiledCode)asm_routines.enter_code;
  pExecAddr();
}
void Jit64::SingleStep()
{
  // Single-stepping reuses the same emitted entry point as Run(); the emitted
  // code decides how much to execute. Duplicate pre-rename declaration removed.
  CompiledCode pExecAddr = (CompiledCode)asm_routines.enter_code;
  pExecAddr();
}
@@ -660,7 +660,7 @@ const u8* Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
// available.
FixupBranch skip = J_CC(CC_G);
MOV(32, PPCSTATE(pc), Imm32(js.blockStart));
JMP(asm_routines.doTiming, true); // downcount hit zero - go doTiming.
JMP(asm_routines.do_timing, true); // downcount hit zero - go do_timing.
SetJumpTarget(skip);
const u8* normalEntry = GetCodePtr();
@@ -717,7 +717,7 @@ const u8* Jit64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
ABI_CallFunctionC(JitInterface::CompileExceptionCheck,
static_cast<u32>(JitInterface::ExceptionType::PairedQuantize));
ABI_PopRegistersAndAdjustStack({}, 0);
JMP(asm_routines.dispatcherNoCheck, true);
JMP(asm_routines.dispatcher_no_check, true);
SwitchToNearCode();
// Insert a check that the GQRs are still the value we expect at
@@ -36,7 +36,7 @@ void Jit64AsmRoutineManager::Init(u8* stack_top)
void Jit64AsmRoutineManager::Generate()
{
enterCode = AlignCode16();
enter_code = AlignCode16();
// We need to own the beginning of RSP, so we do an extra stack adjustment
// for the shadow region before calls in this function. This call will
// waste a bit of space for a second shadow, but whatever.
@@ -66,7 +66,7 @@ void Jit64AsmRoutineManager::Generate()
ABI_PopRegistersAndAdjustStack({}, 0);
FixupBranch skipToRealDispatch =
J(SConfig::GetInstance().bEnableDebugging); // skip the sync and compare first time
dispatcherMispredictedBLR = GetCodePtr();
dispatcher_mispredicted_blr = GetCodePtr();
AND(32, PPCSTATE(pc), Imm32(0xFFFFFFFC));
#if 0 // debug mispredicts
@@ -103,7 +103,7 @@ void Jit64AsmRoutineManager::Generate()
SetJumpTarget(skipToRealDispatch);
dispatcherNoCheck = GetCodePtr();
dispatcher_no_check = GetCodePtr();
// The following is a translation of JitBaseBlockCache::Dispatch into assembly.
const bool assembly_dispatcher = true;
@@ -187,10 +187,10 @@ void Jit64AsmRoutineManager::Generate()
ABI_CallFunction(JitTrampoline);
ABI_PopRegistersAndAdjustStack({}, 0);
JMP(dispatcherNoCheck, true);
JMP(dispatcher_no_check, true);
SetJumpTarget(bail);
doTiming = GetCodePtr();
do_timing = GetCodePtr();
// make sure npc contains the next pc (needed for exception checking in CoreTiming::Advance)
MOV(32, R(RSCRATCH), PPCSTATE(pc));
@@ -215,7 +215,7 @@ void Jit64AsmRoutineManager::Generate()
ABI_PopRegistersAndAdjustStack(ABI_ALL_CALLEE_SAVED, 8, 16);
RET();
JitRegister::Register(enterCode, GetCodePtr(), "JIT_Loop");
JitRegister::Register(enter_code, GetCodePtr(), "JIT_Loop");
GenerateCommon();
}
@@ -78,9 +78,9 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
MOV(32, R(RSCRATCH2), Imm32(gqrValue & 0x3F00));
if (w)
CALL(asm_routines.singleStoreQuantized[type]);
CALL(asm_routines.single_store_quantized[type]);
else
CALL(asm_routines.pairedStoreQuantized[type]);
CALL(asm_routines.paired_store_quantized[type]);
}
}
else
@@ -93,7 +93,8 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
// 0b0011111100000111, or 0x3F07.
MOV(32, R(RSCRATCH2), Imm32(0x3F07));
AND(32, R(RSCRATCH2), PPCSTATE(spr[SPR_GQR0 + i]));
LEA(64, RSCRATCH, M(w ? asm_routines.singleStoreQuantized : asm_routines.pairedStoreQuantized));
LEA(64, RSCRATCH,
M(w ? asm_routines.single_store_quantized : asm_routines.paired_store_quantized));
// 8-bit operations do not zero upper 32-bits of 64-bit registers.
// Here we know that RSCRATCH's least significant byte is zero.
OR(8, R(RSCRATCH), R(RSCRATCH2));
@@ -159,7 +160,8 @@ void Jit64::psq_lXX(UGeckoInstruction inst)
gqr.AddMemOffset(2);
MOV(32, R(RSCRATCH2), Imm32(0x3F07));
AND(32, R(RSCRATCH2), gqr);
LEA(64, RSCRATCH, M(w ? asm_routines.singleLoadQuantized : asm_routines.pairedLoadQuantized));
LEA(64, RSCRATCH,
M(w ? asm_routines.single_load_quantized : asm_routines.paired_load_quantized));
// 8-bit operations do not zero upper 32-bits of 64-bit registers.
// Here we know that RSCRATCH's least significant byte is zero.
OR(8, R(RSCRATCH), R(RSCRATCH2));
@@ -231,22 +231,25 @@ constexpr std::array<u8, 8> sizes{{32, 0, 0, 0, 8, 16, 8, 16}};
void CommonAsmRoutines::GenQuantizedStores()
{
  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
  paired_store_quantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
  // Reserve one code pointer per EQuantizeType (8 entries), then fill the
  // table with the generated paired-store routines.
  ReserveCodeSpace(8 * sizeof(u8*));
  for (int type = 0; type < 8; type++)
  {
    paired_store_quantized[type] =
        GenQuantizedStoreRuntime(false, static_cast<EQuantizeType>(type));
  }
}
// See comment in header for in/outs.
void CommonAsmRoutines::GenQuantizedSingleStores()
{
  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
  single_store_quantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
  // One generated single-store routine per EQuantizeType (8 entries).
  ReserveCodeSpace(8 * sizeof(u8*));
  for (int type = 0; type < 8; type++)
    single_store_quantized[type] = GenQuantizedStoreRuntime(true, static_cast<EQuantizeType>(type));
}
const u8* CommonAsmRoutines::GenQuantizedStoreRuntime(bool single, EQuantizeType type)
@@ -263,21 +266,21 @@ const u8* CommonAsmRoutines::GenQuantizedStoreRuntime(bool single, EQuantizeType
void CommonAsmRoutines::GenQuantizedLoads()
{
  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
  paired_load_quantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
  // One generated paired-load routine per EQuantizeType (8 entries).
  ReserveCodeSpace(8 * sizeof(u8*));
  for (int type = 0; type < 8; type++)
    paired_load_quantized[type] = GenQuantizedLoadRuntime(false, static_cast<EQuantizeType>(type));
}
void CommonAsmRoutines::GenQuantizedSingleLoads()
{
  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
  single_load_quantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
  // One generated single-load routine per EQuantizeType (8 entries).
  ReserveCodeSpace(8 * sizeof(u8*));
  for (int type = 0; type < 8; type++)
    single_load_quantized[type] = GenQuantizedLoadRuntime(true, static_cast<EQuantizeType>(type));
}
const u8* CommonAsmRoutines::GenQuantizedLoadRuntime(bool single, EQuantizeType type)
@@ -526,13 +526,13 @@ void JitArm64::EndTimeProfile(JitBlock* b)
void JitArm64::Run()
{
  // Enter the ARM64 JIT dispatcher loop. Duplicate pre-rename declaration of
  // pExecAddr (diff-scrape residue) removed; post-commit member name kept.
  CompiledCode pExecAddr = (CompiledCode)enter_code;
  pExecAddr();
}
void JitArm64::SingleStep()
{
  // Same entry point as Run(); the emitted code handles single-step semantics.
  // Duplicate pre-rename declaration removed.
  CompiledCode pExecAddr = (CompiledCode)enter_code;
  pExecAddr();
}
@@ -608,7 +608,7 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
{
FixupBranch bail = B(CC_PL);
MOVI2R(DISPATCHER_PC, js.blockStart);
B(doTiming);
B(do_timing);
SetJumpTarget(bail);
}
@@ -81,7 +81,7 @@ void JitArm64::psq_l(UGeckoInstruction inst)
UBFM(type_reg, scale_reg, 16, 18); // Type
UBFM(scale_reg, scale_reg, 24, 29); // Scale
MOVP2R(X30, inst.W ? singleLoadQuantized : pairedLoadQuantized);
MOVP2R(X30, inst.W ? single_load_quantized : paired_load_quantized);
LDR(X30, X30, ArithOption(EncodeRegTo64(type_reg), true));
BLR(X30);
@@ -192,7 +192,7 @@ void JitArm64::psq_st(UGeckoInstruction inst)
SwitchToFarCode();
SetJumpTarget(fail);
// Slow
MOVP2R(X30, &pairedStoreQuantized[16 + inst.W * 8]);
MOVP2R(X30, &paired_store_quantized[16 + inst.W * 8]);
LDR(EncodeRegTo64(type_reg), X30, ArithOption(EncodeRegTo64(type_reg), true));
ABI_PushRegisters(gprs_in_use);
@@ -205,7 +205,7 @@ void JitArm64::psq_st(UGeckoInstruction inst)
SetJumpTarget(pass);
// Fast
MOVP2R(X30, &pairedStoreQuantized[inst.W * 8]);
MOVP2R(X30, &paired_store_quantized[inst.W * 8]);
LDR(EncodeRegTo64(type_reg), X30, ArithOption(EncodeRegTo64(type_reg), true));
BLR(EncodeRegTo64(type_reg));
Oops, something went wrong.

0 comments on commit f5f4c10

Please sign in to comment.