Merge pull request #2481 from lioncash/reference

x64Emitter: Pass some OpArg parameters by const reference.
Committed by flacs on 2015-05-31 08:22:55 +02:00
7 changed files with 740 additions and 740 deletions
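The rationale: Gen::OpArg is a small struct but still wider than a pointer, so taking it by value copies it on every emitter call, and the change has to propagate into every pointer-to-member parameter type that names it. A minimal sketch of the pattern (hypothetical XEmitter/OpArg stand-ins, not Dolphin's real definitions):

#include <cstdint>

// Hypothetical stand-ins, just to show the by-value -> const-reference pattern.
struct OpArg
{
    std::uint64_t offset = 0;
    int scale = 0, indexReg = 0, offsetOrBaseReg = 0;  // several words: worth not copying
};

class XEmitter
{
public:
    // Before: void ADD(int bits, OpArg dest, OpArg src);
    // After: the operands are read-only, so a const reference avoids the copy.
    void ADD(int bits, const OpArg& dest, const OpArg& src) {}
    void SUB(int bits, const OpArg& dest, const OpArg& src) {}
};

// Helpers that dispatch through a pointer-to-member must spell out the exact
// parameter types, which is why the JIT-side signatures change in lock-step:
static void emitBin(XEmitter& emit, void (XEmitter::*op)(int, const OpArg&, const OpArg&),
                    const OpArg& a, const OpArg& b)
{
    (emit.*op)(32, a, b);  // e.g. emitBin(emitter, &XEmitter::ADD, a, b);
}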


@@ -140,8 +140,8 @@ public:
 	typedef u32 (*Operation)(u32 a, u32 b);
 	void regimmop(int d, int a, bool binary, u32 value, Operation doop, void (Gen::XEmitter::*op)(int, const Gen::OpArg&, const Gen::OpArg&),
 	              bool Rc = false, bool carry = false);
-	void fp_tri_op(int d, int a, int b, bool reversible, bool single, void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg),
-	               void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg), bool packed = false, bool roundRHS = false);
+	void fp_tri_op(int d, int a, int b, bool reversible, bool single, void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&),
+	               void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&), bool packed = false, bool roundRHS = false);
 	void FloatCompare(UGeckoInstruction inst, bool upper = false);

 	// OPCODES


@@ -16,8 +16,8 @@ static const u64 GC_ALIGNED16(psAbsMask[2]) = {0x7FFFFFFFFFFFFFFFULL, 0xFFFFFFF
 static const u64 GC_ALIGNED16(psAbsMask2[2]) = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};
 static const double GC_ALIGNED16(half_qnan_and_s32_max[2]) = {0x7FFFFFFF, -0x80000};

-void Jit64::fp_tri_op(int d, int a, int b, bool reversible, bool single, void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg),
-                      void (XEmitter::*sseOp)(X64Reg, OpArg), bool packed, bool roundRHS)
+void Jit64::fp_tri_op(int d, int a, int b, bool reversible, bool single, void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&),
+                      void (XEmitter::*sseOp)(X64Reg, const OpArg&), bool packed, bool roundRHS)
 {
 	fpr.Lock(d, a, b);
 	fpr.BindToRegister(d, d == a || d == b || !single);


@@ -488,7 +488,7 @@ static void regEmitBinInst(RegInfo& RI, InstLoc I,
 	regNormalRegClear(RI, I);
 }

-static void fregEmitBinInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(X64Reg, OpArg))
+static void fregEmitBinInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(X64Reg, const OpArg&))
 {
 	X64Reg reg;
@@ -640,7 +640,7 @@ static void regEmitMemStore(RegInfo& RI, InstLoc I, unsigned Size)
 	regClearInst(RI, getOp1(I));
 }

-static void regEmitShiftInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(int, OpArg, OpArg))
+static void regEmitShiftInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(int, const OpArg&, const OpArg&))
 {
 	X64Reg reg = regBinLHSReg(RI, I);


@@ -156,7 +156,7 @@ private:
 	// Generate the proper MOV instruction depending on whether the read should
 	// be sign extended or zero extended.
-	void MoveOpArgToReg(int sbits, Gen::OpArg arg)
+	void MoveOpArgToReg(int sbits, const Gen::OpArg& arg)
 	{
 		if (m_sign_extend)
 			m_code->MOVSX(32, sbits, m_dst_reg, arg);
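As the comment above MoveOpArgToReg says, the helper picks MOVSX or MOVZX depending on m_sign_extend. In plain C++ the two extensions behave roughly like this (a sketch with a hypothetical helper, not the JIT's emitted code):

#include <cstdint>

// Sign- vs zero-extension of an 8/16-bit read into a 32-bit value,
// mirroring what MOVSX/MOVZX do.
static std::uint32_t extend(std::uint32_t raw, int sbits, bool sign_extend)
{
    if (!sign_extend)  // MOVZX: keep the low sbits, clear the rest
        return (sbits == 32) ? raw : raw & ((1u << sbits) - 1);
    const int shift = 32 - sbits;  // MOVSX: replicate the sign bit upward
    return static_cast<std::uint32_t>(
        static_cast<std::int32_t>(raw << shift) >> shift);
}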
@@ -233,7 +233,7 @@ void EmuCodeBlock::MMIOLoadToReg(MMIO::Mapping* mmio, Gen::X64Reg reg_value,
 	}
 }

-FixupBranch EmuCodeBlock::CheckIfSafeAddress(OpArg reg_value, X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask)
+FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask)
 {
 	registers_in_use[reg_addr] = true;
 	if (reg_value.IsSimpleReg())
@@ -397,7 +397,7 @@ void EmuCodeBlock::SafeLoadToReg(X64Reg reg_value, const Gen::OpArg & opAddress,
 	}
 }

-static OpArg SwapImmediate(int accessSize, OpArg reg_value)
+static OpArg SwapImmediate(int accessSize, const OpArg& reg_value)
 {
 	if (accessSize == 32)
 		return Imm32(Common::swap32(reg_value.Imm32()));
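SwapImmediate folds the byte swap for a big-endian store into the immediate itself, so no runtime BSWAP is needed when the value is a constant. Common::swap32/swap16 are plain byte reversals, roughly:

#include <cstdint>

// What Common::swap32/swap16 amount to (sketch; the real ones may use
// compiler intrinsics): byte-reverse a value so a big-endian constant
// can be stored with a single MOV.
static std::uint32_t swap32(std::uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
}

static std::uint16_t swap16(std::uint16_t v)
{
    return static_cast<std::uint16_t>((v >> 8) | (v << 8));
}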
@@ -640,7 +640,7 @@ void EmuCodeBlock::WriteToConstRamAddress(int accessSize, OpArg arg, u32 address
 		MOV(accessSize, MRegSum(RMEM, RSCRATCH2), R(reg));
 }

-void EmuCodeBlock::ForceSinglePrecision(X64Reg output, OpArg input, bool packed, bool duplicate)
+void EmuCodeBlock::ForceSinglePrecision(X64Reg output, const OpArg& input, bool packed, bool duplicate)
 {
 	// Most games don't need these. Zelda requires it though - some platforms get stuck without them.
 	if (jit->jo.accurateSinglePrecision)
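Under accurateSinglePrecision, forcing single precision amounts to a double-to-float-to-double round trip (CVTSD2SS then CVTSS2SD, or their packed forms). A scalar model of the effect:

// Scalar model of ForceSinglePrecision: round the double to the nearest
// float, then widen it back, so later arithmetic sees single-precision
// values the way the emulated FPU would. Sketch only.
static double force_single(double x)
{
    return static_cast<double>(static_cast<float>(x));
}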
@@ -668,8 +668,8 @@ void EmuCodeBlock::ForceSinglePrecision(X64Reg output, OpArg input, bool packed,
 }

 // Abstract between AVX and SSE: automatically handle 3-operand instructions
-void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg), void (XEmitter::*sseOp)(X64Reg, OpArg),
-                          X64Reg regOp, OpArg arg1, OpArg arg2, bool packed, bool reversible)
+void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&), void (XEmitter::*sseOp)(X64Reg, const OpArg&),
+                          X64Reg regOp, const OpArg& arg1, const OpArg& arg2, bool packed, bool reversible)
 {
 	if (arg1.IsSimpleReg() && regOp == arg1.GetSimpleReg())
 	{
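avx_op bridges AVX's non-destructive three-operand forms (e.g. VADDPD dst, src1, src2) and SSE's destructive two-operand forms (ADDPD dst, src): when regOp already aliases an input it uses the SSE op directly, when AVX is available it uses the three-operand form, and otherwise it copies then operates, swapping operands first when the operation is reversible. A simplified model of that dispatch (Emitter/Op/hasAVX are stand-ins, aliasing corner cases elided):

struct Op { int reg; bool isReg; };

struct Emitter
{
    void mov(int dst, const Op& src) { /* MOVAPD/MOVAPS */ }
    void addp(int dst, const Op& src) { /* ADDPD: destructive 2-operand */ }
    void vaddp(int dst, int src1, const Op& src2) { /* VADDPD: 3-operand */ }
};

static void avx_op_sketch(Emitter& e, void (Emitter::*avxOp)(int, int, const Op&),
                          void (Emitter::*sseOp)(int, const Op&), bool hasAVX,
                          bool reversible, int regOp, const Op& arg1, const Op& arg2)
{
    if (arg1.isReg && arg1.reg == regOp)
        (e.*sseOp)(regOp, arg2);              // regOp already holds arg1
    else if (hasAVX)
        (e.*avxOp)(regOp, arg1.reg, arg2);    // non-destructive 3-operand form
    else if (reversible && arg2.isReg && arg2.reg == regOp)
        (e.*sseOp)(regOp, arg1);              // commutative: compute arg2 op arg1
    else
    {
        e.mov(regOp, arg1);                   // copy first, then destructive op
        (e.*sseOp)(regOp, arg2);
    }
}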
@@ -715,8 +715,8 @@ void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg), void (
 }

 // Abstract between AVX and SSE: automatically handle 3-operand instructions
-void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg, u8), void (XEmitter::*sseOp)(X64Reg, OpArg, u8),
-                          X64Reg regOp, OpArg arg1, OpArg arg2, u8 imm)
+void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&, u8), void (XEmitter::*sseOp)(X64Reg, const OpArg&, u8),
+                          X64Reg regOp, const OpArg& arg1, const OpArg& arg2, u8 imm)
 {
 	if (arg1.IsSimpleReg() && regOp == arg1.GetSimpleReg())
 	{
@@ -755,7 +755,7 @@ static const u64 GC_ALIGNED16(psRoundBit[2]) = {0x8000000, 0x8000000};
 // a single precision multiply. To be precise, it drops the low 28 bits of the mantissa,
 // rounding to nearest as it does.
 // It needs a temp, so let the caller pass that in.
-void EmuCodeBlock::Force25BitPrecision(X64Reg output, OpArg input, X64Reg tmp)
+void EmuCodeBlock::Force25BitPrecision(X64Reg output, const OpArg& input, X64Reg tmp)
 {
 	if (jit->jo.accurateSinglePrecision)
 	{
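With psRoundBit = 1 << 27 from above, rounding to 25-bit precision is pure integer arithmetic on the double's bit pattern: add the round bit, then clear the low 28 mantissa bits; a carry out of the mantissa correctly bumps the exponent. A scalar model, assuming the companion mask clears exactly the low 28 bits:

#include <cstdint>
#include <cstring>

// Scalar model of Force25BitPrecision: round a double's 52-bit mantissa
// down to 24 explicit bits (25-bit precision) by adding the round bit
// (1 << 27, matching psRoundBit) and truncating. Sketch only.
static double force_25bit(double x)
{
    std::uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits = (bits + 0x8000000ull) & ~0xFFFFFFFull;  // round half up, drop low 28 bits
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}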


@@ -68,7 +68,7 @@ public:
 		SetCodePtr(nearcode);
 	}

-	Gen::FixupBranch CheckIfSafeAddress(Gen::OpArg reg_value, Gen::X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask);
+	Gen::FixupBranch CheckIfSafeAddress(const Gen::OpArg& reg_value, Gen::X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask);
 	void UnsafeLoadRegToReg(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize, s32 offset = 0, bool signExtend = false);
 	void UnsafeLoadRegToRegNoSwap(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize, s32 offset, bool signExtend = false);
 	// these return the address of the MOV, for backpatching
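"The address of the MOV" matters because a faulting fastmem access is rewritten in place later; FixupBranch, which CheckIfSafeAddress returns, rests on the same record-then-patch idea. A minimal sketch of that idiom (hypothetical raw-byte emitter, not Dolphin's):

#include <cstdint>
#include <cstring>

struct Fixup { std::uint8_t* location; };  // where the 4-byte displacement lives

// Emit a JZ rel32 with a placeholder target and remember where to patch.
static Fixup emit_jz(std::uint8_t*& code)
{
    *code++ = 0x0F; *code++ = 0x84;        // JZ rel32 opcode
    Fixup f{code};
    std::memset(code, 0, 4);               // placeholder displacement
    code += 4;
    return f;
}

// Once the destination is known, write the real displacement back.
static void set_jump_target(Fixup f, const std::uint8_t* target)
{
    std::int32_t disp = static_cast<std::int32_t>(target - (f.location + 4));
    std::memcpy(f.location, &disp, sizeof(disp));
}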
@@ -116,13 +116,13 @@ public:
 	void JitSetCAIf(Gen::CCFlags conditionCode);
 	void JitClearCA();

-	void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg),
-	            Gen::X64Reg regOp, Gen::OpArg arg1, Gen::OpArg arg2, bool packed = true, bool reversible = false);
-	void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg, u8), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg, u8),
-	            Gen::X64Reg regOp, Gen::OpArg arg1, Gen::OpArg arg2, u8 imm);
+	void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&),
+	            Gen::X64Reg regOp, const Gen::OpArg& arg1, const Gen::OpArg& arg2, bool packed = true, bool reversible = false);
+	void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&, u8), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&, u8),
+	            Gen::X64Reg regOp, const Gen::OpArg& arg1, const Gen::OpArg& arg2, u8 imm);

-	void ForceSinglePrecision(Gen::X64Reg output, Gen::OpArg input, bool packed = true, bool duplicate = false);
-	void Force25BitPrecision(Gen::X64Reg output, Gen::OpArg input, Gen::X64Reg tmp);
+	void ForceSinglePrecision(Gen::X64Reg output, const Gen::OpArg& input, bool packed = true, bool duplicate = false);
+	void Force25BitPrecision(Gen::X64Reg output, const Gen::OpArg& input, Gen::X64Reg tmp);

 	// RSCRATCH might get trashed
 	void ConvertSingleToDouble(Gen::X64Reg dst, Gen::X64Reg src, bool src_is_gpr = false);