Mirror of https://github.com/dolphin-emu/dolphin.git (synced 2024-11-14 21:37:52 -07:00)

Merge pull request #2481 from lioncash/reference

x64Emitter: Pass some OpArg parameters by const reference.

Commit: 43ed19a663

Note: the full file diff is suppressed because it is too large; only the hunks below are shown.
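
The pattern throughout the diff is one mechanical change applied to many emitter entry points: OpArg parameters that were taken by value (or by non-const reference) now go by const reference, and `Type *name` pointer declarations move to the `Type* name` style. As a quick illustration of why the by-value form is worth fixing, here is a minimal sketch; the field layout below is illustrative only, not the exact Gen::OpArg definition:

#include <cstdint>

// Illustrative stand-in for Gen::OpArg: small, but still several words wide.
struct Arg
{
	uint64_t offset;          // immediate value or displacement
	uint16_t operandReg;      // encoded operand register
	uint16_t scale;           // addressing scale / immediate size tag
	uint16_t indexReg;        // SIB index register
	uint16_t offsetOrBaseReg; // base register or offset selector
};

// Before: every call copies the whole struct.
bool EqualsByValue(Arg a, Arg b)
{
	return a.offset == b.offset && a.scale == b.scale;
}

// After: a pointer-sized reference is passed instead, and const still
// guarantees the callee cannot modify the caller's argument.
bool EqualsByRef(const Arg& a, const Arg& b)
{
	return a.offset == b.offset && a.scale == b.scale;
}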
@@ -140,16 +140,16 @@ struct OpArg
 		//if scale == 0 never mind offsetting
 		offset = _offset;
 	}
-	bool operator==(OpArg b)
+	bool operator==(const OpArg& b) const
 	{
 		return operandReg == b.operandReg && scale == b.scale && offsetOrBaseReg == b.offsetOrBaseReg &&
 		       indexReg == b.indexReg && offset == b.offset;
 	}
-	void WriteREX(XEmitter *emit, int opBits, int bits, int customOp = -1) const;
+	void WriteREX(XEmitter* emit, int opBits, int bits, int customOp = -1) const;
 	void WriteVEX(XEmitter* emit, X64Reg regOp1, X64Reg regOp2, int L, int pp, int mmmmm, int W = 0) const;
-	void WriteRest(XEmitter *emit, int extraBytes=0, X64Reg operandReg=INVALID_REG, bool warn_64bit_offset = true) const;
-	void WriteFloatModRM(XEmitter *emit, FloatOp op);
-	void WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg operandReg, int bits);
+	void WriteRest(XEmitter* emit, int extraBytes=0, X64Reg operandReg=INVALID_REG, bool warn_64bit_offset = true) const;
+	void WriteFloatModRM(XEmitter* emit, FloatOp op);
+	void WriteSingleByteOp(XEmitter* emit, u8 op, X64Reg operandReg, int bits);

 	u64 Imm64() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM64); return (u64)offset; }
 	u32 Imm32() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM32); return (u32)offset; }
@@ -161,7 +161,7 @@ struct OpArg
 	s16 SImm16() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM16); return (s16)offset; }
 	s8 SImm8() const { _dbg_assert_(DYNA_REC, scale == SCALE_IMM8); return (s8)offset; }

-	void WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg &operand, int bits) const;
+	void WriteNormalOp(XEmitter* emit, bool toRM, NormalOp op, const OpArg& operand, int bits) const;
 	bool IsImm() const {return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64;}
 	bool IsSimpleReg() const {return scale == SCALE_NONE;}
 	bool IsSimpleReg(X64Reg reg) const
@@ -171,7 +171,7 @@ struct OpArg
 		return GetSimpleReg() == reg;
 	}

-	bool CanDoOpWith(const OpArg &other) const
+	bool CanDoOpWith(const OpArg& other) const
 	{
 		if (IsSimpleReg()) return true;
 		if (!IsSimpleReg() && !other.IsSimpleReg() && !other.IsImm()) return false;
@@ -214,7 +214,7 @@ private:
 };

 template <typename T>
-inline OpArg M(const T *ptr) {return OpArg((u64)(const void *)ptr, (int)SCALE_RIP);}
+inline OpArg M(const T* ptr) {return OpArg((u64)(const void*)ptr, (int)SCALE_RIP);}
 inline OpArg R(X64Reg value) {return OpArg(0, SCALE_NONE, value);}
 inline OpArg MatR(X64Reg value) {return OpArg(0, SCALE_ATREG, value);}

@@ -275,7 +275,7 @@ inline u32 PtrOffset(const void* ptr, const void* base)

 struct FixupBranch
 {
-	u8 *ptr;
+	u8* ptr;
 	int type; //0 = 8bit 1 = 32bit
 };

@@ -297,7 +297,7 @@ class XEmitter
 {
 	friend struct OpArg; // for Write8 etc
private:
-	u8 *code;
+	u8* code;
 	bool flags_locked;

 	void CheckFlags();
@@ -307,23 +307,23 @@ private:
 	void WriteSimple2Byte(int bits, u8 byte1, u8 byte2, X64Reg reg);
 	void WriteMulDivType(int bits, OpArg src, int ext);
 	void WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bool rep = false);
-	void WriteShift(int bits, OpArg dest, OpArg &shift, int ext);
-	void WriteBitTest(int bits, OpArg &dest, OpArg &index, int ext);
+	void WriteShift(int bits, OpArg dest, const OpArg& shift, int ext);
+	void WriteBitTest(int bits, const OpArg& dest, const OpArg& index, int ext);
 	void WriteMXCSR(OpArg arg, int ext);
 	void WriteSSEOp(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
-	void WriteSSSE3Op(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
-	void WriteSSE41Op(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
-	void WriteVEXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int W = 0, int extrabytes = 0);
-	void WriteVEXOp4(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, X64Reg regOp3, int W = 0);
-	void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int W = 0, int extrabytes = 0);
-	void WriteAVXOp4(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, X64Reg regOp3, int W = 0);
-	void WriteFMA3Op(u8 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int W = 0);
-	void WriteBMIOp(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
-	void WriteBMI1Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
-	void WriteBMI2Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
-	void WriteMOVBE(int bits, u8 op, X64Reg regOp, OpArg arg);
-	void WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, OpArg arg);
-	void WriteNormalOp(int bits, NormalOp op, const OpArg &a1, const OpArg &a2);
+	void WriteSSSE3Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes = 0);
+	void WriteSSE41Op(u8 opPrefix, u16 op, X64Reg regOp, const OpArg& arg, int extrabytes = 0);
+	void WriteVEXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int W = 0, int extrabytes = 0);
+	void WriteVEXOp4(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, X64Reg regOp3, int W = 0);
+	void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int W = 0, int extrabytes = 0);
+	void WriteAVXOp4(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, X64Reg regOp3, int W = 0);
+	void WriteFMA3Op(u8 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int W = 0);
+	void WriteBMIOp(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
+	void WriteBMI1Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
+	void WriteBMI2Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, const OpArg& arg, int extrabytes = 0);
+	void WriteMOVBE(int bits, u8 op, X64Reg regOp, const OpArg& arg);
+	void WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, const OpArg& arg);
+	void WriteNormalOp(int bits, NormalOp op, const OpArg& a1, const OpArg& a2);

 	void ABI_CalculateFrameSize(BitSet32 mask, size_t rsp_alignment, size_t needed_frame_size, size_t* shadowp, size_t* subtractionp, size_t* xmm_offsetp);

@@ -335,19 +335,19 @@ protected:

public:
 	XEmitter() { code = nullptr; flags_locked = false; }
-	XEmitter(u8 *code_ptr) { code = code_ptr; flags_locked = false; }
+	XEmitter(u8* code_ptr) { code = code_ptr; flags_locked = false; }
 	virtual ~XEmitter() {}

 	void WriteModRM(int mod, int rm, int reg);
 	void WriteSIB(int scale, int index, int base);

-	void SetCodePtr(u8 *ptr);
+	void SetCodePtr(u8* ptr);
 	void ReserveCodeSpace(int bytes);
-	const u8 *AlignCode4();
-	const u8 *AlignCode16();
-	const u8 *AlignCodePage();
-	const u8 *GetCodePtr() const;
-	u8 *GetWritableCodePtr();
+	const u8* AlignCode4();
+	const u8* AlignCode16();
+	const u8* AlignCodePage();
+	const u8* GetCodePtr() const;
+	u8* GetWritableCodePtr();

 	void LockFlags() { flags_locked = true; }
 	void UnlockFlags() { flags_locked = false; }
@@ -380,8 +380,8 @@ public:
 	// Stack control
 	void PUSH(X64Reg reg);
 	void POP(X64Reg reg);
-	void PUSH(int bits, const OpArg &reg);
-	void POP(int bits, const OpArg &reg);
+	void PUSH(int bits, const OpArg& reg);
+	void POP(int bits, const OpArg& reg);
 	void PUSHF();
 	void POPF();

@@ -391,20 +391,20 @@ public:
 	void UD2();
 	FixupBranch J(bool force5bytes = false);

-	void JMP(const u8 * addr, bool force5Bytes = false);
-	void JMPptr(const OpArg &arg);
+	void JMP(const u8* addr, bool force5Bytes = false);
+	void JMPptr(const OpArg& arg);
 	void JMPself(); //infinite loop!
#ifdef CALL
#undef CALL
#endif
-	void CALL(const void *fnptr);
+	void CALL(const void* fnptr);
 	void CALLptr(OpArg arg);

 	FixupBranch J_CC(CCFlags conditionCode, bool force5bytes = false);
 	//void J_CC(CCFlags conditionCode, JumpTarget target);
 	void J_CC(CCFlags conditionCode, const u8* addr);

-	void SetJumpTarget(const FixupBranch &branch);
+	void SetJumpTarget(const FixupBranch& branch);

 	void SETcc(CCFlags flag, OpArg dest);
 	// Note: CMOV brings small if any benefit on current CPUs.
@@ -416,8 +416,8 @@ public:
 	void SFENCE();

 	// Bit scan
-	void BSF(int bits, X64Reg dest, OpArg src); //bottom bit to top bit
-	void BSR(int bits, X64Reg dest, OpArg src); //top bit to bottom bit
+	void BSF(int bits, X64Reg dest, const OpArg& src); // Bottom bit to top bit
+	void BSR(int bits, X64Reg dest, const OpArg& src); // Top bit to bottom bit

 	// Cache control
 	enum PrefetchLevel
@@ -428,37 +428,37 @@ public:
 		PF_T2, //Levels 3+ (aliased to T0 on AMD)
 	};
 	void PREFETCH(PrefetchLevel level, OpArg arg);
-	void MOVNTI(int bits, OpArg dest, X64Reg src);
-	void MOVNTDQ(OpArg arg, X64Reg regOp);
-	void MOVNTPS(OpArg arg, X64Reg regOp);
-	void MOVNTPD(OpArg arg, X64Reg regOp);
+	void MOVNTI(int bits, const OpArg& dest, X64Reg src);
+	void MOVNTDQ(const OpArg& arg, X64Reg regOp);
+	void MOVNTPS(const OpArg& arg, X64Reg regOp);
+	void MOVNTPD(const OpArg& arg, X64Reg regOp);

 	// Multiplication / division
-	void MUL(int bits, OpArg src); //UNSIGNED
-	void IMUL(int bits, OpArg src); //SIGNED
-	void IMUL(int bits, X64Reg regOp, OpArg src);
-	void IMUL(int bits, X64Reg regOp, OpArg src, OpArg imm);
-	void DIV(int bits, OpArg src);
-	void IDIV(int bits, OpArg src);
+	void MUL(int bits, const OpArg& src); // UNSIGNED
+	void IMUL(int bits, const OpArg& src); // SIGNED
+	void IMUL(int bits, X64Reg regOp, const OpArg& src);
+	void IMUL(int bits, X64Reg regOp, const OpArg& src, const OpArg& imm);
+	void DIV(int bits, const OpArg& src);
+	void IDIV(int bits, const OpArg& src);

 	// Shift
-	void ROL(int bits, OpArg dest, OpArg shift);
-	void ROR(int bits, OpArg dest, OpArg shift);
-	void RCL(int bits, OpArg dest, OpArg shift);
-	void RCR(int bits, OpArg dest, OpArg shift);
-	void SHL(int bits, OpArg dest, OpArg shift);
-	void SHR(int bits, OpArg dest, OpArg shift);
-	void SAR(int bits, OpArg dest, OpArg shift);
+	void ROL(int bits, const OpArg& dest, const OpArg& shift);
+	void ROR(int bits, const OpArg& dest, const OpArg& shift);
+	void RCL(int bits, const OpArg& dest, const OpArg& shift);
+	void RCR(int bits, const OpArg& dest, const OpArg& shift);
+	void SHL(int bits, const OpArg& dest, const OpArg& shift);
+	void SHR(int bits, const OpArg& dest, const OpArg& shift);
+	void SAR(int bits, const OpArg& dest, const OpArg& shift);

 	// Bit Test
-	void BT(int bits, OpArg dest, OpArg index);
-	void BTS(int bits, OpArg dest, OpArg index);
-	void BTR(int bits, OpArg dest, OpArg index);
-	void BTC(int bits, OpArg dest, OpArg index);
+	void BT(int bits, const OpArg& dest, const OpArg& index);
+	void BTS(int bits, const OpArg& dest, const OpArg& index);
+	void BTR(int bits, const OpArg& dest, const OpArg& index);
+	void BTC(int bits, const OpArg& dest, const OpArg& index);

 	// Double-Precision Shift
-	void SHRD(int bits, OpArg dest, OpArg src, OpArg shift);
-	void SHLD(int bits, OpArg dest, OpArg src, OpArg shift);
+	void SHRD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift);
+	void SHLD(int bits, const OpArg& dest, const OpArg& src, const OpArg& shift);

 	// Extend EAX into EDX in various ways
 	void CWD(int bits = 16);
@@ -472,25 +472,25 @@ public:
 	void LEA(int bits, X64Reg dest, OpArg src);

 	// Integer arithmetic
-	void NEG (int bits, OpArg src);
-	void ADD (int bits, const OpArg &a1, const OpArg &a2);
-	void ADC (int bits, const OpArg &a1, const OpArg &a2);
-	void SUB (int bits, const OpArg &a1, const OpArg &a2);
-	void SBB (int bits, const OpArg &a1, const OpArg &a2);
-	void AND (int bits, const OpArg &a1, const OpArg &a2);
-	void CMP (int bits, const OpArg &a1, const OpArg &a2);
+	void NEG (int bits, const OpArg& src);
+	void ADD (int bits, const OpArg& a1, const OpArg& a2);
+	void ADC (int bits, const OpArg& a1, const OpArg& a2);
+	void SUB (int bits, const OpArg& a1, const OpArg& a2);
+	void SBB (int bits, const OpArg& a1, const OpArg& a2);
+	void AND (int bits, const OpArg& a1, const OpArg& a2);
+	void CMP (int bits, const OpArg& a1, const OpArg& a2);

 	// Bit operations
-	void NOT (int bits, OpArg src);
-	void OR (int bits, const OpArg &a1, const OpArg &a2);
-	void XOR (int bits, const OpArg &a1, const OpArg &a2);
-	void MOV (int bits, const OpArg &a1, const OpArg &a2);
-	void TEST(int bits, const OpArg &a1, const OpArg &a2);
+	void NOT (int bits, const OpArg& src);
+	void OR (int bits, const OpArg& a1, const OpArg& a2);
+	void XOR (int bits, const OpArg& a1, const OpArg& a2);
+	void MOV (int bits, const OpArg& a1, const OpArg& a2);
+	void TEST(int bits, const OpArg& a1, const OpArg& a2);

-	void CMP_or_TEST(int bits, const OpArg &a1, const OpArg &a2);
+	void CMP_or_TEST(int bits, const OpArg& a1, const OpArg& a2);

 	// Are these useful at all? Consider removing.
-	void XCHG(int bits, const OpArg &a1, const OpArg &a2);
+	void XCHG(int bits, const OpArg& a1, const OpArg& a2);
 	void XCHG_AHAL();

 	// Byte swapping (32 and 64-bit only).
@@ -503,17 +503,17 @@ public:
 	// Available only on Atom or >= Haswell so far. Test with cpu_info.bMOVBE.
 	void MOVBE(int bits, X64Reg dest, const OpArg& src);
 	void MOVBE(int bits, const OpArg& dest, X64Reg src);
-	void LoadAndSwap(int size, Gen::X64Reg dst, const Gen::OpArg& src);
-	void SwapAndStore(int size, const Gen::OpArg& dst, Gen::X64Reg src);
+	void LoadAndSwap(int size, X64Reg dst, const OpArg& src);
+	void SwapAndStore(int size, const OpArg& dst, X64Reg src);

 	// Available only on AMD >= Phenom or Intel >= Haswell
-	void LZCNT(int bits, X64Reg dest, OpArg src);
+	void LZCNT(int bits, X64Reg dest, const OpArg& src);
 	// Note: this one is actually part of BMI1
-	void TZCNT(int bits, X64Reg dest, OpArg src);
+	void TZCNT(int bits, X64Reg dest, const OpArg& src);

 	// WARNING - These two take 11-13 cycles and are VectorPath! (AMD64)
-	void STMXCSR(OpArg memloc);
-	void LDMXCSR(OpArg memloc);
+	void STMXCSR(const OpArg& memloc);
+	void LDMXCSR(const OpArg& memloc);

 	// Prefixes
 	void LOCK();
@@ -540,118 +540,118 @@ public:
 		x87_FPUBusy = 0x8000,
 	};

-	void FLD(int bits, OpArg src);
-	void FST(int bits, OpArg dest);
-	void FSTP(int bits, OpArg dest);
+	void FLD(int bits, const OpArg& src);
+	void FST(int bits, const OpArg& dest);
+	void FSTP(int bits, const OpArg& dest);
 	void FNSTSW_AX();
 	void FWAIT();

 	// SSE/SSE2: Floating point arithmetic
-	void ADDSS(X64Reg regOp, OpArg arg);
-	void ADDSD(X64Reg regOp, OpArg arg);
-	void SUBSS(X64Reg regOp, OpArg arg);
-	void SUBSD(X64Reg regOp, OpArg arg);
-	void MULSS(X64Reg regOp, OpArg arg);
-	void MULSD(X64Reg regOp, OpArg arg);
-	void DIVSS(X64Reg regOp, OpArg arg);
-	void DIVSD(X64Reg regOp, OpArg arg);
-	void MINSS(X64Reg regOp, OpArg arg);
-	void MINSD(X64Reg regOp, OpArg arg);
-	void MAXSS(X64Reg regOp, OpArg arg);
-	void MAXSD(X64Reg regOp, OpArg arg);
-	void SQRTSS(X64Reg regOp, OpArg arg);
-	void SQRTSD(X64Reg regOp, OpArg arg);
-	void RSQRTSS(X64Reg regOp, OpArg arg);
+	void ADDSS(X64Reg regOp, const OpArg& arg);
+	void ADDSD(X64Reg regOp, const OpArg& arg);
+	void SUBSS(X64Reg regOp, const OpArg& arg);
+	void SUBSD(X64Reg regOp, const OpArg& arg);
+	void MULSS(X64Reg regOp, const OpArg& arg);
+	void MULSD(X64Reg regOp, const OpArg& arg);
+	void DIVSS(X64Reg regOp, const OpArg& arg);
+	void DIVSD(X64Reg regOp, const OpArg& arg);
+	void MINSS(X64Reg regOp, const OpArg& arg);
+	void MINSD(X64Reg regOp, const OpArg& arg);
+	void MAXSS(X64Reg regOp, const OpArg& arg);
+	void MAXSD(X64Reg regOp, const OpArg& arg);
+	void SQRTSS(X64Reg regOp, const OpArg& arg);
+	void SQRTSD(X64Reg regOp, const OpArg& arg);
+	void RSQRTSS(X64Reg regOp, const OpArg& arg);

 	// SSE/SSE2: Floating point bitwise (yes)
-	void CMPSS(X64Reg regOp, OpArg arg, u8 compare);
-	void CMPSD(X64Reg regOp, OpArg arg, u8 compare);
+	void CMPSS(X64Reg regOp, const OpArg& arg, u8 compare);
+	void CMPSD(X64Reg regOp, const OpArg& arg, u8 compare);

-	inline void CMPEQSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_EQ); }
-	inline void CMPLTSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_LT); }
-	inline void CMPLESS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_LE); }
-	inline void CMPUNORDSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_UNORD); }
-	inline void CMPNEQSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_NEQ); }
-	inline void CMPNLTSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_NLT); }
-	inline void CMPORDSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_ORD); }
+	inline void CMPEQSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_EQ); }
+	inline void CMPLTSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_LT); }
+	inline void CMPLESS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_LE); }
+	inline void CMPUNORDSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_UNORD); }
+	inline void CMPNEQSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_NEQ); }
+	inline void CMPNLTSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_NLT); }
+	inline void CMPORDSS(X64Reg regOp, const OpArg& arg) { CMPSS(regOp, arg, CMP_ORD); }

 	// SSE/SSE2: Floating point packed arithmetic (x4 for float, x2 for double)
-	void ADDPS(X64Reg regOp, OpArg arg);
-	void ADDPD(X64Reg regOp, OpArg arg);
-	void SUBPS(X64Reg regOp, OpArg arg);
-	void SUBPD(X64Reg regOp, OpArg arg);
-	void CMPPS(X64Reg regOp, OpArg arg, u8 compare);
-	void CMPPD(X64Reg regOp, OpArg arg, u8 compare);
-	void MULPS(X64Reg regOp, OpArg arg);
-	void MULPD(X64Reg regOp, OpArg arg);
-	void DIVPS(X64Reg regOp, OpArg arg);
-	void DIVPD(X64Reg regOp, OpArg arg);
-	void MINPS(X64Reg regOp, OpArg arg);
-	void MINPD(X64Reg regOp, OpArg arg);
-	void MAXPS(X64Reg regOp, OpArg arg);
-	void MAXPD(X64Reg regOp, OpArg arg);
-	void SQRTPS(X64Reg regOp, OpArg arg);
-	void SQRTPD(X64Reg regOp, OpArg arg);
-	void RSQRTPS(X64Reg regOp, OpArg arg);
+	void ADDPS(X64Reg regOp, const OpArg& arg);
+	void ADDPD(X64Reg regOp, const OpArg& arg);
+	void SUBPS(X64Reg regOp, const OpArg& arg);
+	void SUBPD(X64Reg regOp, const OpArg& arg);
+	void CMPPS(X64Reg regOp, const OpArg& arg, u8 compare);
+	void CMPPD(X64Reg regOp, const OpArg& arg, u8 compare);
+	void MULPS(X64Reg regOp, const OpArg& arg);
+	void MULPD(X64Reg regOp, const OpArg& arg);
+	void DIVPS(X64Reg regOp, const OpArg& arg);
+	void DIVPD(X64Reg regOp, const OpArg& arg);
+	void MINPS(X64Reg regOp, const OpArg& arg);
+	void MINPD(X64Reg regOp, const OpArg& arg);
+	void MAXPS(X64Reg regOp, const OpArg& arg);
+	void MAXPD(X64Reg regOp, const OpArg& arg);
+	void SQRTPS(X64Reg regOp, const OpArg& arg);
+	void SQRTPD(X64Reg regOp, const OpArg& arg);
+	void RSQRTPS(X64Reg regOp, const OpArg& arg);

 	// SSE/SSE2: Floating point packed bitwise (x4 for float, x2 for double)
-	void ANDPS(X64Reg regOp, OpArg arg);
-	void ANDPD(X64Reg regOp, OpArg arg);
-	void ANDNPS(X64Reg regOp, OpArg arg);
-	void ANDNPD(X64Reg regOp, OpArg arg);
-	void ORPS(X64Reg regOp, OpArg arg);
-	void ORPD(X64Reg regOp, OpArg arg);
-	void XORPS(X64Reg regOp, OpArg arg);
-	void XORPD(X64Reg regOp, OpArg arg);
+	void ANDPS(X64Reg regOp, const OpArg& arg);
+	void ANDPD(X64Reg regOp, const OpArg& arg);
+	void ANDNPS(X64Reg regOp, const OpArg& arg);
+	void ANDNPD(X64Reg regOp, const OpArg& arg);
+	void ORPS(X64Reg regOp, const OpArg& arg);
+	void ORPD(X64Reg regOp, const OpArg& arg);
+	void XORPS(X64Reg regOp, const OpArg& arg);
+	void XORPD(X64Reg regOp, const OpArg& arg);

 	// SSE/SSE2: Shuffle components. These are tricky - see Intel documentation.
-	void SHUFPS(X64Reg regOp, OpArg arg, u8 shuffle);
-	void SHUFPD(X64Reg regOp, OpArg arg, u8 shuffle);
+	void SHUFPS(X64Reg regOp, const OpArg& arg, u8 shuffle);
+	void SHUFPD(X64Reg regOp, const OpArg& arg, u8 shuffle);

 	// SSE/SSE2: Useful alternative to shuffle in some cases.
-	void MOVDDUP(X64Reg regOp, OpArg arg);
+	void MOVDDUP(X64Reg regOp, const OpArg& arg);

-	void UNPCKLPS(X64Reg dest, OpArg src);
-	void UNPCKHPS(X64Reg dest, OpArg src);
-	void UNPCKLPD(X64Reg dest, OpArg src);
-	void UNPCKHPD(X64Reg dest, OpArg src);
+	void UNPCKLPS(X64Reg dest, const OpArg& src);
+	void UNPCKHPS(X64Reg dest, const OpArg& src);
+	void UNPCKLPD(X64Reg dest, const OpArg& src);
+	void UNPCKHPD(X64Reg dest, const OpArg& src);

 	// SSE/SSE2: Compares.
-	void COMISS(X64Reg regOp, OpArg arg);
-	void COMISD(X64Reg regOp, OpArg arg);
-	void UCOMISS(X64Reg regOp, OpArg arg);
-	void UCOMISD(X64Reg regOp, OpArg arg);
+	void COMISS(X64Reg regOp, const OpArg& arg);
+	void COMISD(X64Reg regOp, const OpArg& arg);
+	void UCOMISS(X64Reg regOp, const OpArg& arg);
+	void UCOMISD(X64Reg regOp, const OpArg& arg);

 	// SSE/SSE2: Moves. Use the right data type for your data, in most cases.
-	void MOVAPS(X64Reg regOp, OpArg arg);
-	void MOVAPD(X64Reg regOp, OpArg arg);
-	void MOVAPS(OpArg arg, X64Reg regOp);
-	void MOVAPD(OpArg arg, X64Reg regOp);
+	void MOVAPS(X64Reg regOp, const OpArg& arg);
+	void MOVAPD(X64Reg regOp, const OpArg& arg);
+	void MOVAPS(const OpArg& arg, X64Reg regOp);
+	void MOVAPD(const OpArg& arg, X64Reg regOp);

-	void MOVUPS(X64Reg regOp, OpArg arg);
-	void MOVUPD(X64Reg regOp, OpArg arg);
-	void MOVUPS(OpArg arg, X64Reg regOp);
-	void MOVUPD(OpArg arg, X64Reg regOp);
+	void MOVUPS(X64Reg regOp, const OpArg& arg);
+	void MOVUPD(X64Reg regOp, const OpArg& arg);
+	void MOVUPS(const OpArg& arg, X64Reg regOp);
+	void MOVUPD(const OpArg& arg, X64Reg regOp);

-	void MOVDQA(X64Reg regOp, OpArg arg);
-	void MOVDQA(OpArg arg, X64Reg regOp);
-	void MOVDQU(X64Reg regOp, OpArg arg);
-	void MOVDQU(OpArg arg, X64Reg regOp);
+	void MOVDQA(X64Reg regOp, const OpArg& arg);
+	void MOVDQA(const OpArg& arg, X64Reg regOp);
+	void MOVDQU(X64Reg regOp, const OpArg& arg);
+	void MOVDQU(const OpArg& arg, X64Reg regOp);

-	void MOVSS(X64Reg regOp, OpArg arg);
-	void MOVSD(X64Reg regOp, OpArg arg);
-	void MOVSS(OpArg arg, X64Reg regOp);
-	void MOVSD(OpArg arg, X64Reg regOp);
+	void MOVSS(X64Reg regOp, const OpArg& arg);
+	void MOVSD(X64Reg regOp, const OpArg& arg);
+	void MOVSS(const OpArg& arg, X64Reg regOp);
+	void MOVSD(const OpArg& arg, X64Reg regOp);

-	void MOVLPS(X64Reg regOp, OpArg arg);
-	void MOVLPD(X64Reg regOp, OpArg arg);
-	void MOVLPS(OpArg arg, X64Reg regOp);
-	void MOVLPD(OpArg arg, X64Reg regOp);
+	void MOVLPS(X64Reg regOp, const OpArg& arg);
+	void MOVLPD(X64Reg regOp, const OpArg& arg);
+	void MOVLPS(const OpArg& arg, X64Reg regOp);
+	void MOVLPD(const OpArg& arg, X64Reg regOp);

-	void MOVHPS(X64Reg regOp, OpArg arg);
-	void MOVHPD(X64Reg regOp, OpArg arg);
-	void MOVHPS(OpArg arg, X64Reg regOp);
-	void MOVHPD(OpArg arg, X64Reg regOp);
+	void MOVHPS(X64Reg regOp, const OpArg& arg);
+	void MOVHPD(X64Reg regOp, const OpArg& arg);
+	void MOVHPS(const OpArg& arg, X64Reg regOp);
+	void MOVHPD(const OpArg& arg, X64Reg regOp);

 	void MOVHLPS(X64Reg regOp1, X64Reg regOp2);
 	void MOVLHPS(X64Reg regOp1, X64Reg regOp2);
@@ -661,110 +661,110 @@ public:
 	// one is the xmm reg.
 	// ie: "MOVD_xmm(eax, R(xmm1))" generates incorrect code (movd xmm0, rcx)
 	// use "MOVD_xmm(R(eax), xmm1)" instead.
-	void MOVD_xmm(X64Reg dest, const OpArg &arg);
+	void MOVD_xmm(X64Reg dest, const OpArg& arg);
 	void MOVQ_xmm(X64Reg dest, OpArg arg);
-	void MOVD_xmm(const OpArg &arg, X64Reg src);
+	void MOVD_xmm(const OpArg& arg, X64Reg src);
 	void MOVQ_xmm(OpArg arg, X64Reg src);

 	// SSE/SSE2: Generates a mask from the high bits of the components of the packed register in question.
-	void MOVMSKPS(X64Reg dest, OpArg arg);
-	void MOVMSKPD(X64Reg dest, OpArg arg);
+	void MOVMSKPS(X64Reg dest, const OpArg& arg);
+	void MOVMSKPD(X64Reg dest, const OpArg& arg);

 	// SSE2: Selective byte store, mask in src register. EDI/RDI specifies store address. This is a weird one.
 	void MASKMOVDQU(X64Reg dest, X64Reg src);
-	void LDDQU(X64Reg dest, OpArg src);
+	void LDDQU(X64Reg dest, const OpArg& src);

 	// SSE/SSE2: Data type conversions.
-	void CVTPS2PD(X64Reg dest, OpArg src);
-	void CVTPD2PS(X64Reg dest, OpArg src);
-	void CVTSS2SD(X64Reg dest, OpArg src);
-	void CVTSI2SS(X64Reg dest, OpArg src);
-	void CVTSD2SS(X64Reg dest, OpArg src);
-	void CVTSI2SD(X64Reg dest, OpArg src);
-	void CVTDQ2PD(X64Reg regOp, OpArg arg);
-	void CVTPD2DQ(X64Reg regOp, OpArg arg);
-	void CVTDQ2PS(X64Reg regOp, OpArg arg);
-	void CVTPS2DQ(X64Reg regOp, OpArg arg);
+	void CVTPS2PD(X64Reg dest, const OpArg& src);
+	void CVTPD2PS(X64Reg dest, const OpArg& src);
+	void CVTSS2SD(X64Reg dest, const OpArg& src);
+	void CVTSI2SS(X64Reg dest, const OpArg& src);
+	void CVTSD2SS(X64Reg dest, const OpArg& src);
+	void CVTSI2SD(X64Reg dest, const OpArg& src);
+	void CVTDQ2PD(X64Reg regOp, const OpArg& arg);
+	void CVTPD2DQ(X64Reg regOp, const OpArg& arg);
+	void CVTDQ2PS(X64Reg regOp, const OpArg& arg);
+	void CVTPS2DQ(X64Reg regOp, const OpArg& arg);

-	void CVTTPS2DQ(X64Reg regOp, OpArg arg);
-	void CVTTPD2DQ(X64Reg regOp, OpArg arg);
+	void CVTTPS2DQ(X64Reg regOp, const OpArg& arg);
+	void CVTTPD2DQ(X64Reg regOp, const OpArg& arg);

 	// Destinations are X64 regs (rax, rbx, ...) for these instructions.
-	void CVTSS2SI(X64Reg xregdest, OpArg src);
-	void CVTSD2SI(X64Reg xregdest, OpArg src);
-	void CVTTSS2SI(X64Reg xregdest, OpArg arg);
-	void CVTTSD2SI(X64Reg xregdest, OpArg arg);
+	void CVTSS2SI(X64Reg xregdest, const OpArg& src);
+	void CVTSD2SI(X64Reg xregdest, const OpArg& src);
+	void CVTTSS2SI(X64Reg xregdest, const OpArg& arg);
+	void CVTTSD2SI(X64Reg xregdest, const OpArg& arg);

 	// SSE2: Packed integer instructions
-	void PACKSSDW(X64Reg dest, OpArg arg);
-	void PACKSSWB(X64Reg dest, OpArg arg);
-	void PACKUSDW(X64Reg dest, OpArg arg);
-	void PACKUSWB(X64Reg dest, OpArg arg);
+	void PACKSSDW(X64Reg dest, const OpArg& arg);
+	void PACKSSWB(X64Reg dest, const OpArg& arg);
+	void PACKUSDW(X64Reg dest, const OpArg& arg);
+	void PACKUSWB(X64Reg dest, const OpArg& arg);

-	void PUNPCKLBW(X64Reg dest, const OpArg &arg);
-	void PUNPCKLWD(X64Reg dest, const OpArg &arg);
-	void PUNPCKLDQ(X64Reg dest, const OpArg &arg);
-	void PUNPCKLQDQ(X64Reg dest, const OpArg &arg);
+	void PUNPCKLBW(X64Reg dest, const OpArg& arg);
+	void PUNPCKLWD(X64Reg dest, const OpArg& arg);
+	void PUNPCKLDQ(X64Reg dest, const OpArg& arg);
+	void PUNPCKLQDQ(X64Reg dest, const OpArg& arg);

-	void PTEST(X64Reg dest, OpArg arg);
-	void PAND(X64Reg dest, OpArg arg);
-	void PANDN(X64Reg dest, OpArg arg);
-	void PXOR(X64Reg dest, OpArg arg);
-	void POR(X64Reg dest, OpArg arg);
+	void PTEST(X64Reg dest, const OpArg& arg);
+	void PAND(X64Reg dest, const OpArg& arg);
+	void PANDN(X64Reg dest, const OpArg& arg);
+	void PXOR(X64Reg dest, const OpArg& arg);
+	void POR(X64Reg dest, const OpArg& arg);

-	void PADDB(X64Reg dest, OpArg arg);
-	void PADDW(X64Reg dest, OpArg arg);
-	void PADDD(X64Reg dest, OpArg arg);
-	void PADDQ(X64Reg dest, OpArg arg);
+	void PADDB(X64Reg dest, const OpArg& arg);
+	void PADDW(X64Reg dest, const OpArg& arg);
+	void PADDD(X64Reg dest, const OpArg& arg);
+	void PADDQ(X64Reg dest, const OpArg& arg);

-	void PADDSB(X64Reg dest, OpArg arg);
-	void PADDSW(X64Reg dest, OpArg arg);
-	void PADDUSB(X64Reg dest, OpArg arg);
-	void PADDUSW(X64Reg dest, OpArg arg);
+	void PADDSB(X64Reg dest, const OpArg& arg);
+	void PADDSW(X64Reg dest, const OpArg& arg);
+	void PADDUSB(X64Reg dest, const OpArg& arg);
+	void PADDUSW(X64Reg dest, const OpArg& arg);

-	void PSUBB(X64Reg dest, OpArg arg);
-	void PSUBW(X64Reg dest, OpArg arg);
-	void PSUBD(X64Reg dest, OpArg arg);
-	void PSUBQ(X64Reg dest, OpArg arg);
+	void PSUBB(X64Reg dest, const OpArg& arg);
+	void PSUBW(X64Reg dest, const OpArg& arg);
+	void PSUBD(X64Reg dest, const OpArg& arg);
+	void PSUBQ(X64Reg dest, const OpArg& arg);

-	void PSUBSB(X64Reg dest, OpArg arg);
-	void PSUBSW(X64Reg dest, OpArg arg);
-	void PSUBUSB(X64Reg dest, OpArg arg);
-	void PSUBUSW(X64Reg dest, OpArg arg);
+	void PSUBSB(X64Reg dest, const OpArg& arg);
+	void PSUBSW(X64Reg dest, const OpArg& arg);
+	void PSUBUSB(X64Reg dest, const OpArg& arg);
+	void PSUBUSW(X64Reg dest, const OpArg& arg);

-	void PAVGB(X64Reg dest, OpArg arg);
-	void PAVGW(X64Reg dest, OpArg arg);
+	void PAVGB(X64Reg dest, const OpArg& arg);
+	void PAVGW(X64Reg dest, const OpArg& arg);

-	void PCMPEQB(X64Reg dest, OpArg arg);
-	void PCMPEQW(X64Reg dest, OpArg arg);
-	void PCMPEQD(X64Reg dest, OpArg arg);
+	void PCMPEQB(X64Reg dest, const OpArg& arg);
+	void PCMPEQW(X64Reg dest, const OpArg& arg);
+	void PCMPEQD(X64Reg dest, const OpArg& arg);

-	void PCMPGTB(X64Reg dest, OpArg arg);
-	void PCMPGTW(X64Reg dest, OpArg arg);
-	void PCMPGTD(X64Reg dest, OpArg arg);
+	void PCMPGTB(X64Reg dest, const OpArg& arg);
+	void PCMPGTW(X64Reg dest, const OpArg& arg);
+	void PCMPGTD(X64Reg dest, const OpArg& arg);

-	void PEXTRW(X64Reg dest, OpArg arg, u8 subreg);
-	void PINSRW(X64Reg dest, OpArg arg, u8 subreg);
+	void PEXTRW(X64Reg dest, const OpArg& arg, u8 subreg);
+	void PINSRW(X64Reg dest, const OpArg& arg, u8 subreg);

-	void PMADDWD(X64Reg dest, OpArg arg);
-	void PSADBW(X64Reg dest, OpArg arg);
+	void PMADDWD(X64Reg dest, const OpArg& arg);
+	void PSADBW(X64Reg dest, const OpArg& arg);

-	void PMAXSW(X64Reg dest, OpArg arg);
-	void PMAXUB(X64Reg dest, OpArg arg);
-	void PMINSW(X64Reg dest, OpArg arg);
-	void PMINUB(X64Reg dest, OpArg arg);
+	void PMAXSW(X64Reg dest, const OpArg& arg);
+	void PMAXUB(X64Reg dest, const OpArg& arg);
+	void PMINSW(X64Reg dest, const OpArg& arg);
+	void PMINUB(X64Reg dest, const OpArg& arg);

-	void PMOVMSKB(X64Reg dest, OpArg arg);
-	void PSHUFD(X64Reg dest, OpArg arg, u8 shuffle);
-	void PSHUFB(X64Reg dest, OpArg arg);
+	void PMOVMSKB(X64Reg dest, const OpArg& arg);
+	void PSHUFD(X64Reg dest, const OpArg& arg, u8 shuffle);
+	void PSHUFB(X64Reg dest, const OpArg& arg);

-	void PSHUFLW(X64Reg dest, OpArg arg, u8 shuffle);
-	void PSHUFHW(X64Reg dest, OpArg arg, u8 shuffle);
+	void PSHUFLW(X64Reg dest, const OpArg& arg, u8 shuffle);
+	void PSHUFHW(X64Reg dest, const OpArg& arg, u8 shuffle);

 	void PSRLW(X64Reg reg, int shift);
 	void PSRLD(X64Reg reg, int shift);
 	void PSRLQ(X64Reg reg, int shift);
-	void PSRLQ(X64Reg reg, OpArg arg);
+	void PSRLQ(X64Reg reg, const OpArg& arg);
 	void PSRLDQ(X64Reg reg, int shift);

 	void PSLLW(X64Reg reg, int shift);
@@ -776,162 +776,162 @@ public:
 	void PSRAD(X64Reg reg, int shift);

 	// SSE4: data type conversions
-	void PMOVSXBW(X64Reg dest, OpArg arg);
-	void PMOVSXBD(X64Reg dest, OpArg arg);
-	void PMOVSXBQ(X64Reg dest, OpArg arg);
-	void PMOVSXWD(X64Reg dest, OpArg arg);
-	void PMOVSXWQ(X64Reg dest, OpArg arg);
-	void PMOVSXDQ(X64Reg dest, OpArg arg);
-	void PMOVZXBW(X64Reg dest, OpArg arg);
-	void PMOVZXBD(X64Reg dest, OpArg arg);
-	void PMOVZXBQ(X64Reg dest, OpArg arg);
-	void PMOVZXWD(X64Reg dest, OpArg arg);
-	void PMOVZXWQ(X64Reg dest, OpArg arg);
-	void PMOVZXDQ(X64Reg dest, OpArg arg);
+	void PMOVSXBW(X64Reg dest, const OpArg& arg);
+	void PMOVSXBD(X64Reg dest, const OpArg& arg);
+	void PMOVSXBQ(X64Reg dest, const OpArg& arg);
+	void PMOVSXWD(X64Reg dest, const OpArg& arg);
+	void PMOVSXWQ(X64Reg dest, const OpArg& arg);
+	void PMOVSXDQ(X64Reg dest, const OpArg& arg);
+	void PMOVZXBW(X64Reg dest, const OpArg& arg);
+	void PMOVZXBD(X64Reg dest, const OpArg& arg);
+	void PMOVZXBQ(X64Reg dest, const OpArg& arg);
+	void PMOVZXWD(X64Reg dest, const OpArg& arg);
+	void PMOVZXWQ(X64Reg dest, const OpArg& arg);
+	void PMOVZXDQ(X64Reg dest, const OpArg& arg);

 	// SSE4: blend instructions
-	void PBLENDVB(X64Reg dest, OpArg arg);
-	void BLENDVPS(X64Reg dest, OpArg arg);
-	void BLENDVPD(X64Reg dest, OpArg arg);
-	void BLENDPS(X64Reg dest, OpArg arg, u8 blend);
-	void BLENDPD(X64Reg dest, OpArg arg, u8 blend);
+	void PBLENDVB(X64Reg dest, const OpArg& arg);
+	void BLENDVPS(X64Reg dest, const OpArg& arg);
+	void BLENDVPD(X64Reg dest, const OpArg& arg);
+	void BLENDPS(X64Reg dest, const OpArg& arg, u8 blend);
+	void BLENDPD(X64Reg dest, const OpArg& arg, u8 blend);

 	// AVX
-	void VADDSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VSUBSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VMULSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VDIVSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VADDPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VSUBPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VMULPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VDIVPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VSQRTSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VCMPPD(X64Reg regOp1, X64Reg regOp2, OpArg arg, u8 compare);
-	void VSHUFPD(X64Reg regOp1, X64Reg regOp2, OpArg arg, u8 shuffle);
-	void VUNPCKLPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VUNPCKHPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VBLENDVPD(X64Reg regOp1, X64Reg regOp2, OpArg arg, X64Reg mask);
+	void VADDSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VSUBSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VMULSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VDIVSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VADDPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VSUBPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VMULPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VDIVPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VSQRTSD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VCMPPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg, u8 compare);
+	void VSHUFPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg, u8 shuffle);
+	void VUNPCKLPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VUNPCKHPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VBLENDVPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg, X64Reg mask);

-	void VANDPS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VANDPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VANDNPS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VANDNPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VORPS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VORPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VXORPS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VXORPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
+	void VANDPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VANDPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VANDNPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VANDNPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VORPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VORPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VXORPS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VXORPD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);

-	void VPAND(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VPANDN(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VPOR(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VPXOR(X64Reg regOp1, X64Reg regOp2, OpArg arg);
+	void VPAND(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VPANDN(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VPOR(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VPXOR(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);

 	// FMA3
-	void VFMADD132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD132SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD213SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD231SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD132SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD213SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADD231SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB132SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB213SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB231SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB132SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB213SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUB231SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD132SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD213SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD231SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD132SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD213SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMADD231SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB132SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB213SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB231SS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB132SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB213SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFNMSUB231SD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMADDSUB231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD132PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD213PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD231PS(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD132PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD213PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void VFMSUBADD231PD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
+	void VFMADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMADD231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB132SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB213SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB231SS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB132SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB213SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFNMSUB231SD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMADDSUB231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD132PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD213PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD231PS(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD132PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD213PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void VFMSUBADD231PD(X64Reg regOp1, X64Reg regOp2, const OpArg& arg);

 	// VEX GPR instructions
-	void SARX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
-	void SHLX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
-	void SHRX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
-	void RORX(int bits, X64Reg regOp, OpArg arg, u8 rotate);
-	void PEXT(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void PDEP(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void MULX(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
-	void BZHI(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
-	void BLSR(int bits, X64Reg regOp, OpArg arg);
-	void BLSMSK(int bits, X64Reg regOp, OpArg arg);
-	void BLSI(int bits, X64Reg regOp, OpArg arg);
-	void BEXTR(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
-	void ANDN(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
+	void SARX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
+	void SHLX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
+	void SHRX(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
+	void RORX(int bits, X64Reg regOp, const OpArg& arg, u8 rotate);
+	void PEXT(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void PDEP(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void MULX(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);
+	void BZHI(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
+	void BLSR(int bits, X64Reg regOp, const OpArg& arg);
+	void BLSMSK(int bits, X64Reg regOp, const OpArg& arg);
+	void BLSI(int bits, X64Reg regOp, const OpArg& arg);
+	void BEXTR(int bits, X64Reg regOp1, const OpArg& arg, X64Reg regOp2);
+	void ANDN(int bits, X64Reg regOp1, X64Reg regOp2, const OpArg& arg);

 	void RDTSC();

 	// Utility functions
 	// The difference between this and CALL is that this aligns the stack
 	// where appropriate.
-	void ABI_CallFunction(const void *func);
+	void ABI_CallFunction(const void* func);

-	void ABI_CallFunctionC16(const void *func, u16 param1);
-	void ABI_CallFunctionCC16(const void *func, u32 param1, u16 param2);
+	void ABI_CallFunctionC16(const void* func, u16 param1);
+	void ABI_CallFunctionCC16(const void* func, u32 param1, u16 param2);

 	// These only support u32 parameters, but that's enough for a lot of uses.
 	// These will destroy the 1 or 2 first "parameter regs".
-	void ABI_CallFunctionC(const void *func, u32 param1);
-	void ABI_CallFunctionCC(const void *func, u32 param1, u32 param2);
-	void ABI_CallFunctionCP(const void *func, u32 param1, void *param2);
-	void ABI_CallFunctionCCC(const void *func, u32 param1, u32 param2, u32 param3);
-	void ABI_CallFunctionCCP(const void *func, u32 param1, u32 param2, void *param3);
-	void ABI_CallFunctionCCCP(const void *func, u32 param1, u32 param2,u32 param3, void *param4);
-	void ABI_CallFunctionPC(const void *func, void *param1, u32 param2);
-	void ABI_CallFunctionPPC(const void *func, void *param1, void *param2, u32 param3);
-	void ABI_CallFunctionAC(int bits, const void *func, const OpArg &arg1, u32 param2);
-	void ABI_CallFunctionA(int bits, const void *func, const OpArg &arg1);
+	void ABI_CallFunctionC(const void* func, u32 param1);
+	void ABI_CallFunctionCC(const void* func, u32 param1, u32 param2);
+	void ABI_CallFunctionCP(const void* func, u32 param1, void* param2);
+	void ABI_CallFunctionCCC(const void* func, u32 param1, u32 param2, u32 param3);
+	void ABI_CallFunctionCCP(const void* func, u32 param1, u32 param2, void* param3);
+	void ABI_CallFunctionCCCP(const void* func, u32 param1, u32 param2,u32 param3, void* param4);
+	void ABI_CallFunctionPC(const void* func, void* param1, u32 param2);
+	void ABI_CallFunctionPPC(const void* func, void* param1, void* param2, u32 param3);
+	void ABI_CallFunctionAC(int bits, const void* func, const OpArg& arg1, u32 param2);
+	void ABI_CallFunctionA(int bits, const void* func, const OpArg& arg1);

 	// Pass a register as a parameter.
-	void ABI_CallFunctionR(const void *func, X64Reg reg1);
-	void ABI_CallFunctionRR(const void *func, X64Reg reg1, X64Reg reg2);
+	void ABI_CallFunctionR(const void* func, X64Reg reg1);
+	void ABI_CallFunctionRR(const void* func, X64Reg reg1, X64Reg reg2);

 	// Helper method for the above, or can be used separately.
-	void MOVTwo(int bits, Gen::X64Reg dst1, Gen::X64Reg src1, s32 offset, Gen::X64Reg dst2, Gen::X64Reg src2);
+	void MOVTwo(int bits, X64Reg dst1, X64Reg src1, s32 offset, X64Reg dst2,X64Reg src2);

 	// Saves/restores the registers and adjusts the stack to be aligned as
 	// required by the ABI, where the previous alignment was as specified.
@@ -140,8 +140,8 @@ public:
 	typedef u32 (*Operation)(u32 a, u32 b);
 	void regimmop(int d, int a, bool binary, u32 value, Operation doop, void (Gen::XEmitter::*op)(int, const Gen::OpArg&, const Gen::OpArg&),
 	              bool Rc = false, bool carry = false);
-	void fp_tri_op(int d, int a, int b, bool reversible, bool single, void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg),
-	               void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg), bool packed = false, bool roundRHS = false);
+	void fp_tri_op(int d, int a, int b, bool reversible, bool single, void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&),
+	               void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&), bool packed = false, bool roundRHS = false);
 	void FloatCompare(UGeckoInstruction inst, bool upper = false);

 	// OPCODES
@@ -16,8 +16,8 @@ static const u64 GC_ALIGNED16(psAbsMask[2]) = {0x7FFFFFFFFFFFFFFFULL, 0xFFFFFFF
static const u64 GC_ALIGNED16(psAbsMask2[2]) = {0x7FFFFFFFFFFFFFFFULL, 0x7FFFFFFFFFFFFFFFULL};
static const double GC_ALIGNED16(half_qnan_and_s32_max[2]) = {0x7FFFFFFF, -0x80000};

-void Jit64::fp_tri_op(int d, int a, int b, bool reversible, bool single, void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg),
-                      void (XEmitter::*sseOp)(X64Reg, OpArg), bool packed, bool roundRHS)
+void Jit64::fp_tri_op(int d, int a, int b, bool reversible, bool single, void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&),
+                      void (XEmitter::*sseOp)(X64Reg, const OpArg&), bool packed, bool roundRHS)
{
 	fpr.Lock(d, a, b);
 	fpr.BindToRegister(d, d == a || d == b || !single);
@@ -488,7 +488,7 @@ static void regEmitBinInst(RegInfo& RI, InstLoc I,
 	regNormalRegClear(RI, I);
}

-static void fregEmitBinInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(X64Reg, OpArg))
+static void fregEmitBinInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(X64Reg, const OpArg&))
{
 	X64Reg reg;

@@ -640,7 +640,7 @@ static void regEmitMemStore(RegInfo& RI, InstLoc I, unsigned Size)
 	regClearInst(RI, getOp1(I));
}

-static void regEmitShiftInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(int, OpArg, OpArg))
+static void regEmitShiftInst(RegInfo& RI, InstLoc I, void (JitIL::*op)(int, const OpArg&, const OpArg&))
{
 	X64Reg reg = regBinLHSReg(RI, I);

@@ -156,7 +156,7 @@ private:

 	// Generate the proper MOV instruction depending on whether the read should
 	// be sign extended or zero extended.
-	void MoveOpArgToReg(int sbits, Gen::OpArg arg)
+	void MoveOpArgToReg(int sbits, const Gen::OpArg& arg)
 	{
 		if (m_sign_extend)
 			m_code->MOVSX(32, sbits, m_dst_reg, arg);
@@ -233,7 +233,7 @@ void EmuCodeBlock::MMIOLoadToReg(MMIO::Mapping* mmio, Gen::X64Reg reg_value,
 	}
}

-FixupBranch EmuCodeBlock::CheckIfSafeAddress(OpArg reg_value, X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask)
+FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask)
{
 	registers_in_use[reg_addr] = true;
 	if (reg_value.IsSimpleReg())
|
||||
}
|
||||
}
|
||||
|
||||
static OpArg SwapImmediate(int accessSize, OpArg reg_value)
|
||||
static OpArg SwapImmediate(int accessSize, const OpArg& reg_value)
|
||||
{
|
||||
if (accessSize == 32)
|
||||
return Imm32(Common::swap32(reg_value.Imm32()));
|
||||
@ -640,7 +640,7 @@ void EmuCodeBlock::WriteToConstRamAddress(int accessSize, OpArg arg, u32 address
|
||||
MOV(accessSize, MRegSum(RMEM, RSCRATCH2), R(reg));
|
||||
}
|
||||
|
||||
void EmuCodeBlock::ForceSinglePrecision(X64Reg output, OpArg input, bool packed, bool duplicate)
|
||||
void EmuCodeBlock::ForceSinglePrecision(X64Reg output, const OpArg& input, bool packed, bool duplicate)
|
||||
{
|
||||
// Most games don't need these. Zelda requires it though - some platforms get stuck without them.
|
||||
if (jit->jo.accurateSinglePrecision)
|
||||
@ -668,8 +668,8 @@ void EmuCodeBlock::ForceSinglePrecision(X64Reg output, OpArg input, bool packed,
|
||||
}
|
||||
|
||||
// Abstract between AVX and SSE: automatically handle 3-operand instructions
|
||||
void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg), void (XEmitter::*sseOp)(X64Reg, OpArg),
|
||||
X64Reg regOp, OpArg arg1, OpArg arg2, bool packed, bool reversible)
|
||||
void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&), void (XEmitter::*sseOp)(X64Reg, const OpArg&),
|
||||
X64Reg regOp, const OpArg& arg1, const OpArg& arg2, bool packed, bool reversible)
|
||||
{
|
||||
if (arg1.IsSimpleReg() && regOp == arg1.GetSimpleReg())
|
||||
{
|
||||
@@ -715,8 +715,8 @@ void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg), void (
}

// Abstract between AVX and SSE: automatically handle 3-operand instructions
-void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, OpArg, u8), void (XEmitter::*sseOp)(X64Reg, OpArg, u8),
-                          X64Reg regOp, OpArg arg1, OpArg arg2, u8 imm)
+void EmuCodeBlock::avx_op(void (XEmitter::*avxOp)(X64Reg, X64Reg, const OpArg&, u8), void (XEmitter::*sseOp)(X64Reg, const OpArg&, u8),
+                          X64Reg regOp, const OpArg& arg1, const OpArg& arg2, u8 imm)
{
 	if (arg1.IsSimpleReg() && regOp == arg1.GetSimpleReg())
 	{
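
For readers unfamiliar with the avx_op helpers being retyped above: AVX's VEX encoding provides non-destructive three-operand forms (dst = src1 op src2), while the legacy SSE encodings are two-operand and destructive (dst = dst op src), so a dispatcher of this shape picks the AVX member function when available and otherwise emits a copy before the destructive SSE op. Below is a simplified, hypothetical sketch of that dispatch via pointer-to-member functions; the real EmuCodeBlock::avx_op also handles reversible operations and register aliasing:

struct Emitter
{
	bool has_avx = false;

	// Stand-ins for real instruction encoders; bodies omitted.
	void VADDPD(int dst, int src1, int src2) { /* emit VEX-encoded add */ }
	void ADDPD(int dst, int src) { /* emit legacy SSE add */ }
	void MOVAPD(int dst, int src) { /* emit register-to-register copy */ }

	void avx_op(void (Emitter::*avxOp)(int, int, int),
	            void (Emitter::*sseOp)(int, int),
	            int regOp, int arg1, int arg2)
	{
		if (has_avx)
		{
			(this->*avxOp)(regOp, arg1, arg2); // regOp = arg1 op arg2
		}
		else
		{
			if (regOp != arg1)
				MOVAPD(regOp, arg1);           // make regOp hold arg1 first
			(this->*sseOp)(regOp, arg2);       // regOp = regOp op arg2
		}
	}
};

// Usage: e.avx_op(&Emitter::VADDPD, &Emitter::ADDPD, 0, 1, 2);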
@@ -755,7 +755,7 @@ static const u64 GC_ALIGNED16(psRoundBit[2]) = {0x8000000, 0x8000000};
// a single precision multiply. To be precise, it drops the low 28 bits of the mantissa,
// rounding to nearest as it does.
// It needs a temp, so let the caller pass that in.
-void EmuCodeBlock::Force25BitPrecision(X64Reg output, OpArg input, X64Reg tmp)
+void EmuCodeBlock::Force25BitPrecision(X64Reg output, const OpArg& input, X64Reg tmp)
{
 	if (jit->jo.accurateSinglePrecision)
 	{
@@ -68,7 +68,7 @@ public:
 		SetCodePtr(nearcode);
 	}

-	Gen::FixupBranch CheckIfSafeAddress(Gen::OpArg reg_value, Gen::X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask);
+	Gen::FixupBranch CheckIfSafeAddress(const Gen::OpArg& reg_value, Gen::X64Reg reg_addr, BitSet32 registers_in_use, u32 mem_mask);
 	void UnsafeLoadRegToReg(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize, s32 offset = 0, bool signExtend = false);
 	void UnsafeLoadRegToRegNoSwap(Gen::X64Reg reg_addr, Gen::X64Reg reg_value, int accessSize, s32 offset, bool signExtend = false);
 	// these return the address of the MOV, for backpatching
|
||||
void JitSetCAIf(Gen::CCFlags conditionCode);
|
||||
void JitClearCA();
|
||||
|
||||
void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg),
|
||||
Gen::X64Reg regOp, Gen::OpArg arg1, Gen::OpArg arg2, bool packed = true, bool reversible = false);
|
||||
void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, Gen::OpArg, u8), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, Gen::OpArg, u8),
|
||||
Gen::X64Reg regOp, Gen::OpArg arg1, Gen::OpArg arg2, u8 imm);
|
||||
void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&),
|
||||
Gen::X64Reg regOp, const Gen::OpArg& arg1, const Gen::OpArg& arg2, bool packed = true, bool reversible = false);
|
||||
void avx_op(void (Gen::XEmitter::*avxOp)(Gen::X64Reg, Gen::X64Reg, const Gen::OpArg&, u8), void (Gen::XEmitter::*sseOp)(Gen::X64Reg, const Gen::OpArg&, u8),
|
||||
Gen::X64Reg regOp, const Gen::OpArg& arg1, const Gen::OpArg& arg2, u8 imm);
|
||||
|
||||
void ForceSinglePrecision(Gen::X64Reg output, Gen::OpArg input, bool packed = true, bool duplicate = false);
|
||||
void Force25BitPrecision(Gen::X64Reg output, Gen::OpArg input, Gen::X64Reg tmp);
|
||||
void ForceSinglePrecision(Gen::X64Reg output, const Gen::OpArg& input, bool packed = true, bool duplicate = false);
|
||||
void Force25BitPrecision(Gen::X64Reg output, const Gen::OpArg& input, Gen::X64Reg tmp);
|
||||
|
||||
// RSCRATCH might get trashed
|
||||
void ConvertSingleToDouble(Gen::X64Reg dst, Gen::X64Reg src, bool src_is_gpr = false);
|
||||
|