Merge pull request #5250 from MerryMage/psq_st

Jit_LoadStorePaired: Make psq_st PIE-compliant
Markus Wick authored on 2017-04-12 20:52:59 +02:00 · committed by GitHub
commit 60d8ee4916
4 changed files with 24 additions and 20 deletions
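Background: the old psq_st dispatch called through MScaled(RSCRATCH, SCALE_8, PtrOffset(asm_routines.pairedStoreQuantized)), which folds the absolute address of the quantized-store table into the instruction as a 32-bit displacement. That encoding assumes the table lives at an address representable as a sign-extended 32-bit value, which a position-independent executable does not guarantee. A rough sketch of the constraint the old encoding depends on (illustrative helper, not Dolphin code):

    #include <cstdint>

    // A SIB memory operand of the form [index*8 + table] can only encode 'table'
    // as a sign-extended 32-bit displacement, so the absolute address must pass
    // this check -- which an ASLR'd, position-independent build may not.
    bool fits_in_disp32(const void* ptr)
    {
      const auto addr = reinterpret_cast<std::int64_t>(ptr);
      return addr == static_cast<std::int32_t>(addr);
    }

The new sequence in Jit64::psq_stXX below avoids the problem by producing the table address with an LEA and indexing it in a register, so no absolute address is baked into the emitted call.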


@@ -120,28 +120,29 @@ void XEmitter::ReserveCodeSpace(int bytes)
     *code++ = 0xCC;
 }
 
+const u8* XEmitter::AlignCodeTo(size_t alignment)
+{
+  _assert_msg_(DYNA_REC, alignment != 0 && (alignment & (alignment - 1)) == 0,
+               "Alignment must be power of two");
+  u64 c = reinterpret_cast<u64>(code) & (alignment - 1);
+  if (c)
+    ReserveCodeSpace(static_cast<int>(alignment - c));
+  return code;
+}
+
 const u8* XEmitter::AlignCode4()
 {
-  int c = int((u64)code & 3);
-  if (c)
-    ReserveCodeSpace(4 - c);
-  return code;
+  return AlignCodeTo(4);
 }
 
 const u8* XEmitter::AlignCode16()
 {
-  int c = int((u64)code & 15);
-  if (c)
-    ReserveCodeSpace(16 - c);
-  return code;
+  return AlignCodeTo(16);
 }
 
 const u8* XEmitter::AlignCodePage()
 {
-  int c = int((u64)code & 4095);
-  if (c)
-    ReserveCodeSpace(4096 - c);
-  return code;
+  return AlignCodeTo(4096);
 }
 
 // This operation modifies flags; check to see the flags are locked.
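The new AlignCodeTo(alignment) helper pads the emitter's current position up to the next multiple of a power-of-two alignment, reusing ReserveCodeSpace's 0xCC (INT3) filler, and the existing AlignCode4/16/Page helpers become thin wrappers around it. A standalone sketch of the padding arithmetic (illustrative names, not Dolphin code):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Number of filler bytes AlignCodeTo would emit before 'code_ptr' is aligned.
    std::size_t padding_to(std::uintptr_t code_ptr, std::size_t alignment)
    {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);  // power of two only
      const std::uintptr_t misalignment = code_ptr & (alignment - 1);
      return misalignment ? alignment - misalignment : 0;
    }

    // e.g. padding_to(0x1234567, 16) == 9, and padding_to(0x1234523, 256) == 0xDD,
    // after which the low byte of the code pointer is zero.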


@@ -412,6 +412,7 @@ public:
   virtual ~XEmitter() {}
   void SetCodePtr(u8* ptr);
   void ReserveCodeSpace(int bytes);
+  const u8* AlignCodeTo(size_t alignment);
   const u8* AlignCode4();
   const u8* AlignCode16();
   const u8* AlignCodePage();


@@ -89,12 +89,12 @@ void Jit64::psq_stXX(UGeckoInstruction inst)
     // 0b0011111100000111, or 0x3F07.
     MOV(32, R(RSCRATCH2), Imm32(0x3F07));
     AND(32, R(RSCRATCH2), PPCSTATE(spr[SPR_GQR0 + i]));
-    MOVZX(32, 8, RSCRATCH, R(RSCRATCH2));
-
-    if (w)
-      CALLptr(MScaled(RSCRATCH, SCALE_8, PtrOffset(asm_routines.singleStoreQuantized)));
-    else
-      CALLptr(MScaled(RSCRATCH, SCALE_8, PtrOffset(asm_routines.pairedStoreQuantized)));
+    LEA(64, RSCRATCH, M(w ? asm_routines.singleStoreQuantized : asm_routines.pairedStoreQuantized));
+    // 8-bit operations do not zero upper 32-bits of 64-bit registers.
+    // Here we know that RSCRATCH's least significant byte is zero.
+    OR(8, R(RSCRATCH), R(RSCRATCH2));
+    SHL(8, R(RSCRATCH), Imm8(3));
+    CALLptr(MatR(RSCRATCH));
   }
 
   if (update && jo.memcheck)
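The replacement sequence leans on pairedStoreQuantized/singleStoreQuantized being 256-byte aligned (see the hunks below): the LEA puts the table's base address in RSCRATCH with its least significant byte known to be zero, the 8-bit OR drops the 3-bit store type (the low byte of RSCRATCH2 after the 0x3F07 mask) into that byte, and the 8-bit SHL turns it into type * 8, the byte offset of the table entry, without disturbing the upper bytes. CALLptr(MatR(RSCRATCH)) then calls through that entry. A minimal sketch of the same arithmetic in plain C++ (illustrative function, not Dolphin code):

    #include <cassert>
    #include <cstdint>

    // Address of table entry 'type', computed the way the emitted code does it.
    std::uint64_t entry_address(std::uint64_t table, std::uint64_t type)
    {
      assert((table & 0xFF) == 0);  // guaranteed by AlignCodeTo(256)
      assert(type < 8);             // 3-bit quantized-store type from the GQR

      std::uint64_t low = (table & 0xFF) | type;  // OR(8, ...): low byte becomes 'type'
      low = (low << 3) & 0xFF;                    // SHL(8, ..., 3): low byte becomes type*8 (<= 56)
      return (table & ~0xFFull) | low;            // upper bytes untouched by the 8-bit ops,
    }                                             // so result == table + type * sizeof(u8*)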


@@ -243,7 +243,8 @@ constexpr std::array<u8, 8> sizes{{32, 0, 0, 0, 8, 16, 8, 16}};
 
 void CommonAsmRoutines::GenQuantizedStores()
 {
-  pairedStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCode16()));
+  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
+  pairedStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)
@@ -253,7 +254,8 @@ void CommonAsmRoutines::GenQuantizedStores()
 // See comment in header for in/outs.
 void CommonAsmRoutines::GenQuantizedSingleStores()
 {
-  singleStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCode16()));
+  // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
+  singleStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)
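Why 256 bytes rather than the 64 that eight 8-byte entries would suggest: the emitted SHL shifts the entire low byte of RSCRATCH, so any alignment bit already set in that byte would be multiplied by 8 along with the type and the resulting address would be wrong. A compile-time check of both cases (hypothetical address bytes, not Dolphin code):

    #include <cstdint>

    // Low byte of the dispatch address after the emitted OR-then-SHL sequence.
    constexpr std::uint8_t dispatch_low_byte(std::uint8_t table_low_byte, unsigned type)
    {
      return static_cast<std::uint8_t>(((table_low_byte | type) << 3) & 0xFF);
    }

    static_assert(dispatch_low_byte(0x00, 7) == 7 * 8,
                  "256-byte-aligned table: entry offset is exactly type * 8");
    static_assert(dispatch_low_byte(0x40, 7) != 0x40 + 7 * 8,
                  "a merely 64-byte-aligned table would be mis-addressed");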