VTLB: disable MMX optimization
The memory copy will now be done with SSE or x86 registers only. It is very unlikely that the MMX path was used anyway (it required a 64-bit transfer with no free XMM register available).

Also remove the now-useless _allocMMXreg and _getFreeMMXreg.
gregory38 committed Feb 7, 2016
1 parent 15390cd commit a0e619b
Showing 3 changed files with 5 additions and 165 deletions.
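For context, a minimal C++ sketch of what the surviving x86 fallback in iMOV64_Smart amounts to once the MMX branch is gone: the 64-bit copy is emitted as two 32-bit loads/stores through a single scratch register. The function name and pointer casts below are illustrative only (not part of the commit), and the SSE branch that runs when a free XMM register is available is omitted.

// Illustrative sketch only: the semantics of the emitted x86 fallback.
// A 64-bit copy performed as two 32-bit loads/stores through one scratch
// register, mirroring the xMOV(eax, ...) pairs in the recVTLB.cpp diff below.
#include <cstdint>

static void copy64_via_x86(void* dest, const void* src)
{
    auto*       d = static_cast<uint32_t*>(dest);
    const auto* s = static_cast<const uint32_t*>(src);

    uint32_t tmp = s[0];  // xMOV( eax, srcRm )
    d[0] = tmp;           // xMOV( destRm, eax )
    tmp = s[1];           // xMOV( eax, srcRm+4 )
    d[1] = tmp;           // xMOV( destRm+4, eax )
}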
2 changes: 0 additions & 2 deletions pcsx2/x86/iCore.h
@@ -312,8 +312,6 @@ struct _mmxregs {
};

void _initMMXregs();
int _getFreeMMXreg();
int _allocMMXreg(int MMXreg, int reg, int mode);
int _checkMMXreg(int reg, int mode);
void _clearNeededMMXregs();
void _deleteMMXreg(int reg, int flush);
144 changes: 0 additions & 144 deletions pcsx2/x86/ix86-32/iCore-32.cpp
@@ -500,150 +500,6 @@ __fi void* _MMXGetAddr(int reg)
return NULL;
}

int _getFreeMMXreg()
{
uint i;
int tempi = -1;
u32 bestcount = 0x10000;

for (i=0; i<iREGCNT_MMX; i++) {
if (mmxregs[(s_mmxchecknext+i)%iREGCNT_MMX].inuse == 0) {
int ret = (s_mmxchecknext+i)%iREGCNT_MMX;
s_mmxchecknext = (s_mmxchecknext+i+1)%iREGCNT_MMX;
return ret;
}
}

// check for dead regs
for (i=0; i<iREGCNT_MMX; i++) {
if (mmxregs[i].needed) continue;
if (MMX_ISGPR(mmxregs[i].reg)) {
if( !(g_pCurInstInfo->regs[mmxregs[i].reg-MMX_GPR] & (EEINST_LIVE0)) ) {
_freeMMXreg(i);
return i;
}
if( !(g_pCurInstInfo->regs[mmxregs[i].reg-MMX_GPR]&EEINST_USED) ) {
_freeMMXreg(i);
return i;
}
}
}

// check for future xmm usage
for (i=0; i<iREGCNT_MMX; i++) {
if (mmxregs[i].needed) continue;
if (MMX_ISGPR(mmxregs[i].reg)) {
_freeMMXreg(i);
return i;
}
}

for (i=0; i<iREGCNT_MMX; i++) {
if (mmxregs[i].needed) continue;
if (mmxregs[i].reg != MMX_TEMP) {

if( mmxregs[i].counter < bestcount ) {
tempi = i;
bestcount = mmxregs[i].counter;
}
continue;
}

_freeMMXreg(i);
return i;
}

if( tempi != -1 ) {
_freeMMXreg(tempi);
return tempi;
}

pxFailDev( "mmx register allocation error" );
throw Exception::FailedToAllocateRegister();
}

int _allocMMXreg(int mmxreg, int reg, int mode)
{
uint i;

if( reg != MMX_TEMP ) {
for (i=0; i<iREGCNT_MMX; i++) {
if (mmxregs[i].inuse == 0 || mmxregs[i].reg != reg ) continue;

if( MMX_ISGPR(reg)) {
pxAssert( _checkXMMreg(XMMTYPE_GPRREG, reg-MMX_GPR, 0) == -1 );
}

mmxregs[i].needed = 1;

if( !(mmxregs[i].mode & MODE_READ) && (mode&MODE_READ) && reg != MMX_TEMP ) {

SetMMXstate();
if( reg == MMX_GPR ) {
// moving in 0s
xPXOR(xRegisterMMX(i), xRegisterMMX(i));
}
else {
if( MMX_ISGPR(reg) ) _flushConstReg(reg-MMX_GPR);
if( (mode & MODE_READHALF) || (MMX_IS32BITS(reg)&&(mode&MODE_READ)) )
xMOVDZX(xRegisterMMX(i), ptr[(_MMXGetAddr(reg))]);
else
xMOVQ(xRegisterMMX(i), ptr[(_MMXGetAddr(reg))]);
}

mmxregs[i].mode |= MODE_READ;
}

mmxregs[i].counter = g_mmxAllocCounter++;
mmxregs[i].mode|= mode;
return i;
}
}

if (mmxreg == -1)
mmxreg = _getFreeMMXreg();

mmxregs[mmxreg].inuse = 1;
mmxregs[mmxreg].reg = reg;
mmxregs[mmxreg].mode = mode&~MODE_READHALF;
mmxregs[mmxreg].needed = 1;
mmxregs[mmxreg].counter = g_mmxAllocCounter++;

SetMMXstate();
if( reg == MMX_GPR ) {
// moving in 0s
xPXOR(xRegisterMMX(mmxreg), xRegisterMMX(mmxreg));
}
else {
int xmmreg;
if( MMX_ISGPR(reg) && (xmmreg = _checkXMMreg(XMMTYPE_GPRREG, reg-MMX_GPR, 0)) >= 0 ) {
xMOVH.PS(ptr[(void*)((uptr)_MMXGetAddr(reg)+8)], xRegisterSSE(xmmreg));
if( mode & MODE_READ )
xMOVQ(xRegisterMMX(mmxreg), xRegisterSSE(xmmreg));

if( xmmregs[xmmreg].mode & MODE_WRITE )
mmxregs[mmxreg].mode |= MODE_WRITE;

// don't flush
xmmregs[xmmreg].inuse = 0;
}
else {
if( MMX_ISGPR(reg) ) {
if(mode&(MODE_READHALF|MODE_READ)) _flushConstReg(reg-MMX_GPR);
}

if( (mode & MODE_READHALF) || (MMX_IS32BITS(reg)&&(mode&MODE_READ)) ) {
xMOVDZX(xRegisterMMX(mmxreg), ptr[(_MMXGetAddr(reg))]);
}
else if( mode & MODE_READ ) {
xMOVQ(xRegisterMMX(mmxreg), ptr[(_MMXGetAddr(reg))]);
}
}
}

return mmxreg;
}

int _checkMMXreg(int reg, int mode)
{
uint i;
24 changes: 5 additions & 19 deletions pcsx2/x86/ix86-32/recVTLB.cpp
@@ -68,11 +68,7 @@ static void iMOV128_SSE( const xIndirectVoid& destRm, const xIndirectVoid& srcRm
xMOVDQA( destRm, reg );
}

// Moves 64 bits of data from point B to point A, using either MMX, SSE, or x86 registers
// if neither MMX nor SSE is available to the task.
//
// Optimizations: This method uses MMX is the cpu is in MMX mode, or SSE if it's in FPU
// mode (saving on potential xEMMS uses).
// Moves 64 bits of data from point B to point A, using either SSE, or x86 registers
//
static void iMOV64_Smart( const xIndirectVoid& destRm, const xIndirectVoid& srcRm )
{
@@ -86,20 +82,10 @@ static void iMOV64_Smart( const xIndirectVoid& destRm, const xIndirectVoid& srcR
return;
}

if( _hasFreeMMXreg() )
{
xRegisterMMX reg( _allocMMXreg(-1, MMX_TEMP, 0) );
xMOVQ( reg, srcRm );
xMOVQ( destRm, reg );
_freeMMXreg( reg.Id );
}
else
{
xMOV( eax, srcRm );
xMOV( destRm, eax );
xMOV( eax, srcRm+4 );
xMOV( destRm+4, eax );
}
xMOV( eax, srcRm );
xMOV( destRm, eax );
xMOV( eax, srcRm+4 );
xMOV( destRm+4, eax );
}

/*
