Skip to content

Commit

Permalink
Merge pull request #9670 from unknownbrackets/vertexjit
Browse files Browse the repository at this point in the history
Remove unused vertex decode steps, jit through s16
  • Loading branch information
hrydgard committed May 7, 2017
2 parents 42a6943 + 7699fa5 commit c55aa83
Show file tree
Hide file tree
Showing 10 changed files with 41 additions and 200 deletions.
6 changes: 4 additions & 2 deletions Core/MIPS/MIPSAsm.cpp
Expand Up @@ -89,8 +89,10 @@ bool MipsAssembleOpcode(const char* line, DebugInterface* cpu, u32 address)
args.silent = true;
args.memoryFile = &file;
args.errorsResult = &errors;

g_symbolMap->GetLabels(args.labels);

if (g_symbolMap) {
g_symbolMap->GetLabels(args.labels);
}

errorText = L"";
if (!runArmips(args))
Expand Down
41 changes: 17 additions & 24 deletions GPU/Common/VertexDecoderArm.cpp
Expand Up @@ -121,16 +121,13 @@ static const JitLookup jitLookup[] = {
{&VertexDecoder::Step_TcFloat, &VertexDecoderJitCache::Jit_TcFloat},
{&VertexDecoder::Step_TcU8ToFloat, &VertexDecoderJitCache::Jit_TcU8ToFloat},
{&VertexDecoder::Step_TcU16ToFloat, &VertexDecoderJitCache::Jit_TcU16ToFloat},
{&VertexDecoder::Step_TcU16Double, &VertexDecoderJitCache::Jit_TcU16Double},

{&VertexDecoder::Step_TcU8Prescale, &VertexDecoderJitCache::Jit_TcU8Prescale},
{&VertexDecoder::Step_TcU16Prescale, &VertexDecoderJitCache::Jit_TcU16Prescale},
{&VertexDecoder::Step_TcFloatPrescale, &VertexDecoderJitCache::Jit_TcFloatPrescale},

{&VertexDecoder::Step_TcU16Through, &VertexDecoderJitCache::Jit_TcU16Through},
{&VertexDecoder::Step_TcFloatThrough, &VertexDecoderJitCache::Jit_TcFloatThrough},
{&VertexDecoder::Step_TcU16ThroughDouble, &VertexDecoderJitCache::Jit_TcU16ThroughDouble},
// {&VertexDecoder::Step_TcU16ThroughToFloat, &VertexDecoderJitCache::Jit_TcU16ThroughToFloat},
{&VertexDecoder::Step_TcU16ThroughToFloat, &VertexDecoderJitCache::Jit_TcU16ThroughToFloat},

{&VertexDecoder::Step_NormalS8, &VertexDecoderJitCache::Jit_NormalS8},
{&VertexDecoder::Step_NormalS16, &VertexDecoderJitCache::Jit_NormalS16},
Expand Down Expand Up @@ -571,11 +568,10 @@ void VertexDecoderJitCache::Jit_TcFloat() {
STR(tempReg2, dstReg, dec_->decFmt.uvoff + 4);
}

void VertexDecoderJitCache::Jit_TcU16Through() {
void VertexDecoderJitCache::Jit_TcU16ThroughToFloat() {
LDRH(tempReg1, srcReg, dec_->tcoff);
LDRH(tempReg2, srcReg, dec_->tcoff + 2);

// TODO: Cleanup.
MOVP2R(scratchReg, &gstate_c.vertBounds.minU);

auto updateSide = [&](ARMReg r, CCFlags cc, u32 off) {
Expand All @@ -592,8 +588,21 @@ void VertexDecoderJitCache::Jit_TcU16Through() {
updateSide(tempReg2, CC_LT, offsetof(KnownVertexBounds, minV));
updateSide(tempReg2, CC_GT, offsetof(KnownVertexBounds, maxV));

ORR(tempReg1, tempReg1, Operand2(tempReg2, ST_LSL, 16));
STR(tempReg1, dstReg, dec_->decFmt.uvoff);
if (cpu_info.bNEON) {
ADD(scratchReg, srcReg, dec_->tcoff);
VLD1_lane(I_32, neonScratchReg, scratchReg, 0, false);
VMOVL(I_16 | I_UNSIGNED, neonScratchRegQ, neonScratchReg); // Widen to 32-bit
VCVT(F_32 | I_UNSIGNED, neonScratchRegQ, neonScratchRegQ);
ADD(scratchReg2, dstReg, dec_->decFmt.uvoff);
VST1(F_32, neonScratchReg, scratchReg2, 1, ALIGN_NONE);
} else {
VMOV(fpScratchReg, tempReg1);
VMOV(fpScratchReg2, tempReg2);
VCVT(fpScratchReg, fpScratchReg, TO_FLOAT);
VCVT(fpScratchReg2, fpScratchReg2, TO_FLOAT);
VSTR(fpScratchReg, dstReg, dec_->decFmt.uvoff);
VSTR(fpScratchReg2, dstReg, dec_->decFmt.uvoff + 4);
}
}

void VertexDecoderJitCache::Jit_TcFloatThrough() {
Expand All @@ -603,22 +612,6 @@ void VertexDecoderJitCache::Jit_TcFloatThrough() {
STR(tempReg2, dstReg, dec_->decFmt.uvoff + 4);
}

void VertexDecoderJitCache::Jit_TcU16Double() {
LDRH(tempReg1, srcReg, dec_->tcoff);
LDRH(tempReg2, srcReg, dec_->tcoff + 2);
LSL(tempReg1, tempReg1, 1);
ORR(tempReg1, tempReg1, Operand2(tempReg2, ST_LSL, 17));
STR(tempReg1, dstReg, dec_->decFmt.uvoff);
}

void VertexDecoderJitCache::Jit_TcU16ThroughDouble() {
LDRH(tempReg1, srcReg, dec_->tcoff);
LDRH(tempReg2, srcReg, dec_->tcoff + 2);
LSL(tempReg1, tempReg1, 1);
ORR(tempReg1, tempReg1, Operand2(tempReg2, ST_LSL, 17));
STR(tempReg1, dstReg, dec_->decFmt.uvoff);
}

void VertexDecoderJitCache::Jit_TcU8Prescale() {
if (cpu_info.bNEON) {
// TODO: Needs testing
Expand Down
29 changes: 6 additions & 23 deletions GPU/Common/VertexDecoderArm64.cpp
Expand Up @@ -95,16 +95,13 @@ static const JitLookup jitLookup[] = {
{&VertexDecoder::Step_TcFloat, &VertexDecoderJitCache::Jit_TcFloat},
{&VertexDecoder::Step_TcU8ToFloat, &VertexDecoderJitCache::Jit_TcU8ToFloat},
{&VertexDecoder::Step_TcU16ToFloat, &VertexDecoderJitCache::Jit_TcU16ToFloat},
{&VertexDecoder::Step_TcU16Double, &VertexDecoderJitCache::Jit_TcU16Double},

{&VertexDecoder::Step_TcU8Prescale, &VertexDecoderJitCache::Jit_TcU8Prescale},
{&VertexDecoder::Step_TcU16Prescale, &VertexDecoderJitCache::Jit_TcU16Prescale},
{&VertexDecoder::Step_TcFloatPrescale, &VertexDecoderJitCache::Jit_TcFloatPrescale},

{&VertexDecoder::Step_TcU16Through, &VertexDecoderJitCache::Jit_TcU16Through},
{&VertexDecoder::Step_TcFloatThrough, &VertexDecoderJitCache::Jit_TcFloatThrough},
{&VertexDecoder::Step_TcU16ThroughDouble, &VertexDecoderJitCache::Jit_TcU16ThroughDouble},
// {&VertexDecoder::Step_TcU16ThroughToFloat, &VertexDecoderJitCache::Jit_TcU16ThroughToFloat},
{&VertexDecoder::Step_TcU16ThroughToFloat, &VertexDecoderJitCache::Jit_TcU16ThroughToFloat},

{&VertexDecoder::Step_NormalS8, &VertexDecoderJitCache::Jit_NormalS8},
{&VertexDecoder::Step_NormalS16, &VertexDecoderJitCache::Jit_NormalS16},
Expand Down Expand Up @@ -582,7 +579,7 @@ void VertexDecoderJitCache::Jit_Color5551() {
CSEL(fullAlphaReg, fullAlphaReg, WZR, CC_EQ);
}

void VertexDecoderJitCache::Jit_TcU16Through() {
void VertexDecoderJitCache::Jit_TcU16ThroughToFloat() {
LDRH(INDEX_UNSIGNED, tempReg1, srcReg, dec_->tcoff);
LDRH(INDEX_UNSIGNED, tempReg2, srcReg, dec_->tcoff + 2);

Expand All @@ -596,31 +593,17 @@ void VertexDecoderJitCache::Jit_TcU16Through() {
updateSide(tempReg2, CC_LT, boundsMinVReg);
updateSide(tempReg2, CC_GT, boundsMaxVReg);

ORR(tempReg1, tempReg1, tempReg2, ArithOption(tempReg2, ST_LSL, 16));
STR(INDEX_UNSIGNED, tempReg1, dstReg, dec_->decFmt.uvoff);
fp.LDUR(32, neonScratchRegD, srcReg, dec_->tcoff);
fp.UXTL(16, neonScratchRegQ, neonScratchRegD); // Widen to 32-bit
fp.UCVTF(32, neonScratchRegD, neonScratchRegD);
fp.STUR(64, neonScratchRegD, dstReg, dec_->decFmt.uvoff);
}

// Through-mode float texcoords: straight 8-byte copy of U and V as a pair of
// 32-bit values. No conversion and no bounds tracking happen here.
void VertexDecoderJitCache::Jit_TcFloatThrough() {
	LDP(INDEX_SIGNED, tempReg1, tempReg2, srcReg, dec_->tcoff);
	STP(INDEX_SIGNED, tempReg1, tempReg2, dstReg, dec_->decFmt.uvoff);
}

void VertexDecoderJitCache::Jit_TcU16Double() {
LDRH(INDEX_UNSIGNED, tempReg1, srcReg, dec_->tcoff);
LDRH(INDEX_UNSIGNED, tempReg2, srcReg, dec_->tcoff + 2);
LSL(tempReg1, tempReg1, 1);
ORR(tempReg1, tempReg1, tempReg2, ArithOption(tempReg2, ST_LSL, 17));
STR(INDEX_UNSIGNED, tempReg1, dstReg, dec_->decFmt.uvoff);
}

void VertexDecoderJitCache::Jit_TcU16ThroughDouble() {
LDRH(INDEX_UNSIGNED, tempReg1, srcReg, dec_->tcoff);
LDRH(INDEX_UNSIGNED, tempReg2, srcReg, dec_->tcoff + 2);
LSL(tempReg1, tempReg1, 1);
ORR(tempReg1, tempReg1, tempReg2, ArithOption(tempReg2, ST_LSL, 17));
STR(INDEX_UNSIGNED, tempReg1, dstReg, dec_->decFmt.uvoff);
}

void VertexDecoderJitCache::Jit_TcFloat() {
LDP(INDEX_SIGNED, tempReg1, tempReg2, srcReg, dec_->tcoff);
STP(INDEX_SIGNED, tempReg1, tempReg2, dstReg, dec_->decFmt.uvoff);
Expand Down
88 changes: 0 additions & 88 deletions GPU/Common/VertexDecoderCommon.cpp
Expand Up @@ -281,35 +281,6 @@ void VertexDecoder::Step_TcU16ToFloat() const
uv[1] = uvdata[1] * (1.0f / 32768.0f);
}

void VertexDecoder::Step_TcU16Double() const
{
u16 *uv = (u16*)(decoded_ + decFmt.uvoff);
const u16 *uvdata = (const u16_le*)(ptr_ + tcoff);
uv[0] = uvdata[0] * 2;
uv[1] = uvdata[1] * 2;
}

void VertexDecoder::Step_TcU16Through() const
{
u16 *uv = (u16 *)(decoded_ + decFmt.uvoff);
const u16 *uvdata = (const u16_le*)(ptr_ + tcoff);
uv[0] = uvdata[0];
uv[1] = uvdata[1];

gstate_c.vertBounds.minU = std::min(gstate_c.vertBounds.minU, uvdata[0]);
gstate_c.vertBounds.maxU = std::max(gstate_c.vertBounds.maxU, uvdata[0]);
gstate_c.vertBounds.minV = std::min(gstate_c.vertBounds.minV, uvdata[1]);
gstate_c.vertBounds.maxV = std::max(gstate_c.vertBounds.maxV, uvdata[1]);
}

void VertexDecoder::Step_TcU16ThroughDouble() const
{
u16 *uv = (u16 *)(decoded_ + decFmt.uvoff);
const u16 *uvdata = (const u16_le*)(ptr_ + tcoff);
uv[0] = uvdata[0] * 2;
uv[1] = uvdata[1] * 2;
}

void VertexDecoder::Step_TcU16DoubleToFloat() const
{
float *uv = (float*)(decoded_ + decFmt.uvoff);
Expand Down Expand Up @@ -388,51 +359,6 @@ void VertexDecoder::Step_TcFloatPrescale() const {
uv[1] = uvdata[1] * gstate_c.uv.vScale + gstate_c.uv.vOff;
}

void VertexDecoder::Step_TcU8Morph() const {
float uv[2] = { 0, 0 };
for (int n = 0; n < morphcount; n++) {
float w = gstate_c.morphWeights[n];
const u8 *uvdata = (const u8 *)(ptr_ + onesize_*n + tcoff);

uv[0] += (float)uvdata[0] * w;
uv[1] += (float)uvdata[1] * w;
}

u8 *out = decoded_ + decFmt.uvoff;
out[0] = (int)uv[0];
out[1] = (int)uv[1];
}

void VertexDecoder::Step_TcU16Morph() const {
float uv[2] = { 0, 0 };
for (int n = 0; n < morphcount; n++) {
float w = gstate_c.morphWeights[n];
const u16_le *uvdata = (const u16_le *)(ptr_ + onesize_*n + tcoff);

uv[0] += (float)uvdata[0] * w;
uv[1] += (float)uvdata[1] * w;
}

u16_le *out = (u16_le *)(decoded_ + decFmt.uvoff);
out[0] = (int)uv[0];
out[1] = (int)uv[1];
}

void VertexDecoder::Step_TcU16DoubleMorph() const {
float uv[2] = { 0, 0 };
for (int n = 0; n < morphcount; n++) {
float w = gstate_c.morphWeights[n];
const u16_le *uvdata = (const u16_le *)(ptr_ + onesize_*n + tcoff);

uv[0] += (float)uvdata[0] * w;
uv[1] += (float)uvdata[1] * w;
}

u16_le *out = (u16_le *)(decoded_ + decFmt.uvoff);
out[0] = (int)(uv[0] * 2.0f);
out[1] = (int)(uv[1] * 2.0f);
}

void VertexDecoder::Step_TcU8MorphToFloat() const {
float uv[2] = { 0, 0 };
for (int n = 0; n < morphcount; n++) {
Expand Down Expand Up @@ -922,20 +848,6 @@ static const StepFunction tcstep_prescale_morph_remaster[4] = {
&VertexDecoder::Step_TcFloatPrescaleMorph,
};

// Morph texcoord decoders, indexed by the vertex type's tc format field
// (0 = no texcoords, then u8/u16/float) — presumably the GE_VTYPE_TC_*
// values; TODO confirm against the table-selection code.
static const StepFunction tcstep_morph[4] = {
	0,
	&VertexDecoder::Step_TcU8Morph,
	&VertexDecoder::Step_TcU16Morph,
	&VertexDecoder::Step_TcFloatMorph,
};

// Same indexing as tcstep_morph, but the u16 entry is the doubling variant
// (Step_TcU16DoubleMorph) — used for the "remaster" path per the table name;
// TODO confirm against the table-selection code.
static const StepFunction tcstep_morph_remaster[4] = {
	0,
	&VertexDecoder::Step_TcU8Morph,
	&VertexDecoder::Step_TcU16DoubleMorph,
	&VertexDecoder::Step_TcFloatMorph,
};

static const StepFunction tcstep_morphToFloat[4] = {
0,
&VertexDecoder::Step_TcU8MorphToFloat,
Expand Down
10 changes: 0 additions & 10 deletions GPU/Common/VertexDecoderCommon.h
Expand Up @@ -514,17 +514,11 @@ class VertexDecoder {
void Step_TcU16DoublePrescale() const;
void Step_TcFloatPrescale() const;

void Step_TcU16Double() const;
void Step_TcU16Through() const;
void Step_TcU16ThroughDouble() const;
void Step_TcU16DoubleToFloat() const;
void Step_TcU16ThroughToFloat() const;
void Step_TcU16ThroughDoubleToFloat() const;
void Step_TcFloatThrough() const;

void Step_TcU8Morph() const;
void Step_TcU16Morph() const;
void Step_TcU16DoubleMorph() const;
void Step_TcU8MorphToFloat() const;
void Step_TcU16MorphToFloat() const;
void Step_TcU16DoubleMorphToFloat() const;
Expand Down Expand Up @@ -675,10 +669,6 @@ class VertexDecoderJitCache : public FakeGen::FakeXCodeBlock {
void Jit_TcU16PrescaleMorph();
void Jit_TcFloatPrescaleMorph();

void Jit_TcU16Double();
void Jit_TcU16ThroughDouble();

void Jit_TcU16Through();
void Jit_TcU16ThroughToFloat();
void Jit_TcFloatThrough();

Expand Down
42 changes: 0 additions & 42 deletions GPU/Common/VertexDecoderX86.cpp
Expand Up @@ -100,16 +100,13 @@ static const JitLookup jitLookup[] = {
{&VertexDecoder::Step_TcFloat, &VertexDecoderJitCache::Jit_TcFloat},
{&VertexDecoder::Step_TcU8ToFloat, &VertexDecoderJitCache::Jit_TcU8ToFloat},
{&VertexDecoder::Step_TcU16ToFloat, &VertexDecoderJitCache::Jit_TcU16ToFloat},
{&VertexDecoder::Step_TcU16Double, &VertexDecoderJitCache::Jit_TcU16Double},

{&VertexDecoder::Step_TcU8Prescale, &VertexDecoderJitCache::Jit_TcU8Prescale},
{&VertexDecoder::Step_TcU16Prescale, &VertexDecoderJitCache::Jit_TcU16Prescale},
{&VertexDecoder::Step_TcFloatPrescale, &VertexDecoderJitCache::Jit_TcFloatPrescale},

{&VertexDecoder::Step_TcU16Through, &VertexDecoderJitCache::Jit_TcU16Through},
{&VertexDecoder::Step_TcU16ThroughToFloat, &VertexDecoderJitCache::Jit_TcU16ThroughToFloat},
{&VertexDecoder::Step_TcFloatThrough, &VertexDecoderJitCache::Jit_TcFloatThrough},
{&VertexDecoder::Step_TcU16ThroughDouble, &VertexDecoderJitCache::Jit_TcU16ThroughDouble},

{&VertexDecoder::Step_TcU8MorphToFloat, &VertexDecoderJitCache::Jit_TcU8MorphToFloat},
{&VertexDecoder::Step_TcU16MorphToFloat, &VertexDecoderJitCache::Jit_TcU16MorphToFloat},
Expand Down Expand Up @@ -696,15 +693,6 @@ void VertexDecoderJitCache::Jit_TcU16ToFloat() {
MOVQ_xmm(MDisp(dstReg, dec_->decFmt.uvoff), XMM3);
}

// Emits x86 code for the "u16 texcoords, doubled" step: packs
// ((U*2) & 0xFFFF) | (((V*2) & 0xFFFF) << 16) into one 32-bit store.
void VertexDecoderJitCache::Jit_TcU16Double() {
	MOVZX(32, 16, tempReg1, MDisp(srcReg, dec_->tcoff));      // U
	MOVZX(32, 16, tempReg2, MDisp(srcReg, dec_->tcoff + 2));  // V
	SHL(16, R(tempReg1), Imm8(1)); // 16 to get a wall to shift into
	SHL(32, R(tempReg2), Imm8(17)); // V << 17 == (V * 2) << 16, into the high half.
	OR(32, R(tempReg1), R(tempReg2));
	MOV(32, MDisp(dstReg, dec_->decFmt.uvoff), R(tempReg1));
}

void VertexDecoderJitCache::Jit_TcFloat() {
#ifdef _M_X64
MOV(64, R(tempReg1), MDisp(srcReg, dec_->tcoff));
Expand Down Expand Up @@ -851,27 +839,6 @@ void VertexDecoderJitCache::Jit_TcFloatPrescaleMorph() {
MOVQ_xmm(MDisp(dstReg, dec_->decFmt.uvoff), fpScratchReg);
}

// Through-mode u16 texcoords: copy both UVs to the output verbatim, then fold
// them into the running UV bounding box in gstate_c.vertBounds.
void VertexDecoderJitCache::Jit_TcU16Through() {
	MOV(32, R(tempReg1), MDisp(srcReg, dec_->tcoff));         // Load U|V as one word.
	MOV(32, MDisp(dstReg, dec_->decFmt.uvoff), R(tempReg1));  // Store unchanged.

	MOV(32, R(tempReg2), R(tempReg1));
	SHR(32, R(tempReg2), Imm8(16));  // tempReg2 = V; tempReg1's low half = U.

	// Emits: compare r against *value and overwrite *value unless skipCC holds
	// (i.e. keeps the min when skipCC is CC_GE, the max when CC_LE).
	auto updateSide = [&](X64Reg r, CCFlags skipCC, u16 *value) {
		CMP(16, R(r), M(value));
		FixupBranch skip = J_CC(skipCC);
		MOV(16, M(value), R(r));
		SetJumpTarget(skip);
	};

	// TODO: Can this actually be fast? Hmm, floats aren't better.
	updateSide(tempReg1, CC_GE, &gstate_c.vertBounds.minU);
	updateSide(tempReg1, CC_LE, &gstate_c.vertBounds.maxU);
	updateSide(tempReg2, CC_GE, &gstate_c.vertBounds.minV);
	updateSide(tempReg2, CC_LE, &gstate_c.vertBounds.maxV);
}

void VertexDecoderJitCache::Jit_TcU16ThroughToFloat() {
PXOR(fpScratchReg2, R(fpScratchReg2));
MOV(32, R(tempReg1), MDisp(srcReg, dec_->tcoff));
Expand All @@ -897,15 +864,6 @@ void VertexDecoderJitCache::Jit_TcU16ThroughToFloat() {
updateSide(tempReg2, CC_LE, &gstate_c.vertBounds.maxV);
}

// Through-mode variant of Jit_TcU16Double: identical doubled packing, but
// unlike Jit_TcU16Through no vertex-bounds tracking is done here.
void VertexDecoderJitCache::Jit_TcU16ThroughDouble() {
	MOVZX(32, 16, tempReg1, MDisp(srcReg, dec_->tcoff));      // U
	MOVZX(32, 16, tempReg2, MDisp(srcReg, dec_->tcoff + 2));  // V
	SHL(16, R(tempReg1), Imm8(1)); // 16 to get a wall to shift into
	SHL(32, R(tempReg2), Imm8(17)); // V << 17 == (V * 2) << 16, into the high half.
	OR(32, R(tempReg1), R(tempReg2));
	MOV(32, MDisp(dstReg, dec_->decFmt.uvoff), R(tempReg1));
}

void VertexDecoderJitCache::Jit_TcFloatThrough() {
#ifdef _M_X64
MOV(64, R(tempReg1), MDisp(srcReg, dec_->tcoff));
Expand Down
4 changes: 2 additions & 2 deletions ext/native/thin3d/thin3d_vulkan.cpp
Expand Up @@ -378,7 +378,7 @@ class VKContext : public DrawContext {
curIBufferOffset_ = offset;
}

void UpdateDynamicUniformBuffer(const void *ub, size_t size);
void UpdateDynamicUniformBuffer(const void *ub, size_t size) override;

// TODO: Add more sophisticated draws.
void Draw(int vertexCount, int offset) override;
Expand Down Expand Up @@ -1322,4 +1322,4 @@ void VKContext::HandleEvent(Event ev, int width, int height, void *param1, void
// Noop
}

} // namespace Draw
} // namespace Draw

0 comments on commit c55aa83

Please sign in to comment.