target-alpha: Use movcond
For proper cmov insns, as well as the non-goto-tb case
of conditional branch.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
rth7680 authored and mdroth committed Oct 9, 2012
1 parent 6314cc0 commit 72bdbb7
Showing 1 changed file with 48 additions and 54 deletions.
102 changes: 48 additions & 54 deletions target-alpha/translate.c
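For background, TCG's movcond is a branch-free select: tcg_gen_movcond_i64(cond, ret, c1, c2, v1, v2) computes ret = cond(c1, c2) ? v1 : v2 in a single op, which is why the rewritten paths below no longer need labels or brcond diamonds. A minimal C sketch of the runtime behavior (movcond_select is a hypothetical name for illustration, not a QEMU function):

#include <stdint.h>

/* Illustrative only: the run-time effect of
   tcg_gen_movcond_i64(cond, ret, c1, c2, v1, v2), once the
   comparison cond(c1, c2) has been reduced to a flag.  */
static uint64_t movcond_select(int cond_holds, uint64_t v1, uint64_t v2)
{
    return cond_holds ? v1 : v2;    /* ret = cond(c1, c2) ? v1 : v2 */
}

All three rewritten sites share that shape: gen_bcond_internal computes cpu_pc = cond(cmp, 0) ? dest : ctx->pc, while gen_cmov and gen_fcmov pass the current destination register as v2 so that a false condition leaves it unchanged. Note that c1 and v1 are freed only when they were allocated with tcg_const_i64; when they alias a live guest register such as cpu_ir[ra] or cpu_fir[rb], they must not be freed.
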
@@ -426,27 +426,15 @@ static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
 
         return EXIT_GOTO_TB;
     } else {
-        int lab_over = gen_new_label();
-
-        /* ??? Consider using either
-             movi pc, next
-             addi tmp, pc, disp
-             movcond pc, cond, 0, tmp, pc
-           or
-             setcond tmp, cond, 0
-             movi pc, next
-             neg tmp, tmp
-             andi tmp, tmp, disp
-             add pc, pc, tmp
-           The current diamond subgraph surely isn't efficient.  */
+        TCGv_i64 z = tcg_const_i64(0);
+        TCGv_i64 d = tcg_const_i64(dest);
+        TCGv_i64 p = tcg_const_i64(ctx->pc);
 
-        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
-        tcg_gen_movi_i64(cpu_pc, ctx->pc);
-        tcg_gen_br(lab_over);
-        gen_set_label(lab_true);
-        tcg_gen_movi_i64(cpu_pc, dest);
-        gen_set_label(lab_over);
+        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
 
+        tcg_temp_free_i64(z);
+        tcg_temp_free_i64(d);
+        tcg_temp_free_i64(p);
         return EXIT_PC_UPDATED;
     }
 }
@@ -521,61 +509,67 @@ static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                      int islit, uint8_t lit, int mask)
 {
-    TCGCond inv_cond = tcg_invert_cond(cond);
-    int l1;
+    TCGv_i64 c1, z, v1;
 
-    if (unlikely(rc == 31))
+    if (unlikely(rc == 31)) {
         return;
+    }
 
-    l1 = gen_new_label();
-
-    if (ra != 31) {
-        if (mask) {
-            TCGv tmp = tcg_temp_new();
-            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
-            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
-            tcg_temp_free(tmp);
-        } else
-            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
-    } else {
+    if (ra == 31) {
         /* Very uncommon case - Do not bother to optimize.  */
-        TCGv tmp = tcg_const_i64(0);
-        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
-        tcg_temp_free(tmp);
+        c1 = tcg_const_i64(0);
+    } else if (mask) {
+        c1 = tcg_const_i64(1);
+        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
+    } else {
+        c1 = cpu_ir[ra];
     }
+    if (islit) {
+        v1 = tcg_const_i64(lit);
+    } else {
+        v1 = cpu_ir[rb];
+    }
+    z = tcg_const_i64(0);
 
-    if (islit)
-        tcg_gen_movi_i64(cpu_ir[rc], lit);
-    else
-        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
-    gen_set_label(l1);
+    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
+
+    tcg_temp_free_i64(z);
+    if (ra == 31 || mask) {
+        tcg_temp_free_i64(c1);
+    }
+    if (islit) {
+        tcg_temp_free_i64(v1);
+    }
 }
 
 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
 {
-    TCGv cmp_tmp;
-    int l1;
+    TCGv_i64 c1, z, v1;
 
     if (unlikely(rc == 31)) {
         return;
     }
 
-    cmp_tmp = tcg_temp_new();
+    c1 = tcg_temp_new_i64();
     if (unlikely(ra == 31)) {
-        tcg_gen_movi_i64(cmp_tmp, 0);
+        tcg_gen_movi_i64(c1, 0);
     } else {
-        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
+        gen_fold_mzero(cond, c1, cpu_fir[ra]);
+    }
+    if (rb == 31) {
+        v1 = tcg_const_i64(0);
+    } else {
+        v1 = cpu_fir[rb];
     }
+    z = tcg_const_i64(0);
 
-    l1 = gen_new_label();
-    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
-    tcg_temp_free(cmp_tmp);
+    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
 
-    if (rb != 31)
-        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
-    else
-        tcg_gen_movi_i64(cpu_fir[rc], 0);
-    gen_set_label(l1);
+    tcg_temp_free_i64(z);
+    tcg_temp_free_i64(c1);
+    if (rb == 31) {
+        tcg_temp_free_i64(v1);
+    }
 }
 
 #define QUAL_RM_N       0x080 /* Round mode nearest even */