Skip to content

Commit

Permalink
tcg/i386: Allow bmi2 shiftx to have non-matching operands
Browse files Browse the repository at this point in the history
Previously we could not have different constraints for different ISA levels,
which prevented us from eliding the matching constraint for shifts.

We now have to make sure that the operands match for constant shifts.
We can also handle some small left shifts via lea.

Signed-off-by: Richard Henderson <rth@twiddle.net>
  • Loading branch information
rth7680 committed Jan 10, 2017
1 parent 42d5b51 commit 6a5aed4
Showing 1 changed file with 19 additions and 14 deletions.
33 changes: 19 additions & 14 deletions tcg/i386/tcg-target.inc.c
Expand Up @@ -179,7 +179,6 @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
break;
case 'c':
case_c:
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
break;
Expand Down Expand Up @@ -208,21 +207,13 @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
tcg_regset_set32(ct->u.regs, 0, 0xf);
break;
case 'r':
case_r:
ct->ct |= TCG_CT_REG;
if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(ct->u.regs, 0, 0xffff);
} else {
tcg_regset_set32(ct->u.regs, 0, 0xff);
}
break;
case 'C':
/* With SHRX et al, we need not use ECX as shift count register. */
if (have_bmi2) {
goto case_r;
} else {
goto case_c;
}

/* qemu_ld/st address constraint */
case 'L':
Expand Down Expand Up @@ -1959,6 +1950,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

OP_32_64(shl):
/* For small constant 3-operand shift, use LEA. */
if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
if (a2 - 1 == 0) {
/* shl $1,a1,a0 -> lea (a1,a1),a0 */
tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
} else {
/* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
}
break;
}
c = SHIFT_SHL;
vexop = OPC_SHLX;
goto gen_shift_maybe_vex;
Expand All @@ -1977,9 +1979,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
c = SHIFT_ROR;
goto gen_shift;
gen_shift_maybe_vex:
if (have_bmi2 && !const_a2) {
tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
break;
if (have_bmi2) {
if (!const_a2) {
tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
break;
}
tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
}
/* FALLTHRU */
gen_shift:
Expand Down Expand Up @@ -2190,9 +2195,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
static const TCGTargetOpDef r_0_Ci = { .args_ct_str = { "r", "0", "Ci" } };
static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
Expand Down Expand Up @@ -2266,7 +2271,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shr_i64:
case INDEX_op_sar_i32:
case INDEX_op_sar_i64:
return &r_0_Ci;
return have_bmi2 ? &r_r_ri : &r_0_ci;
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
Expand Down

0 comments on commit 6a5aed4

Please sign in to comment.