From aa0f34ec3fc7f2f3a4c523ee8db8181b04f4efbe Mon Sep 17 00:00:00 2001
From: Matheus Ferst
Date: Wed, 2 Mar 2022 06:51:37 +0100
Subject: [PATCH] target/ppc: implement vrlq
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Richard Henderson
Signed-off-by: Matheus Ferst
Message-Id: <20220225210936.1749575-26-matheus.ferst@eldorado.org.br>
Signed-off-by: Cédric Le Goater
---
 target/ppc/insn32.decode            |  1 +
 target/ppc/translate/vmx-impl.c.inc | 48 +++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)

diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index e788dc51523d..c3d47a881573 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -491,6 +491,7 @@
 VRLB            000100 ..... ..... ..... 00000000100    @VX
 VRLH            000100 ..... ..... ..... 00001000100    @VX
 VRLW            000100 ..... ..... ..... 00010000100    @VX
 VRLD            000100 ..... ..... ..... 00011000100    @VX
+VRLQ            000100 ..... ..... ..... 00000000101    @VX
 VRLWMI          000100 ..... ..... ..... 00010000101    @VX
 VRLDMI          000100 ..... ..... ..... 00011000101    @VX
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 09d6c88e62a8..478a62440db3 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -1055,6 +1055,54 @@ TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
 TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
 TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);
 
+static bool trans_VRLQ(DisasContext *ctx, arg_VX *a)
+{
+    TCGv_i64 ah, al, n, t0, t1, zero = tcg_constant_i64(0);
+
+    REQUIRE_VECTOR(ctx);
+    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+
+    ah = tcg_temp_new_i64();
+    al = tcg_temp_new_i64();
+    n = tcg_temp_new_i64();
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+
+    get_avr64(ah, a->vra, true);
+    get_avr64(al, a->vra, false);
+    get_avr64(n, a->vrb, true);
+
+    tcg_gen_mov_i64(t0, ah);
+    tcg_gen_andi_i64(t1, n, 64);
+    tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
+    tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
+    tcg_gen_andi_i64(n, n, 0x3F);
+
+    tcg_gen_shl_i64(t0, ah, n);
+    tcg_gen_shl_i64(t1, al, n);
+
+    tcg_gen_xori_i64(n, n, 63);
+
+    tcg_gen_shr_i64(al, al, n);
+    tcg_gen_shri_i64(al, al, 1);
+    tcg_gen_or_i64(t0, al, t0);
+
+    tcg_gen_shr_i64(ah, ah, n);
+    tcg_gen_shri_i64(ah, ah, 1);
+    tcg_gen_or_i64(t1, ah, t1);
+
+    set_avr64(a->vrt, t0, true);
+    set_avr64(a->vrt, t1, false);
+
+    tcg_temp_free_i64(ah);
+    tcg_temp_free_i64(al);
+    tcg_temp_free_i64(n);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+
+    return true;
+}
+
 #define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3)               \
 static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t,     \
                                          TCGv_vec sat, TCGv_vec a,      \
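
As a reading aid, not part of the patch itself: the TCG sequence in
trans_VRLQ is easier to follow written as plain C. The sketch below is an
illustration only; rotl128, hi, lo, r_hi and r_lo are invented names. It
mirrors the movcond/shift/or chain above: the rotate count is the low 7
bits of VRB's high doubleword, a rotate by 64 is just a swap of the two
doublewords, and (x >> (n ^ 63)) >> 1 stands in for x >> (64 - n) so a
count of 0 never turns into an undefined 64-bit shift.

#include <stdint.h>

/*
 * Illustrative sketch only: rotate the 128-bit value hi:lo left by n
 * (0..127) using nothing wider than 64-bit operations.
 */
static void rotl128(uint64_t hi, uint64_t lo, unsigned int n,
                    uint64_t *r_hi, uint64_t *r_lo)
{
    uint64_t t;

    /* Rotating by 64 swaps the two doublewords (the movcond pair). */
    if (n & 64) {
        t = hi;
        hi = lo;
        lo = t;
    }
    n &= 63;

    /*
     * (x >> (n ^ 63)) >> 1 equals x >> (64 - n) but stays well defined
     * when n == 0, matching the xori/shr/shri sequence in the TCG code.
     */
    *r_hi = (hi << n) | ((lo >> (n ^ 63)) >> 1);
    *r_lo = (lo << n) | ((hi >> (n ^ 63)) >> 1);
}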