diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 07f83e98471fa..a66fde5b56986 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -648,45 +648,46 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
   }
 }
 
-multiclass VPatBinarySDNodeExt_V_WV<SDNode op, PatFrags extop, string instruction_name> {
+multiclass VPatBinarySDNodeExt_V_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
   foreach vti = AllWidenableIntVectors in {
     def : Pat<
       (vti.Vti.Vector
        (riscv_trunc_vector_vl
         (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
             (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
-        (riscv_vmset_vl X0),
-        X0)),
+        (riscv_vmset_vl VLOpFrag),
+        VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
         vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
         vti.Vti.AVL, vti.Vti.Log2SEW)>;
-  }
-}
-
-multiclass VPatBinarySDNodeExt_V_WX<SDNode op, PatFrags extop, string instruction_name> {
-  foreach vti = AllWidenableIntVectors in {
     def : Pat<
       (vti.Vti.Vector
        (riscv_trunc_vector_vl
         (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
             (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
-        (riscv_vmset_vl X0),
-        X0)),
+        (riscv_vmset_vl VLOpFrag),
+        VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WX_"#vti.Vti.LMul.MX)
         vti.Wti.RegClass:$rs2, GPR:$rs1,
         vti.Vti.AVL, vti.Vti.Log2SEW)>;
   }
 }
-
-multiclass VPatBinarySDNode_V_WV<SDNode op, string instruction_name> {
-  defm : VPatBinarySDNodeExt_V_WV<op, sext_oneuse, instruction_name>;
-  defm : VPatBinarySDNodeExt_V_WV<op, zext_oneuse, instruction_name>;
-}
-
-multiclass VPatBinarySDNode_V_WX<SDNode op, string instruction_name> {
-  defm : VPatBinarySDNodeExt_V_WX<op, sext_oneuse, instruction_name>;
-  defm : VPatBinarySDNodeExt_V_WX<op, zext_oneuse, instruction_name>;
+multiclass VPatBinarySDNode_V_WV_WX_WI<SDNode op, string instruction_name> {
+  defm : VPatBinarySDNodeExt_V_WV_WX<op, sext_oneuse, instruction_name>;
+  defm : VPatBinarySDNodeExt_V_WV_WX<op, zext_oneuse, instruction_name>;
+  foreach vti = AllWidenableIntVectors in {
+    def : Pat<
+      (vti.Vti.Vector
+       (riscv_trunc_vector_vl
+        (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+            (vti.Wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
+        (riscv_vmset_vl VLOpFrag),
+        VLOpFrag)),
+      (!cast<Instruction>(instruction_name#"_WI_"#vti.Vti.LMul.MX)
+        vti.Wti.RegClass:$rs2, uimm5:$rs1,
+        vti.Vti.AVL, vti.Vti.Log2SEW)>;
+  }
 }
 
 multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
@@ -796,10 +797,8 @@ foreach vti = AllIntegerVectors in {
 }
 
 // 12.7. Vector Narrowing Integer Right Shift Instructions
-defm : VPatBinarySDNode_V_WV<srl, "PseudoVNSRL">;
-defm : VPatBinarySDNode_V_WX<srl, "PseudoVNSRL">;
-defm : VPatBinarySDNode_V_WV<sra, "PseudoVNSRA">;
-defm : VPatBinarySDNode_V_WX<sra, "PseudoVNSRA">;
+defm : VPatBinarySDNode_V_WV_WX_WI<srl, "PseudoVNSRL">;
+defm : VPatBinarySDNode_V_WV_WX_WI<sra, "PseudoVNSRA">;
 
 foreach vtiTowti = AllWidenableIntVectors in {
   defvar vti = vtiTowti.Vti;
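With the WV and WX patterns folded into one multiclass, the new block at the end of VPatBinarySDNode_V_WV_WX_WI is the functional change: a truncate of a wide shift whose amount is a splatted 5-bit immediate (SplatPat_uimm5) now selects the _WI pseudo directly, so isel emits vnsrl.wi/vnsra.wi instead of splatting the constant and using the register form. A minimal sketch of IR the new pattern should match (a hypothetical example, not one of the tests in this patch):

define <vscale x 1 x i32> @trunc_of_wide_shift(<vscale x 1 x i64> %va) {
  ; Splat the constant 8; 8 fits in uimm5 (0..31), so the _WI pattern applies.
  %head = insertelement <vscale x 1 x i64> poison, i64 8, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  ; A wide lshr followed by a truncate is what riscv_trunc_vector_vl(op ...) matches.
  %x = lshr <vscale x 1 x i64> %va, %splat
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %y
}

Expected codegen after this patch is a single narrowing shift, roughly: vsetvli a0, zero, e32, mf2, ta, mu / vnsrl.wi v8, v8, 8.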
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index 2c950916f9ce5..a44e59e2accdc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -48,10 +48,8 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 define <vscale x 2 x i16> @scalable(<vscale x 2 x i32> %x) {
 ; CHECK-LABEL: scalable:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v8, 16
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 16
 ; CHECK-NEXT:    lui a0, 1048568
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
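This test shows the payoff directly: the shift-by-16 plus truncate used to take an e32 vsrl.vi, a vsetvli toggle, and an e16 vnsrl.wx by zero; it now folds to one e16 vnsrl.wi. For context, the @scalable body is roughly the following (reconstructed from the test name and CHECK lines; the diff itself does not show it):

define <vscale x 2 x i16> @scalable(<vscale x 2 x i32> %x) {
  ; (x >> 16) truncated to i16, then masked; the lui 1048568 / vand.vx pair
  ; implements the mask (0xFFFF8000 truncates to 0x8000 at e16).
  %head = insertelement <vscale x 2 x i32> poison, i32 16, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %shr = lshr <vscale x 2 x i32> %x, %splat
  %trunc = trunc <vscale x 2 x i32> %shr to <vscale x 2 x i16>
  %mhead = insertelement <vscale x 2 x i16> poison, i16 -32768, i32 0
  %msplat = shufflevector <vscale x 2 x i16> %mhead, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %and = and <vscale x 2 x i16> %trunc, %msplat
  ret <vscale x 2 x i16> %and
}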
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index e307e2630f3cd..b08d67c5f5ebb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -58,9 +58,8 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -74,9 +73,8 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -143,9 +141,8 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -159,9 +156,8 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -228,9 +224,8 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v10, v10
-; RV32D-NEXT:    vsrl.vi v10, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -244,9 +239,8 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v10, v10
-; RV64D-NEXT:    vsrl.vi v10, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -313,9 +307,8 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v12, v12
-; RV32D-NEXT:    vsrl.vi v12, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 134
@@ -329,9 +322,8 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v12, v12
-; RV64D-NEXT:    vsrl.vi v12, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    li a0, 134
@@ -398,9 +390,8 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v16, v16
-; RV32D-NEXT:    vsrl.vi v16, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 134
@@ -414,9 +405,8 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v16, v16
-; RV64D-NEXT:    vsrl.vi v16, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v16, zero
+; RV64D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    li a0, 134
@@ -558,10 +548,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -573,10 +560,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -659,10 +643,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -674,10 +655,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -760,10 +738,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v10, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -775,10 +750,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v10, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -861,10 +833,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v12, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -876,10 +845,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v12, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -962,10 +928,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v16, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -977,10 +940,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v16, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v16, zero
+; RV64D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -2046,9 +2006,8 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2060,9 +2019,8 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2126,9 +2084,8 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2140,9 +2097,8 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2206,9 +2162,8 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2220,9 +2175,8 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2286,9 +2240,8 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2300,9 +2253,8 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2366,9 +2318,8 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2380,9 +2331,8 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2519,10 +2469,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2531,10 +2478,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2613,10 +2557,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2625,10 +2566,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2707,24 +2645,18 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v10, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv4i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v10, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
   ret <vscale x 4 x i16> %a
@@ -2801,24 +2733,18 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v12, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv8i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v12, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
   ret <vscale x 8 x i16> %a
@@ -2895,24 +2821,18 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v16, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv16i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v16, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
   ret <vscale x 16 x i16> %a
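All of the ctlz changes are the same peephole applied to the float-based lowering: the input is widened, converted to float, and the biased exponent is recovered by shifting the float bits right by 23. The exponent of float(x) is 127 + floor(log2 x), so ctlz comes out as (bitwidth - 1 + 127) - exponent, which is the vrsub.vx against 134 for i8 and 142 for i16. Previously the 23-bit shift ran at e32 (vsrl.vi) and the narrowing was a separate shift by zero (vnsrl.wx ... zero); now the narrowing shift itself carries the 23, saving an instruction and usually a vsetvli toggle. A minimal IR driver for this lowering, as a sketch (the real tests also cover the zero-input guarded variants):

declare <vscale x 1 x i16> @llvm.ctlz.nxv1i16(<vscale x 1 x i16>, i1)

define <vscale x 1 x i16> @ctlz_example(<vscale x 1 x i16> %va) {
  ; i1 true = zero input is undef, so the vmseq.vi/vmerge guard is not emitted
  %a = call <vscale x 1 x i16> @llvm.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 true)
  ret <vscale x 1 x i16> %a
}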
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index 73e8585fe3431..d33ad60e09618 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -57,9 +57,8 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -77,9 +76,8 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -144,9 +142,8 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -164,9 +161,8 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -231,9 +227,8 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
@@ -251,9 +246,8 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
@@ -318,9 +312,8 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
@@ -338,9 +331,8 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
@@ -405,9 +397,8 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
@@ -425,9 +416,8 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
@@ -556,10 +546,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -574,10 +561,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -653,10 +637,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -671,10 +652,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -750,12 +728,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v10, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -768,12 +743,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v10, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -847,12 +819,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v12, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -865,12 +834,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v12, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -944,12 +910,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v16, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -962,12 +925,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v16, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -1887,9 +1847,8 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -1904,9 +1863,8 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -1967,9 +1925,8 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -1984,9 +1941,8 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2047,9 +2003,8 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2064,9 +2019,8 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2127,9 +2081,8 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2144,9 +2097,8 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2207,9 +2159,8 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2224,9 +2175,8 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2349,10 +2299,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2363,10 +2310,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2437,10 +2381,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2451,10 +2392,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2525,12 +2463,9 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v10, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv4i16:
@@ -2539,12 +2474,9 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v10, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
   ret <vscale x 4 x i16> %a
@@ -2613,12 +2545,9 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v12, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv8i16:
@@ -2627,12 +2556,9 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v12, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
   ret <vscale x 8 x i16> %a
@@ -2701,12 +2627,9 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v16, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv16i16:
@@ -2715,12 +2638,9 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v16, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
   ret <vscale x 16 x i16> %a
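The cttz changes mirror the ctlz ones. The lowering isolates the lowest set bit with x & (0 - x) (vrsub.vi plus vand.vv), converts it to float, and reads off the exponent: exponent - 127 is exactly the trailing-zero count. That is the li a0, 127 / vsub.vx pair, with a vmerge against 16 (the element width) for the zero-input case in the non-zero-undef variants. Again the only codegen delta is that the exponent extraction folds into the narrowing shift as vnsrl.wi ..., 23. A matching IR driver, as a sketch:

declare <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16>, i1)

define <vscale x 1 x i16> @cttz_example(<vscale x 1 x i16> %va) {
  ; i1 false = zero input is defined, so the vmerge against 16 is required
  %a = call <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 false)
  ret <vscale x 1 x i16> %a
}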
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
index 7aea161155239..47d04a80573b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
@@ -30,6 +30,20 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsra.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wv_nxv2i32_sext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -58,6 +72,21 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsra.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wv_nxv4i32_sext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -86,6 +115,21 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsra.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wv_nxv8i32_sext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -114,6 +158,21 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsra.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsra_wv_nxv1i32_zext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -140,6 +199,20 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsra.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wv_nxv2i32_zext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -168,6 +241,21 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsra.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wv_nxv4i32_zext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -196,6 +284,21 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsra.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wv_nxv8i32_zext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv8i32_zext:
 ; CHECK:       # %bb.0:
@@ -223,3 +326,18 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
 }
+
+define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsra.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
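The new vnsra tests splat the shift amount 15 and check both the sext and zext paths, since SplatPat_uimm5 has to look through either extension of the splatted scalar. The immediate form only covers amounts 0..31 (uimm5); an amount outside that range should presumably still go through the _WX path with a materialized constant, along these lines (a hypothetical example, not part of this patch):

define <vscale x 1 x i32> @vnsra_wi_too_big(<vscale x 1 x i64> %va) {
  ; 40 does not fit in uimm5, so this is expected to select li a0, 40 + vnsra.wx.
  %head = insertelement <vscale x 1 x i64> poison, i64 40, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %x = ashr <vscale x 1 x i64> %va, %splat
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %y
}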
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index 4016d223c4407..70141889e10ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -30,6 +30,20 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wv_nxv2i32_sext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -58,6 +72,21 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsrl.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wv_nxv4i32_sext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -86,6 +115,21 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wv_nxv8i32_sext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -114,6 +158,21 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsrl.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsrl_wv_nxv1i32_zext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -140,6 +199,20 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wv_nxv2i32_zext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -168,6 +241,21 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsrl.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wv_nxv4i32_zext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -196,6 +284,21 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wv_nxv8i32_zext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv8i32_zext:
 ; CHECK:       # %bb.0:
@@ -223,3 +326,18 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
 }
+
+define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsrl.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
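The vnsrl tests are the lshr mirror of the vnsra ones, exercising the same _WI pattern through the PseudoVNSRL instantiation of the multiclass. These are update_llc_test_checks-style tests; a typical RUN invocation for them (paraphrased, since the RUN lines themselves are outside this diff's hunks) would be:

; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s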