llvm/test/CodeGen/RISCV/rvv/floor-vp.ll (36 changes: 18 additions & 18 deletions)
@@ -15,8 +15,8 @@ define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -57,8 +57,8 @@ define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -99,8 +99,8 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -142,8 +142,8 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -186,8 +186,8 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -230,8 +230,8 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -273,8 +273,8 @@ define <vscale x 1 x float> @vp_floor_nxv1f32(<vscale x 1 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -315,8 +315,8 @@ define <vscale x 2 x float> @vp_floor_nxv2f32(<vscale x 2 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -358,8 +358,8 @@ define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -402,8 +402,8 @@ define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -446,8 +446,8 @@ define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -489,8 +489,8 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -532,8 +532,8 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -576,8 +576,8 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -620,8 +620,8 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -664,8 +664,8 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -726,8 +726,8 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: vfabs.v v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a2, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
@@ -750,8 +750,8 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
llvm/test/CodeGen/RISCV/rvv/frm-insert.ll (84 changes: 42 additions & 42 deletions)
@@ -13,19 +13,19 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
define <vscale x 1 x float> @test(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: vfadd.vv v8, v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
;
; UNOPT-LABEL: test:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v8
; UNOPT-NEXT: fsrm a0
@@ -48,20 +48,20 @@ entry:
define <vscale x 1 x float> @test2(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: fsrmi 1
; CHECK-NEXT: vfadd.vv v8, v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
;
; UNOPT-LABEL: test2:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: fsrmi a0, 1
; UNOPT-NEXT: vfadd.vv v8, v8, v8
; UNOPT-NEXT: fsrm a0
@@ -132,12 +132,12 @@ define <vscale x 1 x float> @before_call1(<vscale x 1 x float> %0, <vscale x 1 x
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: addi a1, sp, 32
; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: addi a0, sp, 32
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: call foo
; CHECK-NEXT: addi a0, sp, 32
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -155,12 +155,12 @@ define <vscale x 1 x float> @before_call1(<vscale x 1 x float> %0, <vscale x 1 x
; UNOPT-NEXT: csrr a1, vlenb
; UNOPT-NEXT: slli a1, a1, 1
; UNOPT-NEXT: sub sp, sp, a1
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: addi a1, sp, 32
; UNOPT-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: addi a0, sp, 32
; UNOPT-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: call foo
; UNOPT-NEXT: addi a0, sp, 32
; UNOPT-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -240,12 +240,12 @@ define <vscale x 1 x float> @after_call1(<vscale x 1 x float> %0, <vscale x 1 x
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: addi a1, sp, 32
; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: addi a0, sp, 32
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: call foo
; CHECK-NEXT: addi a0, sp, 32
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -263,12 +263,12 @@ define <vscale x 1 x float> @after_call1(<vscale x 1 x float> %0, <vscale x 1 x
; UNOPT-NEXT: csrr a1, vlenb
; UNOPT-NEXT: slli a1, a1, 1
; UNOPT-NEXT: sub sp, sp, a1
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: addi a1, sp, 32
; UNOPT-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: addi a0, sp, 32
; UNOPT-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: call foo
; UNOPT-NEXT: addi a0, sp, 32
; UNOPT-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
@@ -360,20 +360,20 @@ entry:
define <vscale x 1 x float> @before_asm1(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: before_asm1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: ret
;
; UNOPT-LABEL: before_asm1:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: #APP
; UNOPT-NEXT: #NO_APP
; UNOPT-NEXT: ret
@@ -416,20 +416,20 @@ entry:
define <vscale x 1 x float> @after_asm1(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: after_asm1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: ret
;
; UNOPT-LABEL: after_asm1:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi a1, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: #APP
; UNOPT-NEXT: #NO_APP
; UNOPT-NEXT: ret
@@ -476,10 +476,10 @@ declare i32 @llvm.get.rounding()
define <vscale x 1 x float> @test5(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, ptr %p) nounwind {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a2, 0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a2
; CHECK-NEXT: frrm a0
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: lui a2, 66
@@ -492,10 +492,10 @@ define <vscale x 1 x float> @test5(<vscale x 1 x float> %0, <vscale x 1 x float>
;
; UNOPT-LABEL: test5:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi a2, 0
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 0
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a2
; UNOPT-NEXT: frrm a0
; UNOPT-NEXT: slli a0, a0, 2
; UNOPT-NEXT: lui a2, 66
@@ -559,10 +559,10 @@ define <vscale x 1 x float> @after_fsrm2(<vscale x 1 x float> %0, <vscale x 1 x
; UNOPT-LABEL: after_fsrm2:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi 4
; UNOPT-NEXT: fsrmi a1, 4
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 4
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: ret
entry:
call void @llvm.set.rounding(i32 4)
@@ -579,19 +579,19 @@ define <vscale x 1 x float> @after_fsrm3(<vscale x 1 x float> %0, <vscale x 1 x
; CHECK-LABEL: after_fsrm3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi 4
; CHECK-NEXT: fsrmi a1, 5
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 5
; CHECK-NEXT: vfadd.vv v8, v8, v9
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
;
; UNOPT-LABEL: after_fsrm3:
; UNOPT: # %bb.0: # %entry
; UNOPT-NEXT: fsrmi 4
; UNOPT-NEXT: fsrmi a1, 5
; UNOPT-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; UNOPT-NEXT: fsrmi a0, 5
; UNOPT-NEXT: vfadd.vv v8, v8, v9
; UNOPT-NEXT: fsrm a0
; UNOPT-NEXT: fsrm a1
; UNOPT-NEXT: ret
entry:
call void @llvm.set.rounding(i32 4)
llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll (24 changes: 12 additions & 12 deletions)
@@ -249,8 +249,8 @@ declare <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half>)
define <vscale x 1 x i8> @ceil_nxv1f16_to_si8(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_si8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.x.f.w v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -263,8 +263,8 @@ define <vscale x 1 x i8> @ceil_nxv1f16_to_si8(<vscale x 1 x half> %x) {
define <vscale x 1 x i8> @ceil_nxv1f16_to_ui8(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_ui8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfncvt.xu.f.w v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -277,8 +277,8 @@ define <vscale x 1 x i8> @ceil_nxv1f16_to_ui8(<vscale x 1 x half> %x) {
define <vscale x 1 x i16> @ceil_nxv1f16_to_si16(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_si16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -290,8 +290,8 @@ define <vscale x 1 x i16> @ceil_nxv1f16_to_si16(<vscale x 1 x half> %x) {
define <vscale x 1 x i16> @ceil_nxv1f16_to_ui16(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_ui16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -303,8 +303,8 @@ define <vscale x 1 x i16> @ceil_nxv1f16_to_ui16(<vscale x 1 x half> %x) {
define <vscale x 1 x i32> @ceil_nxv1f16_to_si32(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_si32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -317,8 +317,8 @@ define <vscale x 1 x i32> @ceil_nxv1f16_to_si32(<vscale x 1 x half> %x) {
define <vscale x 1 x i32> @ceil_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_ui32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.xu.f.v v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -451,8 +451,8 @@ declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
define <vscale x 4 x i8> @ceil_nxv4f16_to_si8(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_si8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vfncvt.x.f.w v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -465,8 +465,8 @@ define <vscale x 4 x i8> @ceil_nxv4f16_to_si8(<vscale x 4 x half> %x) {
define <vscale x 4 x i8> @ceil_nxv4f16_to_ui8(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_ui8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vfncvt.xu.f.w v9, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv1r.v v8, v9
@@ -479,8 +479,8 @@ define <vscale x 4 x i8> @ceil_nxv4f16_to_ui8(<vscale x 4 x half> %x) {
define <vscale x 4 x i16> @ceil_nxv4f16_to_si16(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_si16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -492,8 +492,8 @@ define <vscale x 4 x i16> @ceil_nxv4f16_to_si16(<vscale x 4 x half> %x) {
define <vscale x 4 x i16> @ceil_nxv4f16_to_ui16(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_ui16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: ret
@@ -505,8 +505,8 @@ define <vscale x 4 x i16> @ceil_nxv4f16_to_ui16(<vscale x 4 x half> %x) {
define <vscale x 4 x i32> @ceil_nxv4f16_to_si32(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_si32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v10, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv2r.v v8, v10
@@ -519,8 +519,8 @@ define <vscale x 4 x i32> @ceil_nxv4f16_to_si32(<vscale x 4 x half> %x) {
define <vscale x 4 x i32> @ceil_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_ui32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vfwcvt.xu.f.v v10, v8
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vmv2r.v v8, v10
llvm/test/CodeGen/RISCV/rvv/masked-tama.ll (8 changes: 4 additions & 4 deletions)
@@ -516,8 +516,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vaadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -543,8 +543,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -570,8 +570,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -597,8 +597,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnclip.wv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll (8 changes: 4 additions & 4 deletions)
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll (8 changes: 4 additions & 4 deletions)
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll (8 changes: 4 additions & 4 deletions)
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: csrwi vxrm, 1
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
llvm/test/CodeGen/RISCV/rvv/round-vp.ll (52 changes: 26 additions & 26 deletions)
@@ -19,8 +19,8 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -38,8 +38,8 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -101,8 +101,8 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -120,8 +120,8 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -183,8 +183,8 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -203,8 +203,8 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -268,8 +268,8 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -289,8 +289,8 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -354,8 +354,8 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -375,8 +375,8 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -440,8 +440,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -479,8 +479,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -501,8 +501,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -567,8 +567,8 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -615,8 +615,8 @@ define <vscale x 1 x float> @vp_round_nxv1f32(<vscale x 1 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -657,8 +657,8 @@ define <vscale x 2 x float> @vp_round_nxv2f32(<vscale x 2 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -700,8 +700,8 @@ define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -744,8 +744,8 @@ define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -788,8 +788,8 @@ define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -831,8 +831,8 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -874,8 +874,8 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -918,8 +918,8 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -962,8 +962,8 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1006,8 +1006,8 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1068,8 +1068,8 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: vfabs.v v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a2, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
@@ -1092,8 +1092,8 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll (52 changes: 26 additions & 26 deletions)
@@ -19,8 +19,8 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -38,8 +38,8 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -101,8 +101,8 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -120,8 +120,8 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -183,8 +183,8 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -203,8 +203,8 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -268,8 +268,8 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -289,8 +289,8 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -354,8 +354,8 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -375,8 +375,8 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -440,8 +440,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -479,8 +479,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -501,8 +501,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -567,8 +567,8 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -615,8 +615,8 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vsc
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -657,8 +657,8 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vsc
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -700,8 +700,8 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vsc
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -744,8 +744,8 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vsc
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -788,8 +788,8 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -831,8 +831,8 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -874,8 +874,8 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -918,8 +918,8 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -962,8 +962,8 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1006,8 +1006,8 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1068,8 +1068,8 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: vfabs.v v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a2, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
@@ -1092,8 +1092,8 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll (52 changes: 26 additions & 26 deletions)
@@ -19,8 +19,8 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -38,8 +38,8 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -101,8 +101,8 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vsc
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -120,8 +120,8 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vsc
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v0, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -183,8 +183,8 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vsc
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -203,8 +203,8 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vsc
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -268,8 +268,8 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vsc
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -289,8 +289,8 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vsc
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -354,8 +354,8 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -375,8 +375,8 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -440,8 +440,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
@@ -479,8 +479,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -501,8 +501,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a0, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a0
@@ -567,8 +567,8 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: fsrmi a2, 1
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
@@ -615,8 +615,8 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32(<v
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -657,8 +657,8 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32(<v
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -700,8 +700,8 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<v
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -744,8 +744,8 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<v
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -788,8 +788,8 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va,
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -831,8 +831,8 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
; CHECK-NEXT: vfabs.v v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -874,8 +874,8 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -918,8 +918,8 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -962,8 +962,8 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1006,8 +1006,8 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -1068,8 +1068,8 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: vfabs.v v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a2, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: fsrm a2
@@ -1092,8 +1092,8 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
@@ -215,10 +215,10 @@ declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2
define <vscale x 2 x i32> @vmerge_vfcvt_rm(<vscale x 2 x i32> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmerge_vfcvt_rm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 2
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%floor = call <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float> %a)
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -919,8 +919,8 @@ entry:
define <vscale x 1 x i16> @test_vaaddu(<vscale x 1 x i16> %var_11, i16 zeroext %var_9, <vscale x 1 x i1> %var_5, <vscale x 1 x i16> %var_0) {
; CHECK-LABEL: test_vaaddu:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 3, e16, mf4, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetivli zero, 3, e16, mf4, ta, mu
; CHECK-NEXT: vaaddu.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -966,13 +966,13 @@ declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
define <vscale x 2 x float> @vfredusum(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) {
; CHECK-LABEL: vfredusum:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vfredusum.vs v11, v9, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v11, v0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
%a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
<vscale x 2 x float> %passthru,
@@ -1002,10 +1002,10 @@ define <vscale x 2 x i32> @vredsum_allones_mask(<vscale x 2 x i32> %passthru, <v
define <vscale x 2 x float> @vfredusum_allones_mask(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, i64 %vl) {
; CHECK-LABEL: vfredusum_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfredusum.vs v8, v9, v10
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
%a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
<vscale x 2 x float> %passthru,
@@ -1136,10 +1136,10 @@ define <vscale x 2 x i64> @vpmerge_vwsub.w_tied(<vscale x 2 x i64> %passthru, <v
define <vscale x 2 x double> @vpmerge_vfwsub.w_tied(<vscale x 2 x double> %passthru, <vscale x 2 x double> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %mask, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vfwsub.w_tied:
; CHECK: # %bb.0:
; CHECK-NEXT: fsrmi a1, 1
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: fsrmi a0, 1
; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
%vl.zext = zext i32 %vl to i64
%a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(<vscale x 2 x double> %passthru, <vscale x 2 x double> %passthru, <vscale x 2 x float> %y, i64 1, i64 %vl.zext)
40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
@@ -13,10 +13,10 @@ declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
@@ -39,10 +39,10 @@ declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXL
define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
@@ -64,10 +64,10 @@ declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
@@ -90,10 +90,10 @@ declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXL
define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
@@ -115,10 +115,10 @@ declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v10, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
@@ -141,10 +141,10 @@ declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXL
define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v10, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
@@ -166,10 +166,10 @@ declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v12, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
@@ -192,10 +192,10 @@ declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXL
define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v12, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
@@ -217,10 +217,10 @@ declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen
define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v16, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
@@ -243,10 +243,10 @@ declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.
define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.x.f.qf v8, v16, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
@@ -13,10 +13,10 @@ declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
@@ -39,10 +39,10 @@ declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iX
define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
@@ -64,10 +64,10 @@ declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v9, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
@@ -90,10 +90,10 @@ declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iX
define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
@@ -115,10 +115,10 @@ declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v10, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
entry:
@@ -141,10 +141,10 @@ declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iX
define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v10, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
@@ -166,10 +166,10 @@ declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v12, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
@@ -192,10 +192,10 @@ declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iX
define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v12, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
@@ -217,10 +217,10 @@ declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLe
define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v16, v8, fa0
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
@@ -243,10 +243,10 @@ declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32
define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fsrmi a1, 0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: sf.vfnrclip.xu.f.qf v8, v16, fa0, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: fsrm a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
22 changes: 11 additions & 11 deletions llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -110,8 +110,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vaadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -133,8 +133,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vaaddu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -199,8 +199,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vasub.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -222,8 +222,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vasubu.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -950,8 +950,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnclip.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -974,8 +974,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vnclipu.wv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -1271,8 +1271,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vsmul.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -1301,16 +1301,16 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: csrwi vxrm, 0
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vsmul.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: csrwi vxrm, 0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vsmul.vx v8, v9, a0
; RV64-NEXT: ret
entry:
@@ -1376,8 +1376,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssra.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -1400,8 +1400,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vssrl.vv v8, v9, v10
; CHECK-NEXT: ret
entry: