76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -9,7 +9,7 @@ declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i
define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -32,7 +32,7 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x ha
define <2 x half> @vfma_vf_v2f16(<2 x half> %va, half %b, <2 x half> %vc, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -60,7 +60,7 @@ declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i
define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -83,7 +83,7 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x ha
define <4 x half> @vfma_vf_v4f16(<4 x half> %va, half %b, <4 x half> %vc, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -111,9 +111,9 @@ declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i
define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fma.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl)
ret <8 x half> %v
@@ -134,7 +134,7 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x ha
define <8 x half> @vfma_vf_v8f16(<8 x half> %va, half %b, <8 x half> %vc, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -162,9 +162,9 @@ declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <
define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fma.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl)
ret <16 x half> %v
@@ -185,7 +185,7 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16
define <16 x half> @vfma_vf_v16f16(<16 x half> %va, half %b, <16 x half> %vc, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -213,7 +213,7 @@ declare <2 x float> @llvm.vp.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, <2
define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -236,7 +236,7 @@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x
define <2 x float> @vfma_vf_v2f32(<2 x float> %va, float %b, <2 x float> %vc, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -264,9 +264,9 @@ declare <4 x float> @llvm.vp.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, <4
define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl)
ret <4 x float> %v
@@ -287,7 +287,7 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x
define <4 x float> @vfma_vf_v4f32(<4 x float> %va, float %b, <4 x float> %vc, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -315,9 +315,9 @@ declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, <8
define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl)
ret <8 x float> %v
@@ -338,7 +338,7 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x
define <8 x float> @vfma_vf_v8f32(<8 x float> %va, float %b, <8 x float> %vc, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -366,9 +366,9 @@ declare <16 x float> @llvm.vp.fma.v16f32(<16 x float>, <16 x float>, <16 x float
define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl)
ret <16 x float> %v
@@ -389,7 +389,7 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
define <16 x float> @vfma_vf_v16f32(<16 x float> %va, float %b, <16 x float> %vc, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -417,9 +417,9 @@ declare <2 x double> @llvm.vp.fma.v2f64(<2 x double>, <2 x double>, <2 x double>
define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
@@ -440,7 +440,7 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <
define <2 x double> @vfma_vf_v2f64(<2 x double> %va, double %b, <2 x double> %vc, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -468,9 +468,9 @@ declare <4 x double> @llvm.vp.fma.v4f64(<4 x double>, <4 x double>, <4 x double>
define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
@@ -491,7 +491,7 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <
define <4 x double> @vfma_vf_v4f64(<4 x double> %va, double %b, <4 x double> %vc, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
@@ -519,9 +519,9 @@ declare <8 x double> @llvm.vp.fma.v8f64(<8 x double>, <8 x double>, <8 x double>
define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t
-; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
@@ -542,7 +542,7 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <
define <8 x double> @vfma_vf_v8f64(<8 x double> %va, double %b, <8 x double> %vc, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
@@ -572,9 +572,9 @@ define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t
-; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
@@ -601,9 +601,9 @@ define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t
-; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
@@ -626,7 +626,7 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %
define <16 x double> @vfma_vf_v16f64(<16 x double> %va, double %b, <16 x double> %vc, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfma_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
@@ -705,7 +705,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 24
@@ -735,7 +735,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB50_4:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -750,7 +750,7 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t
-; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 40
; CHECK-NEXT: mul a0, a0, a1
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -311,7 +311,7 @@ define <32 x i8> @vpgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; RV64-NEXT: vslideup.vi v8, v12, 16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs
38 changes: 19 additions & 19 deletions llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -61,7 +61,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_0(<vscale x 4 x i8> %vec, <vscale
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
@@ -76,7 +76,7 @@ define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
@@ -214,7 +214,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
@@ -227,7 +227,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
@@ -239,7 +239,7 @@ define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_6(<vscale x 16 x i32> %vec,
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v11, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
@@ -251,7 +251,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_0(<vscale x 16 x i8> %vec, <vsc
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
@@ -264,7 +264,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_1(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
@@ -278,7 +278,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
@@ -293,7 +293,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
@@ -307,7 +307,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vsc
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
@@ -321,7 +321,7 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vs
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
@@ -333,7 +333,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_0(<vscale x 32 x half> %vec
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
@@ -346,7 +346,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
@@ -359,7 +359,7 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v14, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
@@ -382,7 +382,7 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v22, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
@@ -395,7 +395,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_0(<vscale x 32 x i1> %v, <vscal
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v0, v8, 0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
@@ -408,7 +408,7 @@ define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_8(<vscale x 32 x i1> %v, <vscal
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v0, v8, a0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
@@ -427,7 +427,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_0(<vscale x 4 x i1> %v, <vscale x
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
@@ -450,7 +450,7 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v9, v8, a0
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv32.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, hal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, hal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, f
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-fp-rv64.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
@@ -65,7 +65,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
@@ -100,7 +100,7 @@ define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
@@ -122,7 +122,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
@@ -135,7 +135,7 @@ define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
@@ -157,7 +157,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, hal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
@@ -170,7 +170,7 @@ define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, hal
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
@@ -192,7 +192,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, hal
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
@@ -205,7 +205,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
@@ -227,7 +227,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
@@ -240,7 +240,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
@@ -262,7 +262,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
@@ -275,7 +275,7 @@ define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
@@ -297,7 +297,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
@@ -310,7 +310,7 @@ define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
@@ -332,7 +332,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, floa
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
@@ -345,7 +345,7 @@ define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, floa
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
@@ -367,7 +367,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, f
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
@@ -380,7 +380,7 @@ define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, f
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
@@ -402,7 +402,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
@@ -415,7 +415,7 @@ define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
@@ -437,7 +437,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
@@ -450,7 +450,7 @@ define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
@@ -472,7 +472,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
@@ -485,7 +485,7 @@ define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
@@ -507,7 +507,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, do
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
@@ -520,7 +520,7 @@ define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, do
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll
@@ -9,7 +9,7 @@ define <vscale x 1 x i1> @insertelt_nxv1i1(<vscale x 1 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -27,7 +27,7 @@ define <vscale x 1 x i1> @insertelt_idx_nxv1i1(<vscale x 1 x i1> %x, i1 %elt, i6
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -44,7 +44,7 @@ define <vscale x 2 x i1> @insertelt_nxv2i1(<vscale x 2 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -62,7 +62,7 @@ define <vscale x 2 x i1> @insertelt_idx_nxv2i1(<vscale x 2 x i1> %x, i1 %elt, i6
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -79,7 +79,7 @@ define <vscale x 4 x i1> @insertelt_nxv4i1(<vscale x 4 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -97,7 +97,7 @@ define <vscale x 4 x i1> @insertelt_idx_nxv4i1(<vscale x 4 x i1> %x, i1 %elt, i6
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -114,7 +114,7 @@ define <vscale x 8 x i1> @insertelt_nxv8i1(<vscale x 8 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -132,7 +132,7 @@ define <vscale x 8 x i1> @insertelt_idx_nxv8i1(<vscale x 8 x i1> %x, i1 %elt, i6
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
@@ -149,7 +149,7 @@ define <vscale x 16 x i1> @insertelt_nxv16i1(<vscale x 16 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, mu
; CHECK-NEXT: vslideup.vi v10, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 1
@@ -167,7 +167,7 @@ define <vscale x 16 x i1> @insertelt_idx_nxv16i1(<vscale x 16 x i1> %x, i1 %elt,
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vslideup.vx v10, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 1
@@ -184,7 +184,7 @@ define <vscale x 32 x i1> @insertelt_nxv32i1(<vscale x 32 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, mu
; CHECK-NEXT: vslideup.vi v12, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 1
@@ -202,7 +202,7 @@ define <vscale x 32 x i1> @insertelt_idx_nxv32i1(<vscale x 32 x i1> %x, i1 %elt,
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vslideup.vx v12, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 1
@@ -219,7 +219,7 @@ define <vscale x 64 x i1> @insertelt_nxv64i1(<vscale x 64 x i1> %x, i1 %elt) {
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, mu
; CHECK-NEXT: vslideup.vi v16, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 1
@@ -237,7 +237,7 @@ define <vscale x 64 x i1> @insertelt_idx_nxv64i1(<vscale x 64 x i1> %x, i1 %elt,
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vslideup.vx v16, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 1
104 changes: 52 additions & 52 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll

Large diffs are not rendered by default.

88 changes: 44 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -2521,7 +2521,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: add a1, a0, a0
-; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
; RV32-NEXT: vslideup.vx v0, v24, a0
; RV32-NEXT: ret
;
@@ -2534,7 +2534,7 @@ define <vscale x 16 x i1> @fcmp_oeq_vf_nx16f64(<vscale x 16 x double> %va) {
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: add a1, a0, a0
-; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
; RV64-NEXT: vslideup.vx v0, v24, a0
; RV64-NEXT: ret
%vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -3235,7 +3235,7 @@ define <vscale x 16 x i1> @icmp_eq_vi_nx16i64(<vscale x 16 x i64> %va) {
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; CHECK-NEXT: vmseq.vi v24, v16, 0
; CHECK-NEXT: vmseq.vi v0, v8, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT: vslideup.vx v0, v24, a0
; CHECK-NEXT: ret
%vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -3693,7 +3693,7 @@ define void @sink_splat_vp_fma(float* noalias nocapture %a, float* nocapture rea
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3736,7 +3736,7 @@ define void @sink_splat_vp_fma_commute(float* noalias nocapture %a, float* nocap
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, tu, mu
; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v8, (a0)
Expand Down
222 changes: 111 additions & 111 deletions llvm/test/CodeGen/RISCV/rvv/vector-splice.ll

Large diffs are not rendered by default.

122 changes: 61 additions & 61 deletions llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll

Large diffs are not rendered by default.
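Context on the two patterns every hunk above toggles (the sketch below is hand-written for illustration; its registers, vl, and values are assumptions, not lines taken from these tests). With tail agnostic (`ta`), destination elements past vl may be overwritten with arbitrary values; with tail undisturbed (`tu`), they must keep their previous contents, which is what a tied-destination operation such as vslideup or a masked vfmadd relies on. Relatedly, `vmv.v.v` copies only the first vl elements and leaves the tail subject to the current policy, while `vmv1r.v`, `vmv2r.v`, `vmv4r.v`, and `vmv8r.v` copy an entire register group regardless of vl.

    # Minimal sketch, assuming VLEN gives 8 e32 elements per m1 register:
    vsetivli zero, 4, e32, m1, tu, mu   # vl = 4, tail undisturbed
    vslideup.vi v8, v9, 2               # v8[2] <- v9[0], v8[3] <- v9[1]
    # Under tu, v8[4..7] keep their old values; under ta the hardware
    # is free to clobber them (e.g. fill them with all-ones).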