50 changes: 25 additions & 25 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
@@ -9,7 +9,7 @@ declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
@@ -31,7 +31,7 @@ define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
@@ -59,7 +59,7 @@ declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v3f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
@@ -71,7 +71,7 @@ declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@ define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
@@ -121,7 +121,7 @@ declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
@@ -143,7 +143,7 @@ define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
@@ -171,7 +171,7 @@ declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i3
define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
@@ -193,7 +193,7 @@ define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
@@ -221,7 +221,7 @@ declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -243,7 +243,7 @@ define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
@@ -271,7 +271,7 @@ declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -293,7 +293,7 @@ define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
@@ -321,7 +321,7 @@ declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -343,7 +343,7 @@ define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
@@ -371,7 +371,7 @@ declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>,
define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -393,7 +393,7 @@ define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
@@ -421,7 +421,7 @@ declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i
define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -443,7 +443,7 @@ define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
@@ -471,7 +471,7 @@ declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i
define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -493,7 +493,7 @@ define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
@@ -521,7 +521,7 @@ declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i
define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -543,7 +543,7 @@ define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
@@ -571,7 +571,7 @@ declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i
define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -593,7 +593,7 @@ define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
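Every hunk in this file (and in the per-file diffs that follow) makes the same mechanical change: the vsetvli mask policy is relaxed from mask-undisturbed ("mu") to mask-agnostic ("ma"), while the tail policy stays "ta". Since these VP intrinsics take no passthru operand, the masked-off lanes of the result are already poison, so nothing requires the inactive elements to be preserved. A minimal sketch of the pattern on another VP intrinsic, assuming the usual update_llc_test_checks.py workflow — the function and CHECK lines below are illustrative, not taken from this patch:

declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

; Hypothetical reduced test: with no passthru operand, inactive lanes are
; poison, so "ta, ma" is as correct as "ta, mu" and frees the hardware
; from preserving masked-off elements.
define <2 x i32> @vadd_vv_v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}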
100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll

Large diffs are not rendered by default.

100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll

Large diffs are not rendered by default.

100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll

Large diffs are not rendered by default.

100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll

Large diffs are not rendered by default.

80 changes: 40 additions & 40 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll

Large diffs are not rendered by default.

114 changes: 57 additions & 57 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll

Large diffs are not rendered by default.

402 changes: 201 additions & 201 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll

Large diffs are not rendered by default.

56 changes: 28 additions & 28 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -9,7 +9,7 @@ declare <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>*, <2 x i1>, i32)
define <2 x i8> @vpload_v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
@@ -21,7 +21,7 @@ declare <3 x i8> @llvm.vp.load.v3i8.p0v3i8(<3 x i8>*, <3 x i1>, i32)
define <3 x i8> @vpload_v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v3i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <3 x i8> @llvm.vp.load.v3i8.p0v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@ declare <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>*, <4 x i1>, i32)
define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@ declare <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>*, <8 x i1>, i32)
define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
@@ -69,7 +69,7 @@ declare <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>*, <2 x i1>, i32)
define <2 x i16> @vpload_v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@ declare <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>*, <4 x i1>, i32)
define <4 x i16> @vpload_v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
@@ -93,7 +93,7 @@ declare <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>*, <8 x i1>, i32)
define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
@@ -117,7 +117,7 @@ declare <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>*, <2 x i1>, i32)
define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@ declare <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>*, <4 x i1>, i32)
define <4 x i32> @vpload_v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
@@ -141,7 +141,7 @@ declare <6 x i32> @llvm.vp.load.v6i32.p0v6i32(<6 x i32>*, <6 x i1>, i32)
define <6 x i32> @vpload_v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v6i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <6 x i32> @llvm.vp.load.v6i32.p0v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 %evl)
@@ -165,7 +165,7 @@ declare <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>*, <8 x i1>, i32)
define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
@@ -189,7 +189,7 @@ declare <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>*, <2 x i1>, i32)
define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@ declare <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>*, <4 x i1>, i32)
define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@ declare <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>*, <8 x i1>, i32)
define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
@@ -237,7 +237,7 @@ declare <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>*, <2 x i1>, i32)
define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl)
@@ -261,7 +261,7 @@ declare <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>*, <4 x i1>, i32)
define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@ declare <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>*, <8 x i1>, i32)
define <8 x half> @vpload_v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl)
@@ -285,7 +285,7 @@ declare <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>*, <2 x i1>, i32)
define <2 x float> @vpload_v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@ declare <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>*, <4 x i1>, i32)
define <4 x float> @vpload_v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl)
@@ -309,7 +309,7 @@ declare <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>*, <8 x i1>, i32)
define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl)
@@ -333,7 +333,7 @@ declare <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>*, <2 x i1>, i32)
define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl)
@@ -345,7 +345,7 @@ declare <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>*, <4 x i1>, i32)
define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl)
@@ -369,7 +369,7 @@ declare <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>*, <8 x i1>, i32)
define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl)
@@ -391,14 +391,14 @@ define <32 x double> @vpload_v32f64(<32 x double>* %ptr, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: addi a3, a0, 128
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a3), v0.t
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: bltu a1, a2, .LBB31_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB31_4:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
@@ -429,7 +429,7 @@ define <33 x double> @vpload_v33f64(<33 x double>* %ptr, <33 x i1> %m, i32 zeroe
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: addi a5, a1, 128
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5), v0.t
; CHECK-NEXT: addi a5, a2, -32
; CHECK-NEXT: li a4, 0
@@ -445,13 +445,13 @@ define <33 x double> @vpload_v33f64(<33 x double>* %ptr, <33 x i1> %m, i32 zeroe
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a3, a2, .LBB32_10
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB32_10:
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
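The vpload_v32f64 and vpload_v33f64 tests above show the same policy switch inside a split lowering: a fixed vector wider than one LMUL=8 register group is loaded in pieces, with the EVL clamped per piece and the mask slid down for the upper half. A rough reconstruction of what the emitted branches compute, read off the CHECK lines rather than quoted from the lowering code:

; Assumed EVL arithmetic for the two masked vle64.v in vpload_v32f64:
;   evl_lo = min(evl, 16)         ; the bltu a1 / li a1, 16 pair at .LBB31_4
;   evl_hi = max(evl - 16, 0)     ; held in a2 for the load at (a0 + 128)
; The upper half reuses the same mask: vslidedown.vi v0, v8, 2 at e8 moves
; mask bytes 2-3 (i.e. mask bits 16-31) down to bit position 0.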
76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll

Large diffs are not rendered by default.

76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll

Large diffs are not rendered by default.

72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll

Large diffs are not rendered by default.

108 changes: 54 additions & 54 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll

Large diffs are not rendered by default.

108 changes: 54 additions & 54 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll

Large diffs are not rendered by default.

108 changes: 54 additions & 54 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll

Large diffs are not rendered by default.

78 changes: 39 additions & 39 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll

Large diffs are not rendered by default.

148 changes: 74 additions & 74 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -380,7 +380,7 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v22, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
@@ -5,7 +5,7 @@
define <vscale x 1 x half> @masked_load_nxv1f16(<vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
@@ -16,7 +16,7 @@ declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>*, i32,
define <vscale x 1 x float> @masked_load_nxv1f32(<vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
@@ -27,7 +27,7 @@ declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>*, i3
define <vscale x 1 x double> @masked_load_nxv1f64(<vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
@@ -38,7 +38,7 @@ declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>*,
define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
@@ -49,7 +49,7 @@ declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32,
define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
@@ -60,7 +60,7 @@ declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i3
define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
@@ -71,7 +71,7 @@ declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*,
define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
@@ -82,7 +82,7 @@ declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32,
define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
@@ -93,7 +93,7 @@ declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i3
define <vscale x 4 x double> @masked_load_nxv4f64(<vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
@@ -104,7 +104,7 @@ declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>*,
define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
@@ -115,7 +115,7 @@ declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32,
define <vscale x 8 x float> @masked_load_nxv8f32(<vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
@@ -126,7 +126,7 @@ declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>*, i3
define <vscale x 8 x double> @masked_load_nxv8f64(<vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
@@ -137,7 +137,7 @@ declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>*,
define <vscale x 16 x half> @masked_load_nxv16f16(<vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
@@ -148,7 +148,7 @@ declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>*, i
define <vscale x 16 x float> @masked_load_nxv16f32(<vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
@@ -159,7 +159,7 @@ declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>*,
define <vscale x 32 x half> @masked_load_nxv32f16(<vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -5,7 +5,7 @@
define <vscale x 1 x i8> @masked_load_nxv1i8(<vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
@@ -16,7 +16,7 @@ declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>*, i32, <vsc
define <vscale x 1 x i16> @masked_load_nxv1i16(<vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
@@ -27,7 +27,7 @@ declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>*, i32, <
define <vscale x 1 x i32> @masked_load_nxv1i32(<vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
@@ -38,7 +38,7 @@ declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>*, i32, <
define <vscale x 1 x i64> @masked_load_nxv1i64(<vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
@@ -49,7 +49,7 @@ declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>*, i32, <
define <vscale x 2 x i8> @masked_load_nxv2i8(<vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
@@ -60,7 +60,7 @@ declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vsc
define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
@@ -71,7 +71,7 @@ declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <
define <vscale x 2 x i32> @masked_load_nxv2i32(<vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
@@ -82,7 +82,7 @@ declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <
define <vscale x 2 x i64> @masked_load_nxv2i64(<vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
@@ -93,7 +93,7 @@ declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <
define <vscale x 4 x i8> @masked_load_nxv4i8(<vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
@@ -104,7 +104,7 @@ declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vsc
define <vscale x 4 x i16> @masked_load_nxv4i16(<vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
@@ -115,7 +115,7 @@ declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <
define <vscale x 4 x i32> @masked_load_nxv4i32(<vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
@@ -126,7 +126,7 @@ declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <
define <vscale x 4 x i64> @masked_load_nxv4i64(<vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
@@ -137,7 +137,7 @@ declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>*, i32, <
define <vscale x 8 x i8> @masked_load_nxv8i8(<vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
@@ -148,7 +148,7 @@ declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vsc
define <vscale x 8 x i16> @masked_load_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
@@ -159,7 +159,7 @@ declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <
define <vscale x 8 x i32> @masked_load_nxv8i32(<vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
@@ -170,7 +170,7 @@ declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <
define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
@@ -181,7 +181,7 @@ declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <
define <vscale x 16 x i8> @masked_load_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
@@ -192,7 +192,7 @@ declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <
define <vscale x 16 x i16> @masked_load_nxv16i16(<vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
@@ -203,7 +203,7 @@ declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>*, i32
define <vscale x 16 x i32> @masked_load_nxv16i32(<vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
@@ -214,7 +214,7 @@ declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>*, i32
define <vscale x 32 x i8> @masked_load_nxv32i8(<vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
@@ -225,7 +225,7 @@ declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <
define <vscale x 32 x i16> @masked_load_nxv32i16(<vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
@@ -236,7 +236,7 @@ declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32
define <vscale x 64 x i8> @masked_load_nxv64i8(<vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>* %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
38 changes: 19 additions & 19 deletions llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -2955,7 +2955,7 @@ define void @sink_splat_vp_mul(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: .LBB46_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -2995,7 +2995,7 @@ define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: .LBB47_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3037,7 +3037,7 @@ define void @sink_splat_vp_add_commute(i32* nocapture %a, i32 signext %x, <4 x i
; CHECK-NEXT: .LBB48_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
@@ -3077,7 +3077,7 @@ define void @sink_splat_vp_sub(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: .LBB49_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3115,7 +3115,7 @@ define void @sink_splat_vp_rsub(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB50_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3155,7 +3155,7 @@ define void @sink_splat_vp_shl(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i
; CHECK-NEXT: .LBB51_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3195,7 +3195,7 @@ define void @sink_splat_vp_lshr(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB52_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3235,7 +3235,7 @@ define void @sink_splat_vp_ashr(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB53_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3275,7 +3275,7 @@ define void @sink_splat_vp_fmul(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB54_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3315,7 +3315,7 @@ define void @sink_splat_vp_fdiv(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB55_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3353,7 +3353,7 @@ define void @sink_splat_vp_frdiv(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB56_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3393,7 +3393,7 @@ define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB57_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3433,7 +3433,7 @@ define void @sink_splat_vp_fsub(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB58_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3473,7 +3473,7 @@ define void @sink_splat_vp_frsub(float* nocapture %a, float %x, <4 x i1> %m, i32
; CHECK-NEXT: .LBB59_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3513,7 +3513,7 @@ define void @sink_splat_vp_udiv(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB60_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3553,7 +3553,7 @@ define void @sink_splat_vp_sdiv(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB61_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3593,7 +3593,7 @@ define void @sink_splat_vp_urem(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB62_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3633,7 +3633,7 @@ define void @sink_splat_vp_srem(i32* nocapture %a, i32 signext %x, <4 x i1> %m,
; CHECK-NEXT: .LBB63_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
@@ -3674,7 +3674,7 @@ define void @sink_splat_vp_srem_commute(i32* nocapture %a, i32 signext %x, <4 x
; CHECK-NEXT: .LBB64_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
@@ -11,7 +11,7 @@ define <32 x i8> @strided_load_i8(ptr %p, i64 %stride, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 %stride, <32 x i1> %m)
@@ -21,7 +21,7 @@ define <32 x i8> @strided_load_i8(ptr %p, i64 %stride, <32 x i1> %m) {
define <2 x i64> @strided_load_i64(ptr %p, i64 %stride, <2 x i1> %m) {
; CHECK-LABEL: strided_load_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64> undef, ptr %p, i64 %stride, <2 x i1> %m)
@@ -32,7 +32,7 @@ define <32 x i8> @strided_load_i8_splat(ptr %p, <32 x i1> %m) {
; CHECK-LABEL: strided_load_i8_splat:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), zero, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 0, <32 x i1> %m)
@@ -44,7 +44,7 @@ define <32 x i8> @strided_load_i8_reverse(ptr %p, <32 x i1> %m) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 -1, <32 x i1> %m)
@@ -56,7 +56,7 @@ define <32 x i8> @strided_load_i8_nostride(ptr %p, <32 x i1> %m) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, 1
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 1, <32 x i1> %m)
@@ -117,7 +117,7 @@ declare <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscal
define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
; CHECK-LABEL: strided_load_vscale_i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
148 changes: 74 additions & 74 deletions llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll

Large diffs are not rendered by default.

168 changes: 84 additions & 84 deletions llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll

Large diffs are not rendered by default.

150 changes: 75 additions & 75 deletions llvm/test/CodeGen/RISCV/rvv/vand-vp.ll

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half>, <vsca
define <vscale x 1 x half> @vfsgnj_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@ declare <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half>, <vsca
define <vscale x 2 x half> @vfsgnj_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@ declare <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half>, <vsca
define <vscale x 4 x half> @vfsgnj_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@ declare <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half>, <vsca
define <vscale x 8 x half> @vfsgnj_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@ declare <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half>, <v
define <vscale x 16 x half> @vfsgnj_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@ declare <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half>, <v
define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@ declare <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float>, <vs
define <vscale x 1 x float> @vfsgnj_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@ declare <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float>, <vs
define <vscale x 2 x float> @vfsgnj_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@ declare <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float>, <vs
define <vscale x 4 x float> @vfsgnj_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@ declare <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float>, <vs
define <vscale x 8 x float> @vfsgnj_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@ declare <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float>,
define <vscale x 16 x float> @vfsgnj_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@ declare <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double>, <
define <vscale x 1 x double> @vfsgnj_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@ declare <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double>, <
define <vscale x 2 x double> @vfsgnj_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -321,7 +321,7 @@ declare <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double>, <
define <vscale x 4 x double> @vfsgnj_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -345,7 +345,7 @@ declare <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double>, <
define <vscale x 8 x double> @vfsgnj_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll

Large diffs are not rendered by default.

100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll

Large diffs are not rendered by default.

64 changes: 32 additions & 32 deletions llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll

Large diffs are not rendered by default.

62 changes: 31 additions & 31 deletions llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half>, <vscale
define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@ declare <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half>, <vscale
define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@ declare <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half>, <vscale
define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@ declare <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@ declare <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half>, <vsc
define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@ declare <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half>, <vsc
define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@ declare <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float>, <vsca
define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@ declare <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float>, <vsca
define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@ declare <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float>, <vsca
define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@ declare <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float>, <vsca
define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@ declare <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float>, <v
define <vscale x 16 x float> @vfmax_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@ declare <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double>, <vs
define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@ declare <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double>, <vs
define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -321,7 +321,7 @@ declare <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double>, <vs
define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -345,7 +345,7 @@ declare <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double>, <vs
define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half>, <vscale
define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -33,7 +33,7 @@ declare <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half>, <vscale
define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -57,7 +57,7 @@ declare <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half>, <vscale
define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -81,7 +81,7 @@ declare <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -105,7 +105,7 @@ declare <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half>, <vsc
define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -129,7 +129,7 @@ declare <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half>, <vsc
define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -153,7 +153,7 @@ declare <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float>, <vsca
define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -177,7 +177,7 @@ declare <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float>, <vsca
define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -201,7 +201,7 @@ declare <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float>, <vsca
define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -225,7 +225,7 @@ declare <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float>, <vsca
define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -249,7 +249,7 @@ declare <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float>, <v
define <vscale x 16 x float> @vfmin_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -273,7 +273,7 @@ declare <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double>, <vs
define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -297,7 +297,7 @@ declare <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double>, <vs
define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -321,7 +321,7 @@ declare <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double>, <vs
define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -345,7 +345,7 @@ declare <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double>, <vs
define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)