144 changes: 36 additions & 108 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll

Large diffs are not rendered by default.

56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -86,9 +86,7 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

@@ -170,9 +168,7 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

@@ -256,9 +252,7 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

@@ -344,9 +338,7 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

@@ -388,9 +380,7 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

@@ -432,9 +422,7 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

@@ -478,9 +466,7 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

@@ -524,9 +510,7 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

@@ -568,9 +552,7 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

@@ -614,9 +596,7 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

@@ -660,9 +640,7 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

@@ -706,9 +684,7 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

@@ -752,9 +728,7 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

@@ -867,8 +841,6 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
52 changes: 13 additions & 39 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -76,9 +76,7 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.maximum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.maximum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

@@ -150,9 +148,7 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.maximum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.maximum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

@@ -226,9 +222,7 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.maximum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.maximum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

@@ -304,9 +298,7 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.maximum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.maximum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

@@ -340,9 +332,7 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.maximum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.maximum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

@@ -376,9 +366,7 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.maximum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.maximum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

@@ -414,9 +402,7 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.maximum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.maximum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

@@ -452,9 +438,7 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.maximum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.maximum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

@@ -488,9 +472,7 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.maximum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.maximum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

@@ -526,9 +508,7 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.maximum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.maximum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

@@ -564,9 +544,7 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.maximum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.maximum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

@@ -615,9 +593,7 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v24
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.maximum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

@@ -808,8 +784,6 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.maximum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
52 changes: 13 additions & 39 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -76,9 +76,7 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.minimum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

@@ -150,9 +148,7 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.minimum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

@@ -226,9 +222,7 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.minimum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

@@ -304,9 +298,7 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.minimum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

@@ -340,9 +332,7 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.minimum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

@@ -376,9 +366,7 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.minimum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

@@ -414,9 +402,7 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.minimum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

@@ -452,9 +438,7 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.minimum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

@@ -488,9 +472,7 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v11
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.minimum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

@@ -526,9 +508,7 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v14
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.minimum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

@@ -564,9 +544,7 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v20
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.minimum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

@@ -615,9 +593,7 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v24
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.minimum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

@@ -808,8 +784,6 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.minimum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
48 changes: 12 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -87,9 +87,7 @@ define void @splat_zero_v8f16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x half> poison, half 0.0, i32 0
%b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
store <8 x half> %b, ptr %x
store <8 x half> splat (half 0.0), ptr %x
ret void
}

@@ -100,9 +98,7 @@ define void @splat_zero_v4f32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x float> poison, float 0.0, i32 0
%b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
store <4 x float> %b, ptr %x
store <4 x float> splat (float 0.0), ptr %x
ret void
}

@@ -113,9 +109,7 @@ define void @splat_zero_v2f64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <2 x double> poison, double 0.0, i32 0
%b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
store <2 x double> %b, ptr %x
store <2 x double> splat (double 0.0), ptr %x
ret void
}

@@ -126,9 +120,7 @@ define void @splat_zero_16f16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x half> poison, half 0.0, i32 0
%b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %b, ptr %x
store <16 x half> splat (half 0.0), ptr %x
ret void
}

@@ -139,9 +131,7 @@ define void @splat_zero_v8f32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x float> poison, float 0.0, i32 0
%b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %b, ptr %x
store <8 x float> splat (float 0.0), ptr %x
ret void
}

@@ -152,9 +142,7 @@ define void @splat_zero_v4f64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x double> poison, double 0.0, i32 0
%b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %b, ptr %x
store <4 x double> splat (double 0.0), ptr %x
ret void
}

@@ -166,9 +154,7 @@ define void @splat_negzero_v8f16(ptr %x) {
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x half> poison, half -0.0, i32 0
%b = shufflevector <8 x half> %a, <8 x half> poison, <8 x i32> zeroinitializer
store <8 x half> %b, ptr %x
store <8 x half> splat (half -0.0), ptr %x
ret void
}

@@ -180,9 +166,7 @@ define void @splat_negzero_v4f32(ptr %x) {
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x float> poison, float -0.0, i32 0
%b = shufflevector <4 x float> %a, <4 x float> poison, <4 x i32> zeroinitializer
store <4 x float> %b, ptr %x
store <4 x float> splat (float -0.0), ptr %x
ret void
}

@@ -204,9 +188,7 @@ define void @splat_negzero_v2f64(ptr %x) {
; CHECK-RV64-NEXT: vmv.v.x v8, a1
; CHECK-RV64-NEXT: vse64.v v8, (a0)
; CHECK-RV64-NEXT: ret
%a = insertelement <2 x double> poison, double -0.0, i32 0
%b = shufflevector <2 x double> %a, <2 x double> poison, <2 x i32> zeroinitializer
store <2 x double> %b, ptr %x
store <2 x double> splat (double -0.0), ptr %x
ret void
}

@@ -218,9 +200,7 @@ define void @splat_negzero_16f16(ptr %x) {
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x half> poison, half -0.0, i32 0
%b = shufflevector <16 x half> %a, <16 x half> poison, <16 x i32> zeroinitializer
store <16 x half> %b, ptr %x
store <16 x half> splat (half -0.0), ptr %x
ret void
}

@@ -232,9 +212,7 @@ define void @splat_negzero_v8f32(ptr %x) {
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x float> poison, float -0.0, i32 0
%b = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> zeroinitializer
store <8 x float> %b, ptr %x
store <8 x float> splat (float -0.0), ptr %x
ret void
}

@@ -256,8 +234,6 @@ define void @splat_negzero_v4f64(ptr %x) {
; CHECK-RV64-NEXT: vmv.v.x v8, a1
; CHECK-RV64-NEXT: vse64.v v8, (a0)
; CHECK-RV64-NEXT: ret
%a = insertelement <4 x double> poison, double -0.0, i32 0
%b = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> zeroinitializer
store <4 x double> %b, ptr %x
store <4 x double> splat (double -0.0), ptr %x
ret void
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
@@ -24,7 +24,7 @@ define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl)
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x float> %v
}

@@ -50,7 +50,7 @@ define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl)
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x double> %v
}

@@ -74,7 +74,7 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x double> %v
}

6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
@@ -42,7 +42,7 @@ define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}

@@ -66,7 +66,7 @@ define <4 x i1> @vfptosi_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}

@@ -91,6 +91,6 @@ define <4 x i1> @vfptosi_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -67,7 +67,7 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -105,7 +105,7 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -145,7 +145,7 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -187,7 +187,7 @@ define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.x.f.v v8, v10
; ZVFHMIN-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -213,7 +213,7 @@ define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -237,7 +237,7 @@ define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -259,7 +259,7 @@ define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -283,7 +283,7 @@ define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -313,7 +313,7 @@ define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -339,7 +339,7 @@ define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -363,7 +363,7 @@ define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -385,7 +385,7 @@ define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -434,6 +434,6 @@ define <32 x i64> @vfptosi_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
%v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x i64> %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
@@ -42,7 +42,7 @@ define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}

@@ -66,7 +66,7 @@ define <4 x i1> @vfptoui_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}

@@ -91,6 +91,6 @@ define <4 x i1> @vfptoui_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i1> %v
}
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -67,7 +67,7 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
; ZVFHMIN-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -105,7 +105,7 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -145,7 +145,7 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -187,7 +187,7 @@ define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.xu.f.v v8, v10
; ZVFHMIN-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -213,7 +213,7 @@ define <4 x i8> @vfptoui_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -237,7 +237,7 @@ define <4 x i16> @vfptoui_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -259,7 +259,7 @@ define <4 x i32> @vfptoui_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -283,7 +283,7 @@ define <4 x i64> @vfptoui_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -313,7 +313,7 @@ define <4 x i8> @vfptoui_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i8> %v
}

@@ -339,7 +339,7 @@ define <4 x i16> @vfptoui_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i16> %v
}

@@ -363,7 +363,7 @@ define <4 x i32> @vfptoui_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i32> %v
}

@@ -385,7 +385,7 @@ define <4 x i64> @vfptoui_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
%v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x i64> %v
}

@@ -434,6 +434,6 @@ define <32 x i64> @vfptoui_v32i64_v32f64_unmasked(<32 x double> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
%v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x i64> %v
}
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -25,7 +25,7 @@ define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x half> %v
}

@@ -51,7 +51,7 @@ define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %v
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v9
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x half> %v
}

@@ -75,7 +75,7 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext %
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
%v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> splat (i1 true), i32 %vl)
ret <2 x float> %v
}

44 changes: 11 additions & 33 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -523,9 +523,7 @@ define void @seteq_vi_v16i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x i8>, ptr %x
%b = insertelement <16 x i8> poison, i8 0, i32 0
%c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
%d = icmp eq <16 x i8> %a, %c
%d = icmp eq <16 x i8> %a, splat (i8 0)
store <16 x i1> %d, ptr %z
ret void
}
@@ -540,9 +538,7 @@ define void @setne_vi_v32i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v10, (a1)
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = insertelement <32 x i8> poison, i8 0, i32 0
%c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
%d = icmp ne <32 x i8> %a, %c
%d = icmp ne <32 x i8> %a, splat (i8 0)
store <32 x i1> %d, ptr %z
ret void
}
@@ -557,9 +553,7 @@ define void @setgt_vi_v64i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v12, (a1)
; CHECK-NEXT: ret
%a = load <64 x i8>, ptr %x
%b = insertelement <64 x i8> poison, i8 0, i32 0
%c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
%d = icmp sgt <64 x i8> %a, %c
%d = icmp sgt <64 x i8> %a, splat (i8 0)
store <64 x i1> %d, ptr %z
ret void
}
@@ -574,9 +568,7 @@ define void @setgt_vi_v64i8_nonzero(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v12, (a1)
; CHECK-NEXT: ret
%a = load <64 x i8>, ptr %x
%b = insertelement <64 x i8> poison, i8 5, i32 0
%c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
%d = icmp sgt <64 x i8> %a, %c
%d = icmp sgt <64 x i8> %a, splat (i8 5)
store <64 x i1> %d, ptr %z
ret void
}
@@ -591,9 +583,7 @@ define void @setlt_vi_v128i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v16, (a1)
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
%b = insertelement <128 x i8> poison, i8 0, i32 0
%c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
%d = icmp slt <128 x i8> %a, %c
%d = icmp slt <128 x i8> %a, splat (i8 0)
store <128 x i1> %d, ptr %z
ret void
}
@@ -607,9 +597,7 @@ define void @setge_vi_v8i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i8>, ptr %x
%b = insertelement <8 x i8> poison, i8 0, i32 0
%c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
%d = icmp sge <8 x i8> %a, %c
%d = icmp sge <8 x i8> %a, splat (i8 0)
store <8 x i1> %d, ptr %z
ret void
}
@@ -623,9 +611,7 @@ define void @setle_vi_v16i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x i8>, ptr %x
%b = insertelement <16 x i8> poison, i8 0, i32 0
%c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
%d = icmp sle <16 x i8> %a, %c
%d = icmp sle <16 x i8> %a, splat (i8 0)
store <16 x i1> %d, ptr %z
ret void
}
@@ -640,9 +626,7 @@ define void @setugt_vi_v32i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v10, (a1)
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = insertelement <32 x i8> poison, i8 5, i32 0
%c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
%d = icmp ugt <32 x i8> %a, %c
%d = icmp ugt <32 x i8> %a, splat (i8 5)
store <32 x i1> %d, ptr %z
ret void
}
@@ -657,9 +641,7 @@ define void @setult_vi_v64i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v12, (a1)
; CHECK-NEXT: ret
%a = load <64 x i8>, ptr %x
%b = insertelement <64 x i8> poison, i8 5, i32 0
%c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
%d = icmp ult <64 x i8> %a, %c
%d = icmp ult <64 x i8> %a, splat (i8 5)
store <64 x i1> %d, ptr %z
ret void
}
@@ -674,9 +656,7 @@ define void @setuge_vi_v128i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v16, (a1)
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
%b = insertelement <128 x i8> poison, i8 5, i32 0
%c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
%d = icmp uge <128 x i8> %a, %c
%d = icmp uge <128 x i8> %a, splat (i8 5)
store <128 x i1> %d, ptr %z
ret void
}
@@ -690,9 +670,7 @@ define void @setule_vi_v8i8(ptr %x, ptr %z) {
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i8>, ptr %x
%b = insertelement <8 x i8> poison, i8 5, i32 0
%c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
%d = icmp ule <8 x i8> %a, %c
%d = icmp ule <8 x i8> %a, splat (i8 5)
store <8 x i1> %d, ptr %z
ret void
}
64 changes: 16 additions & 48 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -140,9 +140,7 @@ define void @splat_zero_v16i8(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x i8> poison, i8 0, i32 0
%b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
store <16 x i8> %b, ptr %x
store <16 x i8> splat (i8 0), ptr %x
ret void
}

@@ -153,9 +151,7 @@ define void @splat_zero_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x i16> poison, i16 0, i32 0
%b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
store <8 x i16> %b, ptr %x
store <8 x i16> splat (i16 0), ptr %x
ret void
}

@@ -166,9 +162,7 @@ define void @splat_zero_v4i32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x i32> poison, i32 0, i32 0
%b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
store <4 x i32> %b, ptr %x
store <4 x i32> splat (i32 0), ptr %x
ret void
}

@@ -179,9 +173,7 @@ define void @splat_zero_v2i64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <2 x i64> poison, i64 0, i32 0
%b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
store <2 x i64> %b, ptr %x
store <2 x i64> splat (i64 0), ptr %x
ret void
}

@@ -193,9 +185,7 @@ define void @splat_zero_v32i8(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <32 x i8> poison, i8 0, i32 0
%b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %b, ptr %x
store <32 x i8> splat (i8 0), ptr %x
ret void
}

@@ -206,9 +196,7 @@ define void @splat_zero_v16i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x i16> poison, i16 0, i32 0
%b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %b, ptr %x
store <16 x i16> splat (i16 0), ptr %x
ret void
}

@@ -219,9 +207,7 @@ define void @splat_zero_v8i32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x i32> poison, i32 0, i32 0
%b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %b, ptr %x
store <8 x i32> splat (i32 0), ptr %x
ret void
}

Expand All @@ -232,9 +218,7 @@ define void @splat_zero_v4i64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x i64> poison, i64 0, i32 0
%b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %b, ptr %x
store <4 x i64> splat (i64 0), ptr %x
ret void
}

Expand Down Expand Up @@ -311,9 +295,7 @@ define void @splat_allones_v16i8(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x i8> poison, i8 -1, i32 0
%b = shufflevector <16 x i8> %a, <16 x i8> poison, <16 x i32> zeroinitializer
store <16 x i8> %b, ptr %x
store <16 x i8> splat (i8 -1), ptr %x
ret void
}

Expand All @@ -324,9 +306,7 @@ define void @splat_allones_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x i16> poison, i16 -1, i32 0
%b = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> zeroinitializer
store <8 x i16> %b, ptr %x
store <8 x i16> splat (i16 -1), ptr %x
ret void
}

Expand All @@ -337,9 +317,7 @@ define void @splat_allones_v4i32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x i32> poison, i32 -1, i32 0
%b = shufflevector <4 x i32> %a, <4 x i32> poison, <4 x i32> zeroinitializer
store <4 x i32> %b, ptr %x
store <4 x i32> splat (i32 -1), ptr %x
ret void
}

Expand All @@ -350,9 +328,7 @@ define void @splat_allones_v2i64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <2 x i64> poison, i64 -1, i32 0
%b = shufflevector <2 x i64> %a, <2 x i64> poison, <2 x i32> zeroinitializer
store <2 x i64> %b, ptr %x
store <2 x i64> splat (i64 -1), ptr %x
ret void
}

Expand All @@ -364,9 +340,7 @@ define void @splat_allones_v32i8(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <32 x i8> poison, i8 -1, i32 0
%b = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> zeroinitializer
store <32 x i8> %b, ptr %x
store <32 x i8> splat (i8 -1), ptr %x
ret void
}

Expand All @@ -377,9 +351,7 @@ define void @splat_allones_v16i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x i16> poison, i16 -1, i32 0
%b = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> zeroinitializer
store <16 x i16> %b, ptr %x
store <16 x i16> splat (i16 -1), ptr %x
ret void
}

Expand All @@ -390,9 +362,7 @@ define void @splat_allones_v8i32(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x i32> poison, i32 -1, i32 0
%b = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> zeroinitializer
store <8 x i32> %b, ptr %x
store <8 x i32> splat (i32 -1), ptr %x
ret void
}

Expand All @@ -403,9 +373,7 @@ define void @splat_allones_v4i64(ptr %x) {
; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x i64> poison, i64 -1, i32 0
%b = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> zeroinitializer
store <4 x i64> %b, ptr %x
store <4 x i64> splat (i64 -1), ptr %x
ret void
}

208 changes: 52 additions & 156 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll

Large diffs are not rendered by default.

10 changes: 2 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
Expand Up @@ -12850,10 +12850,8 @@ define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), zero
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
ret <4 x i32> %v
}

Expand All @@ -12864,11 +12862,9 @@ define <4 x i32> @mgather_broadcast_load_unmasked2(ptr %base) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), zero
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%ptrhead = insertelement <4 x ptr> poison, ptr %base, i32 0
%ptrs = shufflevector <4 x ptr> %ptrhead, <4 x ptr> poison, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true), <4 x i32> poison)
ret <4 x i32> %v
}

Expand All @@ -12878,8 +12874,6 @@ define <4 x i32> @mgather_broadcast_load_masked(ptr %base, <4 x i1> %m) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), zero, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%ptrs = getelementptr inbounds i8, ptr %base, <4 x i32> zeroinitializer
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %m, <4 x i32> poison)
ret <4 x i32> %v
56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
Expand Up @@ -42,9 +42,7 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

Expand Down Expand Up @@ -86,9 +84,7 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

Expand Down Expand Up @@ -130,9 +126,7 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

Expand Down Expand Up @@ -176,9 +170,7 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

Expand Down Expand Up @@ -220,9 +212,7 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

Expand Down Expand Up @@ -264,9 +254,7 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

Expand Down Expand Up @@ -310,9 +298,7 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

Expand Down Expand Up @@ -356,9 +342,7 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

Expand Down Expand Up @@ -400,9 +384,7 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

Expand Down Expand Up @@ -446,9 +428,7 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

Expand Down Expand Up @@ -492,9 +472,7 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

Expand Down Expand Up @@ -538,9 +516,7 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

Expand Down Expand Up @@ -584,9 +560,7 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

Expand Down Expand Up @@ -690,8 +664,6 @@ define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
108 changes: 30 additions & 78 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll

Large diffs are not rendered by default.

56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
Expand Up @@ -38,9 +38,7 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

Expand Down Expand Up @@ -78,9 +76,7 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

Expand Down Expand Up @@ -118,9 +114,7 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

Expand Down Expand Up @@ -160,9 +154,7 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

Expand Down Expand Up @@ -200,9 +192,7 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.rint.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.rint.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

Expand Down Expand Up @@ -240,9 +230,7 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.rint.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.rint.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

Expand Down Expand Up @@ -282,9 +270,7 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.rint.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.rint.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

Expand Down Expand Up @@ -324,9 +310,7 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.rint.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.rint.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

Expand Down Expand Up @@ -364,9 +348,7 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

Expand Down Expand Up @@ -406,9 +388,7 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

Expand Down Expand Up @@ -448,9 +428,7 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

Expand Down Expand Up @@ -490,9 +468,7 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

Expand Down Expand Up @@ -532,9 +508,7 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

Expand Down Expand Up @@ -639,8 +613,6 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
Expand Up @@ -86,9 +86,7 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

Expand Down Expand Up @@ -170,9 +168,7 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

Expand Down Expand Up @@ -256,9 +252,7 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

Expand Down Expand Up @@ -344,9 +338,7 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

Expand Down Expand Up @@ -388,9 +380,7 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

Expand Down Expand Up @@ -432,9 +422,7 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

Expand Down Expand Up @@ -478,9 +466,7 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

Expand Down Expand Up @@ -524,9 +510,7 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

Expand Down Expand Up @@ -568,9 +552,7 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

Expand Down Expand Up @@ -614,9 +596,7 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

Expand Down Expand Up @@ -660,9 +640,7 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

Expand Down Expand Up @@ -706,9 +684,7 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

Expand Down Expand Up @@ -752,9 +728,7 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

Expand Down Expand Up @@ -867,8 +841,6 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
Expand Up @@ -86,9 +86,7 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

Expand Down Expand Up @@ -170,9 +168,7 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

Expand Down Expand Up @@ -256,9 +252,7 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

Expand Down Expand Up @@ -344,9 +338,7 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

Expand Down Expand Up @@ -388,9 +380,7 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

Expand Down Expand Up @@ -432,9 +422,7 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

Expand Down Expand Up @@ -478,9 +466,7 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

Expand Down Expand Up @@ -524,9 +510,7 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

Expand Down Expand Up @@ -568,9 +552,7 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

Expand Down Expand Up @@ -614,9 +596,7 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

Expand Down Expand Up @@ -660,9 +640,7 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

Expand Down Expand Up @@ -706,9 +684,7 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

Expand Down Expand Up @@ -752,9 +728,7 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

Expand Down Expand Up @@ -867,8 +841,6 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
56 changes: 14 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
Expand Up @@ -86,9 +86,7 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
; ZVFHMIN-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x half> %v
}

Expand Down Expand Up @@ -170,9 +168,7 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x half> %v
}

Expand Down Expand Up @@ -256,9 +252,7 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x half> %v
}

Expand Down Expand Up @@ -344,9 +338,7 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x half> %v
}

Expand Down Expand Up @@ -388,9 +380,7 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x float> %v
}

Expand Down Expand Up @@ -432,9 +422,7 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x float> %v
}

Expand Down Expand Up @@ -478,9 +466,7 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x float> %v
}

Expand Down Expand Up @@ -524,9 +510,7 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x float> %v
}

Expand Down Expand Up @@ -568,9 +552,7 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <2 x i1> poison, i1 true, i32 0
%m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
%v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
%v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}

Expand Down Expand Up @@ -614,9 +596,7 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <4 x i1> poison, i1 true, i32 0
%m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
%v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
%v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}

Expand Down Expand Up @@ -660,9 +640,7 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <8 x i1> poison, i1 true, i32 0
%m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
%v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
%v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}

Expand Down Expand Up @@ -706,9 +684,7 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <15 x i1> poison, i1 true, i32 0
%m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
%v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
%v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}

Expand Down Expand Up @@ -752,9 +728,7 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
%head = insertelement <16 x i1> poison, i1 true, i32 0
%m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
%v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
%v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}

Expand Down Expand Up @@ -867,8 +841,6 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: ret
%head = insertelement <32 x i1> poison, i1 true, i32 0
%m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
%v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
%v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
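
For reference, every hunk in these test files applies the same mechanical rewrite: the two-instruction insertelement/shufflevector splat idiom is replaced by a splat constant. Below is a minimal sketch of the equivalence, assuming an LLVM release recent enough to parse the splat constant syntax; the function names and the value 5 are hypothetical and chosen only for illustration, not taken from the patch.

define <4 x i32> @splat_idiom_old(<4 x i32> %x) {
  ; old idiom: build the <5, 5, 5, 5> vector with two instructions
  %head = insertelement <4 x i32> poison, i32 5, i32 0
  %s = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
  %r = add <4 x i32> %x, %s
  ret <4 x i32> %r
}

define <4 x i32> @splat_idiom_new(<4 x i32> %x) {
  ; splat (i32 5) denotes the same <5, 5, 5, 5> constant; the vector length is
  ; inferred from the use, so the spelling is independent of the fixed width
  %r = add <4 x i32> %x, splat (i32 5)
  ret <4 x i32> %r
}

The two forms are semantically equivalent; the splat form simply drops the %head/%s helper instructions that the unmasked and splat-store tests previously carried.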