diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 4a71097226f18..cd7343ff8df56 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -278,24 +278,28 @@ END_REGISTER_VP(vp_fshr, VP_FSHR)
 // llvm.vp.sadd.sat(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_sadd_sat, 2, 3, VP_SADDSAT, -1)
+VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_INTRINSIC(sadd_sat)
 VP_PROPERTY_FUNCTIONAL_SDOPC(SADDSAT)
 END_REGISTER_VP(vp_sadd_sat, VP_SADDSAT)
 
 // llvm.vp.uadd.sat(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_uadd_sat, 2, 3, VP_UADDSAT, -1)
+VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_INTRINSIC(uadd_sat)
 VP_PROPERTY_FUNCTIONAL_SDOPC(UADDSAT)
 END_REGISTER_VP(vp_uadd_sat, VP_UADDSAT)
 
 // llvm.vp.ssub.sat(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_ssub_sat, 2, 3, VP_SSUBSAT, -1)
+VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_INTRINSIC(ssub_sat)
 VP_PROPERTY_FUNCTIONAL_SDOPC(SSUBSAT)
 END_REGISTER_VP(vp_ssub_sat, VP_SSUBSAT)
 
 // llvm.vp.usub.sat(x,y,mask,vlen)
 BEGIN_REGISTER_VP(vp_usub_sat, 2, 3, VP_USUBSAT, -1)
+VP_PROPERTY_BINARYOP
 VP_PROPERTY_FUNCTIONAL_INTRINSIC(usub_sat)
 VP_PROPERTY_FUNCTIONAL_SDOPC(USUBSAT)
 END_REGISTER_VP(vp_usub_sat, VP_USUBSAT)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index a04e31a19a4f1..902001a376d6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -434,19 +434,12 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
-
 define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vsadd_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
@@ -1418,13 +1411,8 @@ define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; CHECK-LABEL: vsadd_vx_v32i64_evl12:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v24, v0, 2
 ; CHECK-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; CHECK-NEXT:    vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, ma
-; CHECK-NEXT:    vsadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index 5556b11e9a90c..57292147a0140 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -430,19 +430,12 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
-
 define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vsaddu_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
@@ -1414,13 +1407,8 @@ define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; CHECK-LABEL: vsaddu_vx_v32i64_evl12:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v24, v0, 2
 ; CHECK-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; CHECK-NEXT:    vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, ma
-; CHECK-NEXT:    vsaddu.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index c28317bf14269..353042fc889e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -449,20 +449,13 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
-
 define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vssub_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vssub.vx v16, v16, a1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
@@ -1460,14 +1453,9 @@ define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; CHECK-LABEL: vssub_vx_v32i64_evl12:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v24, v0, 2
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; CHECK-NEXT:    vssub.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, ma
-; CHECK-NEXT:    vssub.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index cbfe1292877ee..c00fb329b2f0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -444,20 +444,13 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
   ret <256 x i8> %v
 }
 
-; FIXME: The upper half is doing nothing.
-
 define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vssubu_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e8, m8, ta, ma
-; CHECK-NEXT:    vssubu.vx v16, v16, a1, v0.t
 ; CHECK-NEXT:    ret
   %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
   ret <256 x i8> %v
@@ -1455,14 +1448,9 @@ define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; CHECK-LABEL: vssubu_vx_v32i64_evl12:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v24, v0, 2
 ; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vsetivli zero, 12, e64, m8, ta, ma
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetivli zero, 0, e64, m8, ta, ma
-; CHECK-NEXT:    vssubu.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
   ret <32 x i64> %v
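
Note for reviewers (not part of the patch): VP_PROPERTY_BINARYOP is an X-macro hook that sits inside the BEGIN_REGISTER_VP/END_REGISTER_VP scopes edited above; consumers of VPIntrinsics.def redefine it before including the file. Below is a minimal C++ sketch of the consuming side, modeled on ISD::isVPBinaryOp in SelectionDAG but paraphrased here for illustration; the sketch's function name is hypothetical.

// Sketch: each VP_PROPERTY_BINARYOP line added by this patch becomes a
// `return true` for the matching VP SDNode opcode, so vp.sadd.sat,
// vp.uadd.sat, vp.ssub.sat, and vp.usub.sat are now classified as
// side-effect-free VP binary ops.
#include "llvm/CodeGen/ISDOpcodes.h"
using namespace llvm;

static bool isVPBinaryOpSketch(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
    // X-macro pass over VPIntrinsics.def: open a case per registered VP
    // SDNode, return true where the entry carries VP_PROPERTY_BINARYOP,
    // and fall back to `break` (i.e. false) otherwise.
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

That classification is what the test updates reflect: the deleted `vsetivli zero, 0` sequences operated on zero lanes, and once these opcodes are known to be VP binary ops, the generic all-lanes-disabled combine can presumably fold the dead upper-half operations away, hence the removed FIXME comments.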