diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 09c9d4179ae13a..755ae2ac2361ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8864,6 +8864,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
       Opcode = ISD::VP_AND;
     break;
+  case ISD::VP_REDUCE_MUL:
+    // If it is VP_REDUCE_MUL mask operation then turn it to VP_REDUCE_AND
+    if (VT == MVT::i1)
+      Opcode = ISD::VP_REDUCE_AND;
+    break;
   case ISD::VP_REDUCE_ADD:
     // If it is VP_REDUCE_ADD mask operation then turn it to VP_REDUCE_XOR
     if (VT == MVT::i1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index 547799ddbda552..c9265477dba414 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -890,3 +890,121 @@ define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1>
   %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
   ret i1 %r
 }
+
+declare i1 @llvm.vp.reduce.mul.v1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v32i1(i1, <32 x i1>, <32 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.v64i1(i1, <64 x i1>, <64 x i1>, i32)
+
+define signext i1 @vpreduce_mul_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 9b4464000b1270..850990ffd7fd10 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -1029,3 +1029,122 @@ define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <
   %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
   ret i1 %r
 }
+
+declare i1 @llvm.vp.reduce.mul.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+  ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.mul.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_mul_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_mul_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vmnot.m v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vcpop.m a1, v9, v0.t
+; CHECK-NEXT:    seqz a1, a1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    ret
+  %r = call i1 @llvm.vp.reduce.mul.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+  ret i1 %r
+}
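Note (aside, not part of the patch): over i1, multiplication coincides with logical AND (1 * 1 = 1, and any product involving 0 is 0), which is why getNode can canonicalize an i1 VP_REDUCE_MUL to VP_REDUCE_AND. The tests above then exercise the existing mask AND-reduction lowering: invert the input mask (vmnot.m), population-count the active inverted bits (vcpop.m under the VP mask), check the count is zero (seqz), fold in the start value (and), and sign-extend the i1 result where the ABI requires it (neg). A minimal standalone sketch of an input that hits the new case, assuming llc built with the RISC-V V extension (the function name is hypothetical):

    ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s
    ; Over i1, x * y == x & y, so this mul reduction lowers as an AND-reduction.
    declare i1 @llvm.vp.reduce.mul.v4i1(i1, <4 x i1>, <4 x i1>, i32)

    define signext i1 @mul_mask_reduce_demo(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
      %r = call i1 @llvm.vp.reduce.mul.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
      ret i1 %r
    }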