diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 9a63d14b0ef0a..1d58860a0afc8 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -108,6 +108,22 @@ let TargetPrefix = "riscv" in {
   def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// May-Be-Operations
+
+let TargetPrefix = "riscv" in {
+
+  // Zimop
+  def int_riscv_mopr
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
+  def int_riscv_moprr
+      : DefaultAttrsIntrinsic<[llvm_any_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
+} // TargetPrefix = "riscv"
+
 //===----------------------------------------------------------------------===//
 // Vectors
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 05264f7fc4204..f2cacba0ff773 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8575,6 +8575,33 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
   }
+  case Intrinsic::riscv_mopr: {
+    if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
+      SDValue NewOp =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPR, DL, MVT::i64, NewOp,
+          DAG.getTargetConstant(Op.getConstantOperandVal(2), DL, MVT::i64));
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
+    }
+    return DAG.getNode(RISCVISD::MOPR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2));
+  }
+
+  case Intrinsic::riscv_moprr: {
+    if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
+          DAG.getTargetConstant(Op.getConstantOperandVal(3), DL, MVT::i64));
+      return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
+    }
+    return DAG.getNode(RISCVISD::MOPRR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2), Op.getOperand(3));
+  }
   case Intrinsic::riscv_clmul:
     if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
       SDValue NewOp0 =
@@ -11956,6 +11983,30 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
       return;
     }
+    case Intrinsic::riscv_mopr: {
+      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+        return;
+      SDValue NewOp =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPR, DL, MVT::i64, NewOp,
+          DAG.getTargetConstant(N->getConstantOperandVal(2), DL, MVT::i64));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      return;
+    }
+    case Intrinsic::riscv_moprr: {
+      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
+        return;
+      SDValue NewOp0 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+      SDValue NewOp1 =
+          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+      SDValue Res = DAG.getNode(
+          RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
+          DAG.getTargetConstant(N->getConstantOperandVal(3), DL, MVT::i64));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+      return;
+    }
    case Intrinsic::riscv_clmul: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        return;
@@ -18916,6 +18967,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(CLMUL)
   NODE_NAME_CASE(CLMULH)
   NODE_NAME_CASE(CLMULR)
+  NODE_NAME_CASE(MOPR)
+  NODE_NAME_CASE(MOPRR)
   NODE_NAME_CASE(SHA256SIG0)
   NODE_NAME_CASE(SHA256SIG1)
   NODE_NAME_CASE(SHA256SUM0)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 8d2fdbb9d40b4..c8afa7c40afbc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -142,6 +142,9 @@ enum NodeType : unsigned {
   SM4KS, SM4ED,
   SM3P0, SM3P1,
 
+  // May-Be-Operations
+  MOPR, MOPRR,
+
   // Vector Extension
   FIRST_VL_VECTOR_OP,
   // VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
index 1e8c70046c634..f8ec099ca8197 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZimop.td
@@ -34,6 +34,14 @@ class RVInstRMoprr<bits<4> imm4, bits<3> imm3, bits<3> funct3, RISCVOpcode opcod
   let Inst{25} = imm4{0};
 }
 
+def riscv_mopr : SDNode<"RISCVISD::MOPR",
+                        SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
+                                             SDTCisSameAs<0, 2>]>>;
+def riscv_moprr : SDNode<"RISCVISD::MOPRR",
+                         SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
+                                              SDTCisSameAs<0, 2>,
+                                              SDTCisSameAs<0, 3>]>>;
+
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
 class RVMopr<bits<7> imm7, bits<5> imm5, bits<3> funct3,
              RISCVOpcode opcode, string opcodestr>
@@ -57,3 +65,17 @@ foreach i = 0...7 in {
   def MOPRR#i : RVMoprr<0b1001, i, 0b100, OPC_SYSTEM,
                         "mop.rr."#i>, Sched<[]>;
 }
+
+let Predicates = [HasStdExtZimop] in {
+// Zimop instructions
+foreach i = 0...31 in {
+  def : Pat<(XLenVT (riscv_mopr GPR:$rs1, (XLenVT i))),
+            (!cast<Instruction>("MOPR"#i) GPR:$rs1)>;
+}
+
+foreach i = 0...7 in {
+  def : Pat<(XLenVT (riscv_moprr GPR:$rs1, GPR:$rs2, (XLenVT i))),
+            (!cast<Instruction>("MOPRR"#i) GPR:$rs1, GPR:$rs2)>;
+}
+
+} // Predicates = [HasStdExtZimop]
diff --git a/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
new file mode 100644
index 0000000000000..e5f36086f1cfc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32zimop-intrinsic.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zimop -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32ZIMOP
+
+declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b)
+
+define i32 @mopr0_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 0)
+  ret i32 %tmp
+}
+
+define i32 @mopr31_32(i32 %a) nounwind {
+; RV32ZIMOP-LABEL: mopr31_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 31)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c)
+
+define i32 @moprr0_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr0_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 0)
+  ret i32 %tmp
+}
+
+define i32 @moprr7_32(i32 %a, i32 %b) nounwind {
+; RV32ZIMOP-LABEL: moprr7_32:
+; RV32ZIMOP:       # %bb.0:
+; RV32ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV32ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 7)
+  ret i32 %tmp
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
new file mode 100644
index 0000000000000..cd57739a955d5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zimop-intrinsic.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zimop -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZIMOP
+
+declare i64 @llvm.riscv.mopr.i64(i64 %a, i64 %b)
+
+define i64 @mopr0_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr.i64(i64 %a, i64 0)
+  ret i64 %tmp
+}
+
+define i64 @mopr31_64(i64 %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.mopr.i64(i64 %a, i64 31)
+  ret i64 %tmp
+}
+
+declare i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 %c)
+
+define i64 @moprr0_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 0)
+  ret i64 %tmp
+}
+
+define i64 @moprr7_64(i64 %a, i64 %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_64:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i64 @llvm.riscv.moprr.i64(i64 %a, i64 %b, i64 7)
+  ret i64 %tmp
+}
+
+declare i32 @llvm.riscv.mopr.i32(i32 %a, i32 %b)
+
+define signext i32 @mopr0_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.0 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 0)
+  ret i32 %tmp
+}
+
+define signext i32 @mopr31_32(i32 signext %a) nounwind {
+; RV64ZIMOP-LABEL: mopr31_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.r.31 a0, a0
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.mopr.i32(i32 %a, i32 31)
+  ret i32 %tmp
+}
+
+declare i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 %c)
+
+define signext i32 @moprr0_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr0_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.0 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 0)
+  ret i32 %tmp
+}
+
+define signext i32 @moprr7_32(i32 signext %a, i32 signext %b) nounwind {
+; RV64ZIMOP-LABEL: moprr7_32:
+; RV64ZIMOP:       # %bb.0:
+; RV64ZIMOP-NEXT:    mop.rr.7 a0, a0, a1
+; RV64ZIMOP-NEXT:    sext.w a0, a0
+; RV64ZIMOP-NEXT:    ret
+  %tmp = call i32 @llvm.riscv.moprr.i32(i32 %a, i32 %b, i32 7)
+  ret i32 %tmp
+}
+