[RISCV][GISel] Support select vector load intrinsics #160720
base: main
Conversation
@llvm/pr-subscribers-llvm-globalisel

Author: Jianjian Guan (jacquesguan)

Changes: Include unit-stride, strided and mask vector load intrinsics.

Patch is 88.95 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/160720.diff

7 Files Affected:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 71c21e43998d4..4a793c549ec91 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
@@ -79,6 +80,9 @@ class RISCVInstructionSelector : public InstructionSelector {
// Returns true if the instruction was modified.
void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
+ // An early selection function that runs before the selectImpl() call.
+ bool earlySelect(MachineInstr &I, MachineIRBuilder &MIB);
+
bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
// Custom selection methods
@@ -92,6 +96,8 @@ class RISCVInstructionSelector : public InstructionSelector {
void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
MachineIRBuilder &MIB) const;
bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool selectIntrinsicWithSideEffects(MachineInstr &I,
+ MachineIRBuilder &MIB) const;
ComplexRendererFns selectShiftMask(MachineOperand &Root,
unsigned ShiftWidth) const;
@@ -675,6 +681,109 @@ static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
CC = getRISCVCCFromICmp(Pred);
}
+bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
+ MachineInstr &I, MachineIRBuilder &MIB) const {
+ // Find the intrinsic ID.
+ unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
+ // Select the instruction.
+ switch (IntrinID) {
+ default:
+ return false;
+ case Intrinsic::riscv_vlm:
+ case Intrinsic::riscv_vle:
+ case Intrinsic::riscv_vle_mask:
+ case Intrinsic::riscv_vlse:
+ case Intrinsic::riscv_vlse_mask: {
+ bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
+ IntrinID == Intrinsic::riscv_vlse_mask;
+ bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
+ IntrinID == Intrinsic::riscv_vlse_mask;
+ LLT VT = MRI->getType(I.getOperand(0).getReg());
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+ // Result vector
+ const Register DstReg = I.getOperand(0).getReg();
+ const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
+ MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
+ if (IsMasked)
+ DstRC = TRI.getNoV0RegClass(DstRC);
+ RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
+
+ // Sources
+ bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
+ unsigned CurOp = 2;
+ SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+ // Passthru
+ if (HasPassthruOperand) {
+ auto PassthruReg = I.getOperand(CurOp++).getReg();
+ SrcOps.push_back(PassthruReg);
+ RBI.constrainGenericRegister(PassthruReg, *DstRC, *MRI);
+ } else {
+ auto UndefReg = MRI->createVirtualRegister(DstRC);
+ MIB.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(UndefReg);
+ SrcOps.push_back(UndefReg);
+ }
+
+ // Base Pointer
+ auto PtrReg = I.getOperand(CurOp++).getReg();
+ SrcOps.push_back(PtrReg);
+
+ // Stride
+ if (IsStrided) {
+ auto StrideReg = I.getOperand(CurOp++).getReg();
+ SrcOps.push_back(StrideReg);
+ }
+
+ // Mask
+ if (IsMasked) {
+ auto MaskReg = I.getOperand(CurOp++).getReg();
+ RBI.constrainGenericRegister(MaskReg, RISCV::VMV0RegClass, *MRI);
+ SrcOps.push_back(MaskReg);
+ }
+
+ RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+ const RISCV::VLEPseudo *P =
+ RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
+ static_cast<unsigned>(LMUL));
+
+ auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
+
+ // Select VL
+ auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+ for (auto &RenderFn : *VLOpFn)
+ RenderFn(PseudoMI);
+ if (auto VLReg = PseudoMI.getReg(PseudoMI.getInstr()->getNumOperands() - 1))
+ RBI.constrainGenericRegister(VLReg, RISCV::GPRNoX0RegClass, *MRI);
+
+ // SEW
+ PseudoMI.addImm(Log2SEW);
+
+ // Policy
+ uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
+ if (IsMasked)
+ Policy = I.getOperand(CurOp++).getImm();
+ PseudoMI.addImm(Policy);
+
+ // Memref
+ PseudoMI.cloneMemRefs(I);
+
+ I.eraseFromParent();
+ return true;
+ }
+ }
+}
+
+bool RISCVInstructionSelector::earlySelect(MachineInstr &MI,
+ MachineIRBuilder &MIB) {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+ return selectIntrinsicWithSideEffects(MI, MIB);
+ }
+}
+
bool RISCVInstructionSelector::select(MachineInstr &MI) {
MachineIRBuilder MIB(MI);
@@ -716,6 +825,9 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
return true;
}
+ if (earlySelect(MI, MIB))
+ return true;
+
if (selectImpl(MI, *CoverageInfo))
return true;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 597dd1271d394..bae7d78e89680 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#define GET_TARGET_REGBANK_IMPL
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 40b641680b2ce..af325aa93abd0 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -805,6 +805,19 @@ RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
return RC;
}
+const TargetRegisterClass *
+RISCVRegisterInfo::getNoV0RegClass(const TargetRegisterClass *RC) const {
+ if (RC == &RISCV::VRRegClass)
+ return &RISCV::VRNoV0RegClass;
+ if (RC == &RISCV::VRM2RegClass)
+ return &RISCV::VRM2NoV0RegClass;
+ if (RC == &RISCV::VRM4RegClass)
+ return &RISCV::VRM4NoV0RegClass;
+ if (RC == &RISCV::VRM8RegClass)
+ return &RISCV::VRM8NoV0RegClass;
+ return RC;
+}
+
void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
SmallVectorImpl<uint64_t> &Ops) const {
// VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 67726db504122..449e00c561d13 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -131,6 +131,9 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
getLargestLegalSuperClass(const TargetRegisterClass *RC,
const MachineFunction &) const override;
+ const TargetRegisterClass *
+ getNoV0RegClass(const TargetRegisterClass *RC) const;
+
void getOffsetOpcodes(const StackOffset &Offset,
SmallVectorImpl<uint64_t> &Ops) const override;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll
new file mode 100644
index 0000000000000..3e70ce2553668
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll
@@ -0,0 +1,1338 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vle64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ iXLen %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLe...
[truncated]
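
For reference, since the truncated listing above only reaches the llvm.riscv.vle and llvm.riscv.vle.mask tests, here is a minimal IR sketch of the other load shapes this patch selects. The operand layouts follow the selector code above (vlse adds a stride operand; vlm takes no passthru); the exact mangled intrinsic names are illustrative, written in the same iXLen-substituted style as the test file.

; Unit-stride load: passthru, base pointer, VL.
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64>, ptr, i64)
; Strided load: passthru, base pointer, byte stride, VL.
declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(<vscale x 1 x i64>, ptr, i64, i64)
; Mask load: base pointer and VL only, no passthru operand.
declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr, i64)

define <vscale x 1 x i64> @strided_load_sketch(ptr %p, i64 %stride, i64 %vl) {
entry:
  %v = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
      <vscale x 1 x i64> poison,
      ptr %p,
      i64 %stride,
      i64 %vl)
  ret <vscale x 1 x i64> %v
}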
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" | ||
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" | ||
#include "llvm/CodeGen/MachineJumpTableInfo.h" | ||
#include "llvm/CodeGen/MachineOperand.h" |
Why is this change needed?
Removed; it was mis-added by my editor.
return true;
}

+  if (earlySelect(MI, MIB))
Why does this need to be done "early"? Why can't it be part of the switch after the selectImpl call?
The motivation is that we process those intrinsics before pattern matching in SelectionDAG. I have now moved it after selectImpl.
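For context, a rough sketch of the reordered dispatch being described, reusing the names from the diff above (a sketch of the suggested structure, not the literal updated patch):

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);
  // ... preISelLower and other early handling elided ...

  // Try the TableGen-imported patterns first.
  if (selectImpl(MI, *CoverageInfo))
    return true;

  // Fall back to custom selection for the vector load intrinsics.
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  }
}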
#include "llvm/CodeGen/MachineRegisterInfo.h" | ||
#include "llvm/CodeGen/RegisterBank.h" | ||
#include "llvm/CodeGen/RegisterBankInfo.h" | ||
#include "llvm/CodeGen/TargetOpcodes.h" |
Why is this change needed?
Removed; it was mis-added by my editor.
Include unit-stride, strided and mask vector load intrinsics.
Force-pushed from 7ec1cd3 to a9a2e3c.