diff --git a/llvm/lib/Target/BPF/BPF.td b/llvm/lib/Target/BPF/BPF.td
index a7aa6274f5ac1..436b7eef600e7 100644
--- a/llvm/lib/Target/BPF/BPF.td
+++ b/llvm/lib/Target/BPF/BPF.td
@@ -31,6 +31,10 @@ def MisalignedMemAccess : SubtargetFeature<"allows-misaligned-mem-access",
                                            "AllowsMisalignedMemAccess", "true",
                                            "Allows misaligned memory access">;
 
+def AllowBuiltinCall : SubtargetFeature<"allow-builtin-calls",
+                                        "AllowBuiltinCalls", "true",
+                                        "Allow calls to builtin functions">;
+
 def : Proc<"generic", []>;
 def : Proc<"v1", []>;
 def : Proc<"v2", []>;
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index ecefd2379356a..c9df0c9b627e9 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -208,6 +208,7 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
   HasMovsx = STI.hasMovsx();
 
   AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
+  AllowBuiltinCalls = STI.getAllowBuiltinCalls();
 }
 
 bool BPFTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
@@ -567,9 +568,10 @@ SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
     if (StringRef(E->getSymbol()) != BPF_TRAP) {
       Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
-      fail(CLI.DL, DAG,
-           Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
-                 "' is not supported."));
+      if (!AllowBuiltinCalls)
+        fail(CLI.DL, DAG,
+             Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
+                   "' is not supported."));
     }
   }
 
@@ -1196,3 +1198,18 @@ bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
 
   return true;
 }
+
+bool BPFTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
+                                                      bool IsSigned) const {
+  return IsSigned || Ty->isIntegerTy(32);
+}
+
+bool BPFTargetLowering::CanLowerReturn(
+    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
+    const Type *RetTy) const {
+  // At a minimum, return Outs.size() <= 1, or check valid types in CC.
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
+  return CCInfo.CheckReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.h b/llvm/lib/Target/BPF/BPFISelLowering.h
index 8607e4f8c9e69..a5036e31cb61d 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.h
+++ b/llvm/lib/Target/BPF/BPFISelLowering.h
@@ -68,6 +68,8 @@ class BPFTargetLowering : public TargetLowering {
   // Allows Misalignment
   bool AllowsMisalignedMemAccess;
 
+  bool AllowBuiltinCalls;
+
   SDValue LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
@@ -163,6 +165,14 @@ class BPFTargetLowering : public TargetLowering {
   MachineBasicBlock *
   EmitInstrWithCustomInserterLDimm64(MachineInstr &MI,
                                      MachineBasicBlock *BB) const;
+
+  // Returns true if arguments should be sign-extended in lib calls.
+  bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override;
+
+  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                      bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      LLVMContext &Context, const Type *RetTy) const override;
 };
 
 }
diff --git a/llvm/lib/Target/BPF/BPFSubtarget.cpp b/llvm/lib/Target/BPF/BPFSubtarget.cpp
index 726f8f4b39827..77a1a5fe7444c 100644
--- a/llvm/lib/Target/BPF/BPFSubtarget.cpp
+++ b/llvm/lib/Target/BPF/BPFSubtarget.cpp
@@ -70,6 +70,7 @@ void BPFSubtarget::initializeEnvironment() {
   HasLoadAcqStoreRel = false;
   HasGotox = false;
   AllowsMisalignedMemAccess = false;
+  AllowBuiltinCalls = false;
 }
 
 void BPFSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
diff --git a/llvm/lib/Target/BPF/BPFSubtarget.h b/llvm/lib/Target/BPF/BPFSubtarget.h
index 24eff862224b0..40751fc9b7454 100644
--- a/llvm/lib/Target/BPF/BPFSubtarget.h
+++ b/llvm/lib/Target/BPF/BPFSubtarget.h
@@ -70,6 +70,8 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
   bool HasLdsx, HasMovsx, HasBswap, HasSdivSmod, HasGotol, HasStoreImm,
       HasLoadAcqStoreRel, HasGotox;
 
+  bool AllowBuiltinCalls;
+
   std::unique_ptr<CallLowering> CallLoweringInfo;
   std::unique_ptr<InstructionSelector> InstSelector;
   std::unique_ptr<LegalizerInfo> Legalizer;
@@ -101,6 +103,7 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
   bool hasStoreImm() const { return HasStoreImm; }
   bool hasLoadAcqStoreRel() const { return HasLoadAcqStoreRel; }
   bool hasGotox() const { return HasGotox; }
+  bool getAllowBuiltinCalls() const { return AllowBuiltinCalls; }
 
   bool isLittleEndian() const { return IsLittleEndian; }
 
diff --git a/llvm/test/CodeGen/BPF/atomic-oversize.ll b/llvm/test/CodeGen/BPF/atomic-oversize.ll
index 187f0964d4fb8..6dc49398f091d 100644
--- a/llvm/test/CodeGen/BPF/atomic-oversize.ll
+++ b/llvm/test/CodeGen/BPF/atomic-oversize.ll
@@ -1,6 +1,4 @@
 ; RUN: llc -mtriple=bpf < %s | FileCheck %s
-; XFAIL: *
-; Doesn't currently build, with error 'only small returns supported'.
 
 define void @test(ptr %a) nounwind {
 ; CHECK-LABEL: test:
diff --git a/llvm/test/CodeGen/BPF/builtin_calls.ll b/llvm/test/CodeGen/BPF/builtin_calls.ll
new file mode 100644
index 0000000000000..18199eba7222a
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/builtin_calls.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=bpfel -mattr=+allow-builtin-calls < %s | FileCheck %s
+;
+; C code for this test case:
+;
+; long func(long a, long b) {
+;   long x;
+;   return __builtin_mul_overflow(a, b, &x);
+; }
+
+
+declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
+
+define noundef range(i64 0, 2) i64 @func(i64 noundef %a, i64 noundef %b) local_unnamed_addr {
+entry:
+  %0 = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
+  %1 = extractvalue { i64, i1 } %0, 1
+  %conv = zext i1 %1 to i64
+  ret i64 %conv
+}
+
+; CHECK-LABEL: func
+; CHECK: r4 = r2
+; CHECK: r2 = r1
+; CHECK: r3 = r2
+; CHECK: r3 s>>= 63
+; CHECK: r5 = r4
+; CHECK: r5 s>>= 63
+; CHECK: r1 = r10
+; CHECK: r1 += -16
+; CHECK: call __multi3
+; CHECK: r1 = *(u64 *)(r10 - 16)
+; CHECK: r1 s>>= 63
+; CHECK: w0 = 1
+; CHECK: r2 = *(u64 *)(r10 - 8)
+; CHECK: if r2 != r1 goto LBB0_2
+; CHECK: # %bb.1: # %entry
+; CHECK: w0 = 0
+; CHECK: LBB0_2: # %entry
+; CHECK: exit
\ No newline at end of file
diff --git a/llvm/test/CodeGen/BPF/struct_ret1.ll b/llvm/test/CodeGen/BPF/struct_ret1.ll
index 40d17ec514c48..eb66a7deacb91 100644
--- a/llvm/test/CodeGen/BPF/struct_ret1.ll
+++ b/llvm/test/CodeGen/BPF/struct_ret1.ll
@@ -1,6 +1,6 @@
 ; RUN: not llc -mtriple=bpf < %s 2> %t1
 ; RUN: FileCheck %s < %t1
-; CHECK: error: <unknown>:0:0: in function bar { i64, i32 } (i32, i32, i32, i32, i32): aggregate returns are not supported
+; CHECK: error: <unknown>:0:0: in function bar { i64, i32 } (i32, i32, i32, i32, i32): stack arguments are not supported
 
 %struct.S = type { i32, i32, i32 }
 
diff --git a/llvm/test/CodeGen/BPF/struct_ret2.ll b/llvm/test/CodeGen/BPF/struct_ret2.ll
index 170d55cc29df0..a20280949215e 100644
--- a/llvm/test/CodeGen/BPF/struct_ret2.ll
+++ b/llvm/test/CodeGen/BPF/struct_ret2.ll
@@ -1,6 +1,6 @@
 ; RUN: not llc -mtriple=bpf < %s 2> %t1
 ; RUN: FileCheck %s < %t1
-; CHECK: only small returns
+; CHECK: too many arguments
 
 ; Function Attrs: nounwind uwtable
 define { i64, i32 } @foo(i32 %a, i32 %b, i32 %c) #0 {