178 changes: 155 additions & 23 deletions clang/lib/CodeGen/CGCall.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
Expand Down Expand Up @@ -59,6 +60,7 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
case CC_Swift: return llvm::CallingConv::Swift;
}
}

Expand Down Expand Up @@ -109,7 +111,7 @@ static void appendParameterTypes(const CodeGenTypes &CGT,
auto protoParamInfos = FPT->getExtParameterInfos();
paramInfos.reserve(prefix.size() + protoParamInfos.size());
paramInfos.resize(prefix.size());
paramInfos.append(paramInfos.begin(), paramInfos.end());
paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
}

// Fast path: unknown target.
Expand Down Expand Up @@ -590,7 +592,6 @@ CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}


/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
Expand Down Expand Up @@ -679,7 +680,11 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
assert(inserted && "Recursively being processed?");

// Compute ABI information.
getABIInfo().computeInfo(*FI);
if (info.getCC() != CC_Swift) {
getABIInfo().computeInfo(*FI);
} else {
swiftcall::computeABIInfo(CGM, *FI);
}

// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
Expand Down Expand Up @@ -918,7 +923,7 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
}

void CodeGenFunction::ExpandTypeFromArgs(
QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
assert(LV.isSimple() &&
"Unexpected non-simple lvalue during struct expansion.");

Expand Down Expand Up @@ -1813,10 +1818,13 @@ void CodeGenModule::ConstructAttributeList(
getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
}

bool hasUsedSRet = false;

// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
SRETAttrs.addAttribute(llvm::Attribute::StructRet);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
PAL.push_back(llvm::AttributeSet::get(
Expand Down Expand Up @@ -1920,6 +1928,41 @@ void CodeGenModule::ConstructAttributeList(
Attrs.addAttribute(llvm::Attribute::NonNull);
}

switch (FI.getExtParameterInfo(ArgNo).getABI()) {
case ParameterABI::Ordinary:
break;

case ParameterABI::SwiftIndirectResult: {
// Add 'sret' if we haven't already used it for something, but
// only if the result is void.
if (!hasUsedSRet && RetTy->isVoidType()) {
Attrs.addAttribute(llvm::Attribute::StructRet);
hasUsedSRet = true;
}

// Add 'noalias' in either case.
Attrs.addAttribute(llvm::Attribute::NoAlias);

// Add 'dereferenceable' and 'alignment'.
auto PTy = ParamType->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
auto info = getContext().getTypeInfoInChars(PTy);
Attrs.addDereferenceableAttr(info.first.getQuantity());
Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
info.second.getQuantity()));
}
break;
}

case ParameterABI::SwiftErrorResult:
Attrs.addAttribute(llvm::Attribute::SwiftError);
break;

case ParameterABI::SwiftContext:
Attrs.addAttribute(llvm::Attribute::SwiftSelf);
break;
}

if (Attrs.hasAttributes()) {
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
Expand Down Expand Up @@ -1985,6 +2028,18 @@ static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
return nullptr;
}

namespace {
/// Function-exit cleanup that copies a swifterror value from a local
/// temporary back into the real swifterror argument slot.
///
/// LLVM only permits swifterror parameters to be used in very restricted
/// ways, so the function prolog copies the incoming error value into a
/// less-restricted temporary (`Temp`) and the body works on that; this
/// cleanup writes the final value back to the caller-visible argument
/// (`Arg`). It is pushed as a NormalCleanup only, so the write-back is
/// not guaranteed if the function exits via an unwind exception.
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
Address Temp; // less-restricted local copy of the error value
Address Arg;  // the actual swifterror argument passed by the caller
CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
CGF.Builder.CreateStore(errorValue, Arg);
}
};
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
Expand All @@ -2010,7 +2065,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,

ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
// Flattened function arguments.
SmallVector<llvm::Argument *, 16> FnArgs;
SmallVector<llvm::Value *, 16> FnArgs;
FnArgs.reserve(IRFunctionArgs.totalIRArgs());
for (auto &Arg : Fn->args()) {
FnArgs.push_back(&Arg);
Expand All @@ -2031,7 +2086,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,

// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
AI->setName("agg.result");
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
Expand Down Expand Up @@ -2119,8 +2174,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArgI.getCoerceToType() == ConvertType(Ty) &&
ArgI.getDirectOffset() == 0) {
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
llvm::Value *V = FnArgs[FirstIRArg];
auto AI = cast<llvm::Argument>(V);

if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
Expand Down Expand Up @@ -2189,6 +2244,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->getArgNo() + 1,
llvm::Attribute::NoAlias));

// LLVM expects swifterror parameters to be used in very restricted
// ways. Copy the value into a less-restricted temporary.
if (FI.getExtParameterInfo(ArgNo).getABI()
== ParameterABI::SwiftErrorResult) {
QualType pointeeTy = Ty->getPointeeType();
assert(pointeeTy->isPointerType());
Address temp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
Builder.CreateStore(incomingErrorValue, temp);
V = temp.getPointer();

// Push a cleanup to copy the value back at the end of the function.
// The convention does not guarantee that the value will be written
// back if the function exits with an unwind exception.
EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
}

// Ensure the argument is the correct type.
if (V->getType() != ArgI.getCoerceToType())
V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
Expand Down Expand Up @@ -3481,6 +3555,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}

Address swiftErrorTemp = Address::invalid();
Address swiftErrorArg = Address::invalid();

assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
unsigned ArgNo = 0;
Expand Down Expand Up @@ -3587,6 +3664,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
else
V = Builder.CreateLoad(RV.getAggregateAddress());

// Implement swifterror by copying into a new swifterror argument.
// We'll write back in the normal path out of the call.
if (CallInfo.getExtParameterInfo(ArgNo).getABI()
== ParameterABI::SwiftErrorResult) {
assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

QualType pointeeTy = I->Ty->getPointeeType();
swiftErrorArg =
Address(V, getContext().getTypeAlignInChars(pointeeTy));

swiftErrorTemp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
V = swiftErrorTemp.getPointer();
cast<llvm::AllocaInst>(V)->setSwiftError(true);

llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
Builder.CreateStore(errorValue, swiftErrorTemp);
}

// We might have to widen integers, but we should never truncate.
if (ArgInfo.getCoerceToType() != V->getType() &&
V->getType()->isIntegerTy())
Expand All @@ -3597,6 +3693,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (FirstIRArg < IRFuncTy->getNumParams() &&
V->getType() != IRFuncTy->getParamType(FirstIRArg))
V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

IRCallArgs[FirstIRArg] = V;
break;
}
Expand Down Expand Up @@ -3656,13 +3753,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}

case ABIArgInfo::CoerceAndExpand: {
assert(RV.isAggregate() &&
"CoerceAndExpand does not support non-aggregate types yet");

auto coercionType = ArgInfo.getCoerceAndExpandType();
auto layout = CGM.getDataLayout().getStructLayout(coercionType);

Address addr = RV.getAggregateAddress();
llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
if (RV.isAggregate()) {
addr = RV.getAggregateAddress();
} else {
assert(RV.isScalar()); // complex should always just be direct

llvm::Type *scalarType = RV.getScalarVal()->getType();
auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);

// Materialize to a temporary.
addr = CreateTempAlloca(RV.getScalarVal()->getType(),
CharUnits::fromQuantity(std::max(layout->getAlignment(),
scalarAlign)));
EmitLifetimeStart(scalarSize, addr.getPointer());

Builder.CreateStore(RV.getScalarVal(), addr);
}

addr = Builder.CreateElementBitCast(addr, coercionType);

unsigned IRArgPos = FirstIRArg;
Expand All @@ -3675,6 +3790,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
assert(IRArgPos == FirstIRArg + NumIRArgs);

if (tempSize) {
EmitLifetimeEnd(tempSize, addr.getPointer());
}

break;
}

Expand Down Expand Up @@ -3853,6 +3972,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!CI->getType()->isVoidTy())
CI->setName("call");

// Perform the swifterror writeback.
if (swiftErrorTemp.isValid()) {
llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
Builder.CreateStore(errorResult, swiftErrorArg);
}

// Emit any writebacks immediately. Arguably this should happen
// after any return-value munging.
if (CallArgs.hasWritebacks())
Expand All @@ -3870,31 +3995,38 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,

RValue Ret = [&] {
switch (RetAI.getKind()) {
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
SRetPtr.getPointer());
return ret;
}

case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
auto layout = CGM.getDataLayout().getStructLayout(coercionType);

Address addr = SRetPtr;
addr = Builder.CreateElementBitCast(addr, coercionType);

assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
bool requiresExtract = isa<llvm::StructType>(CI->getType());

unsigned unpaddedIndex = 0;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
llvm::Type *eltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
llvm::Value *elt = Builder.CreateExtractValue(CI, unpaddedIndex++);
llvm::Value *elt = CI;
if (requiresExtract)
elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
else
assert(unpaddedIndex == 0);
Builder.CreateStore(elt, eltAddr);
}
break;
// FALLTHROUGH
}

case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
SRetPtr.getPointer());
return ret;
}

case ABIArgInfo::Ignore:
Expand Down
1 change: 1 addition & 0 deletions clang/lib/CodeGen/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ add_clang_library(clangCodeGen
ModuleBuilder.cpp
ObjectFilePCHContainerOperations.cpp
SanitizerMetadata.cpp
SwiftCallingConv.cpp
TargetInfo.cpp

DEPENDS
Expand Down
4 changes: 2 additions & 2 deletions clang/lib/CodeGen/CodeGenFunction.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ class ObjCMethodDecl;
class ObjCImplementationDecl;
class ObjCPropertyImplDecl;
class TargetInfo;
class TargetCodeGenInfo;
class VarDecl;
class ObjCForCollectionStmt;
class ObjCAtTryStmt;
Expand All @@ -86,6 +85,7 @@ class BlockByrefHelpers;
class BlockByrefInfo;
class BlockFlags;
class BlockFieldFlags;
class TargetCodeGenInfo;

/// The kind of evaluation to perform on values of a particular
/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
Expand Down Expand Up @@ -3110,7 +3110,7 @@ class CodeGenFunction : public CodeGenTypeCache {
///
/// \param AI - The first function argument of the expansion.
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
SmallVectorImpl<llvm::Argument *>::iterator &AI);
SmallVectorImpl<llvm::Value *>::iterator &AI);

/// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
Expand Down
2 changes: 1 addition & 1 deletion clang/lib/CodeGen/CodeGenModule.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ class IndexedInstrProfReader;
}

namespace clang {
class TargetCodeGenInfo;
class ASTContext;
class AtomicType;
class FunctionDecl;
Expand Down Expand Up @@ -93,6 +92,7 @@ class CGCUDARuntime;
class BlockFieldFlags;
class FunctionArgList;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;

struct OrderGlobalInits {
unsigned int priority;
Expand Down
2 changes: 1 addition & 1 deletion clang/lib/CodeGen/CodeGenTypes.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ class StructType;
}

namespace clang {
class ABIInfo;
class ASTContext;
template <typename> class CanQual;
class CXXConstructorDecl;
Expand All @@ -51,6 +50,7 @@ class Type;
typedef CanQual<Type> CanQualType;

namespace CodeGen {
class ABIInfo;
class CGCXXABI;
class CGRecordLayout;
class CodeGenModule;
Expand Down
830 changes: 830 additions & 0 deletions clang/lib/CodeGen/SwiftCallingConv.cpp

Large diffs are not rendered by default.

87 changes: 79 additions & 8 deletions clang/lib/CodeGen/TargetInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
Expand Down Expand Up @@ -68,6 +69,46 @@ Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  // Tally how many registers the expanded scalar sequence would need,
  // counting integer/pointer and floating-point/vector registers together.
  unsigned neededRegisters = 0;
  for (llvm::Type *type : scalarTypes) {
    if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      // An integer wider than a pointer is split across several
      // pointer-sized registers; round up to count the partial one.
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      neededRegisters += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else if (type->isPointerTy()) {
      neededRegisters += 1;
    } else {
      // Anything else in a legal expansion is a float or vector, each of
      // which occupies a single register here.
      assert(type->isVectorTy() || type->isFloatingPointTy());
      neededRegisters += 1;
    }
  }

  return neededRegisters > maxAllRegisters;
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // Conservative default: assume the target guarantees 128-bit SIMD
  // support but nothing more, so accept vectors wider than 8 bytes and
  // no wider than 16. Element type and count are ignored here; targets
  // with stricter rules override this hook.
  auto sizeInBytes = vectorSize.getQuantity();
  return sizeInBytes > 8 && sizeInBytes <= 16;
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
Expand Down Expand Up @@ -866,7 +907,7 @@ struct CCState {
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
class X86_32ABIInfo : public SwiftABIInfo {
enum Class {
Integer,
Float
Expand Down Expand Up @@ -935,12 +976,22 @@ class X86_32ABIInfo : public ABIInfo {
X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
bool RetSmallStructInRegABI, bool Win32StructABI,
unsigned NumRegisterParameters, bool SoftFloatABI)
: ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
: SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
IsRetSmallStructInRegABI(RetSmallStructInRegABI),
IsWin32StructABI(Win32StructABI),
IsSoftFloatABI(SoftFloatABI),
IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
DefaultNumRegisterParameters(NumRegisterParameters) {}

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
// LLVM's x86-32 lowering currently only assigns up to three
// integer registers and three fp registers. Oddly, it'll use up to
// four vector registers for vectors, but those can overlap with the
// scalar registers.
return occupiesMoreThan(CGT, scalars, /*total*/ 3);
}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
Expand Down Expand Up @@ -1758,7 +1809,7 @@ static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
class X86_64ABIInfo : public SwiftABIInfo {
enum Class {
Integer = 0,
SSE,
Expand Down Expand Up @@ -1880,7 +1931,7 @@ class X86_64ABIInfo : public ABIInfo {

public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
ABIInfo(CGT), AVXLevel(AVXLevel),
SwiftABIInfo(CGT), AVXLevel(AVXLevel),
Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
}

Expand All @@ -1907,6 +1958,12 @@ class X86_64ABIInfo : public ABIInfo {
bool has64BitPointers() const {
return Has64BitPointers;
}

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
Expand Down Expand Up @@ -4338,7 +4395,7 @@ PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,

namespace {

class AArch64ABIInfo : public ABIInfo {
class AArch64ABIInfo : public SwiftABIInfo {
public:
enum ABIKind {
AAPCS = 0,
Expand All @@ -4349,7 +4406,8 @@ class AArch64ABIInfo : public ABIInfo {
ABIKind Kind;

public:
AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
: SwiftABIInfo(CGT), Kind(Kind) {}

private:
ABIKind getABIKind() const { return Kind; }
Expand Down Expand Up @@ -4382,6 +4440,12 @@ class AArch64ABIInfo : public ABIInfo {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
Expand Down Expand Up @@ -4856,7 +4920,7 @@ Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,

namespace {

class ARMABIInfo : public ABIInfo {
class ARMABIInfo : public SwiftABIInfo {
public:
enum ABIKind {
APCS = 0,
Expand All @@ -4869,7 +4933,8 @@ class ARMABIInfo : public ABIInfo {
ABIKind Kind;

public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
: SwiftABIInfo(CGT), Kind(_Kind) {
setCCs();
}

Expand Down Expand Up @@ -4915,6 +4980,12 @@ class ARMABIInfo : public ABIInfo {
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
void setCCs();

bool shouldPassIndirectlyForSwift(CharUnits totalSize,
ArrayRef<llvm::Type*> scalars,
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
Expand Down
5 changes: 3 additions & 2 deletions clang/lib/CodeGen/TargetInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,14 @@ class Value;
}

namespace clang {
class ABIInfo;
class Decl;

namespace CodeGen {
class ABIInfo;
class CallArgList;
class CodeGenModule;
class CodeGenFunction;
class CGFunctionInfo;
}

/// TargetCodeGenInfo - This class organizes various target-specific
/// codegeneration issues, like target-specific attributes, builtins and so
Expand Down Expand Up @@ -219,6 +218,8 @@ class TargetCodeGenInfo {
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const {}
};

} // namespace CodeGen
} // namespace clang

#endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
496 changes: 496 additions & 0 deletions clang/test/CodeGen/arm-swiftcall.c

Large diffs are not rendered by default.

115 changes: 115 additions & 0 deletions clang/test/CodeGenCXX/arm-swiftcall.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
// RUN: %clang_cc1 -triple armv7-apple-darwin9 -emit-llvm -o - %s -Wno-return-type-c-linkage | FileCheck %s

// This isn't really testing anything ARM-specific; it's just a convenient
// 32-bit platform.

#define SWIFTCALL __attribute__((swiftcall))
#define OUT __attribute__((swift_indirect_result))
#define ERROR __attribute__((swift_error_result))
#define CONTEXT __attribute__((swift_context))

/*****************************************************************************/
/********************************** LOWERING *********************************/
/*****************************************************************************/

#define TEST(TYPE) \
extern "C" SWIFTCALL TYPE return_##TYPE(void) { \
TYPE result = {}; \
return result; \
} \
extern "C" SWIFTCALL void take_##TYPE(TYPE v) { \
} \
extern "C" void test_##TYPE() { \
take_##TYPE(return_##TYPE()); \
}

/*****************************************************************************/
/*********************************** STRUCTS *********************************/
/*****************************************************************************/

typedef struct {
} struct_empty;
TEST(struct_empty);
// CHECK-LABEL: define {{.*}} @return_struct_empty()
// CHECK: ret void
// CHECK-LABEL: define {{.*}} @take_struct_empty()
// CHECK: ret void

// This is only properly testable in C++ because it relies on empty structs
// actually taking up space in a structure without requiring any extra data
// to be passed.
typedef struct {
int x;
struct_empty padding[2];
char c1;
float f0;
float f1;
} struct_1;
TEST(struct_1);
// CHECK-LABEL: define {{.*}} @return_struct_1()
// CHECK: [[RET:%.*]] = alloca [[REC:%.*]], align 4
// CHECK: @llvm.memset
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ i32, \[2 x i8\], i8, \[1 x i8\], float, float }]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[SECOND:%.*]] = load i8, i8* [[T0]], align 2
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 5
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = insertvalue [[UAGG:{ i32, i8, float, float }]] undef, i32 [[FIRST]], 0
// CHECK: [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i8 [[SECOND]], 1
// CHECK: [[T2:%.*]] = insertvalue [[UAGG]] [[T1]], float [[THIRD]], 2
// CHECK: [[T3:%.*]] = insertvalue [[UAGG]] [[T2]], float [[FOURTH]], 3
// CHECK: ret [[UAGG]] [[T3]]
// CHECK-LABEL: define {{.*}} @take_struct_1(i32, i8, float, float)
// CHECK: [[V:%.*]] = alloca [[REC]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: store i32 %0, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: store i8 %1, i8* [[T0]], align 2
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: store float %2, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 5
// CHECK: store float %3, float* [[T0]], align 4
// CHECK: ret void
// CHECK-LABEL: define void @test_struct_1()
// CHECK: [[TMP:%.*]] = alloca [[REC]], align 4
// CHECK: [[CALL:%.*]] = call [[SWIFTCC:cc16]] [[UAGG]] @return_struct_1()
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
// CHECK: store i32 [[T1]], i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
// CHECK: store i8 [[T1]], i8* [[T0]], align 2
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 2
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 5
// CHECK: [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 3
// CHECK: store float [[T1]], float* [[T0]], align 4
// CHECK: [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
// CHECK: [[FIRST:%.*]] = load i32, i32* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 2
// CHECK: [[SECOND:%.*]] = load i8, i8* [[T0]], align 2
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 4
// CHECK: [[THIRD:%.*]] = load float, float* [[T0]], align 4
// CHECK: [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 5
// CHECK: [[FOURTH:%.*]] = load float, float* [[T0]], align 4
// CHECK: call [[SWIFTCC]] void @take_struct_1(i32 [[FIRST]], i8 [[SECOND]], float [[THIRD]], float [[FOURTH]])
// CHECK: ret void

// A struct with a user-declared (non-trivial) destructor: the CHECK lines
// below verify it is returned via an sret pointer and passed by pointer
// (not byval) under swiftcall, rather than being expanded into scalars.
struct struct_indirect_1 {
int x;
~struct_indirect_1();
};
TEST(struct_indirect_1)

// CHECK-LABEL: define {{.*}} void @return_struct_indirect_1({{.*}} noalias sret

// Should not be byval.
// CHECK-LABEL: define {{.*}} void @take_struct_indirect_1({{.*}}*{{( %.*)?}})