[CodeGen] Emit elementtype attributes for indirect inline asm constraints

This implements the clang side of D116531. The elementtype
attribute is added for all indirect constraints (*) and tests are
updated accordingly.

Differential Revision: https://reviews.llvm.org/D116666
nikic committed Jan 6, 2022
1 parent 1919720 commit e8b98a5
Showing 28 changed files with 250 additions and 226 deletions.
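For illustration only, not part of the commit: a minimal C++ sketch of the pattern applied throughout the patch — whenever a pointer is passed to inline asm for an indirect ("*") constraint, the call argument now carries an elementtype attribute holding the pointee type. The helper name buildIndirectAsmCall, the empty asm string, and the plain "*m" constraint are assumptions made for this example; the actual changes follow in CGBuiltin.cpp, CGObjCMac.cpp, and CGStmt.cpp.

#include "llvm/IR/Attributes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"

// Produces e.g.: call void asm sideeffect "", "*m"(i32* elementtype(i32) %p)
static llvm::CallInst *buildIndirectAsmCall(llvm::IRBuilder<> &Builder,
                                            llvm::Value *Ptr,
                                            llvm::Type *PointeeTy) {
  llvm::LLVMContext &Ctx = Builder.getContext();
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(Ctx), {Ptr->getType()}, /*isVarArg=*/false);
  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, "", "*m", /*hasSideEffects=*/true);
  llvm::CallInst *CI = Builder.CreateCall(IA, {Ptr});
  // The indirect operand must carry the pointee type via elementtype.
  CI->addParamAttr(0, llvm::Attribute::get(Ctx, llvm::Attribute::ElementType,
                                           PointeeTy));
  return CI;
}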
5 changes: 4 additions & 1 deletion clang/lib/CodeGen/CGBuiltin.cpp
@@ -1060,7 +1060,10 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,

llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
return CGF.Builder.CreateCall(IA, {Addr});
llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
CI->addParamAttr(
0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
return CI;
}

namespace {
10 changes: 9 additions & 1 deletion clang/lib/CodeGen/CGObjCMac.cpp
@@ -4370,14 +4370,22 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;

CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
llvm::CallInst *Call = CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
for (auto Pair : llvm::enumerate(Locals))
Call->addParamAttr(Pair.index(), llvm::Attribute::get(
CGF.getLLVMContext(), llvm::Attribute::ElementType,
cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}

void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
assert(!Locals.empty());
llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals);
call->setDoesNotThrow();
call->setCallingConv(CGF.getRuntimeCC());
for (auto Pair : llvm::enumerate(Locals))
call->addParamAttr(Pair.index(), llvm::Attribute::get(
Builder.getContext(), llvm::Attribute::ElementType,
cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}

/// Emit read hazards in all the protected blocks, i.e. all the blocks
116 changes: 64 additions & 52 deletions clang/lib/CodeGen/CGStmt.cpp
@@ -2109,42 +2109,35 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr,
SourceLocation Loc) {
llvm::Value *Arg;
std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
const TargetInfo::ConstraintInfo &Info, LValue InputValue,
QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
if (Info.allowsRegister() || !Info.allowsMemory()) {
if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
} else {
llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);

Arg = Builder.CreateLoad(
Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
} else {
Arg = InputValue.getPointer(*this);
ConstraintStr += '*';
}
if (CodeGenFunction::hasScalarEvaluationKind(InputType))
return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};

llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
Ty = llvm::PointerType::getUnqual(Ty);

return {Builder.CreateLoad(
Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
nullptr};
}
} else {
Arg = InputValue.getPointer(*this);
ConstraintStr += '*';
}

return Arg;
Address Addr = InputValue.getAddress(*this);
ConstraintStr += '*';
return {Addr.getPointer(), Addr.getElementType()};
}

llvm::Value* CodeGenFunction::EmitAsmInput(
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr,
std::string &ConstraintStr) {
std::pair<llvm::Value *, llvm::Type *>
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr,
std::string &ConstraintStr) {
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
@@ -2155,19 +2148,20 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
llvm::APSInt IntResult;
if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
getContext()))
return llvm::ConstantInt::get(getLLVMContext(), IntResult);
return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
}

Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
nullptr};
}

if (Info.allowsRegister() || !Info.allowsMemory())
if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
return EmitScalarExpr(InputExpr);
return {EmitScalarExpr(InputExpr), nullptr};
if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
return EmitScalarExpr(InputExpr);
return {EmitScalarExpr(InputExpr), nullptr};
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
@@ -2209,6 +2203,7 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
bool HasUnwindClobber, bool ReadOnly,
bool ReadNone, bool NoMerge, const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
const std::vector<llvm::Type *> &ArgElemTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
if (!HasUnwindClobber)
@@ -2224,6 +2219,15 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
Result.addFnAttr(llvm::Attribute::ReadOnly);
}

// Add elementtype attribute for indirect constraints.
for (auto Pair : llvm::enumerate(ArgElemTypes)) {
if (Pair.value()) {
auto Attr = llvm::Attribute::get(
CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
Result.addParamAttr(Pair.index(), Attr);
}
}

// Slap the source location of the inline asm into a !srcloc metadata on the
// call.
if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
@@ -2291,13 +2295,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
std::vector<llvm::Type *> ArgTypes;
std::vector<llvm::Type *> ArgElemTypes;
std::vector<llvm::Value*> Args;
llvm::BitVector ResultTypeRequiresCast;

// Keep track of inout constraints.
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
std::vector<llvm::Type*> InOutArgTypes;
std::vector<llvm::Type*> InOutArgElemTypes;

// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;
@@ -2399,21 +2405,19 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
} else {
llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
llvm::Value *DestPtr = Dest.getPointer(*this);
Address DestAddr = Dest.getAddress(*this);
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
// Otherwise there will be a mis-match if the matrix is also an
// input-argument which is represented as vector.
if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
DestAddrTy = llvm::PointerType::get(
ConvertType(OutExpr->getType()),
cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
}
ArgTypes.push_back(DestAddrTy);
Args.push_back(DestPtr);
if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
DestAddr = Builder.CreateElementBitCast(
DestAddr, ConvertType(OutExpr->getType()));

ArgTypes.push_back(DestAddr.getType());
ArgElemTypes.push_back(DestAddr.getElementType());
Args.push_back(DestAddr.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2423,9 +2427,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';

const Expr *InputExpr = S.getOutputExpr(i);
llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
InOutConstraints,
InputExpr->getExprLoc());
llvm::Value *Arg;
llvm::Type *ArgElemType;
std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
Info, Dest, InputExpr->getType(), InOutConstraints,
InputExpr->getExprLoc());

if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
@@ -2444,6 +2450,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += OutputConstraint;

InOutArgTypes.push_back(Arg->getType());
InOutArgElemTypes.push_back(ArgElemType);
InOutArgs.push_back(Arg);
}
}
@@ -2483,7 +2490,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getTarget(), CGM, S, false /* No EarlyClobber */);

std::string ReplaceConstraint (InputConstraint);
llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
llvm::Value *Arg;
llvm::Type *ArgElemType;
std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);

// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -2528,6 +2537,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
VT->getPrimitiveSizeInBits().getKnownMinSize());

ArgTypes.push_back(Arg->getType());
ArgElemTypes.push_back(ArgElemType);
Args.push_back(Arg);
Constraints += InputConstraint;
}
@@ -2546,6 +2556,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::BlockAddress::get(CurFn, Dest.getBlock());
Args.push_back(BA);
ArgTypes.push_back(BA->getType());
ArgElemTypes.push_back(nullptr);
if (!Constraints.empty())
Constraints += ',';
Constraints += 'X';
@@ -2557,6 +2568,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Append the "input" part of inout constraints last.
for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
ArgTypes.push_back(InOutArgTypes[i]);
ArgElemTypes.push_back(InOutArgElemTypes[i]);
Args.push_back(InOutArgs[i]);
}
Constraints += InOutConstraints;
@@ -2647,18 +2659,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitBlock(Fallthrough);
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
ResultRegTypes, *this, RegResults);
ResultRegTypes, ArgElemTypes, *this, RegResults);
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
InNoMergeAttributedStmt, S, ResultRegTypes, *this,
RegResults);
InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
*this, RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
ResultRegTypes, *this, RegResults);
ResultRegTypes, ArgElemTypes, *this, RegResults);
}

assert(RegResults.size() == ResultRegTypes.size());
15 changes: 8 additions & 7 deletions clang/lib/CodeGen/CodeGenFunction.h
@@ -4677,13 +4677,14 @@ class CodeGenFunction : public CodeGenTypeCache {
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);

llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);

llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr,
SourceLocation Loc);
std::pair<llvm::Value *, llvm::Type *>
EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
std::string &ConstraintStr);

std::pair<llvm::Value *, llvm::Type *>
EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
QualType InputType, std::string &ConstraintStr,
SourceLocation Loc);

/// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
2 changes: 1 addition & 1 deletion clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -41,7 +41,7 @@ void test_f() {

void test_A(int *p) {
// CHECK-LABEL: define{{.*}} void @test_A(i32* %p)
// CHECK: call void asm sideeffect "", "*A"(i32* %p)
// CHECK: call void asm sideeffect "", "*A"(i32* elementtype(i32) %p)
asm volatile("" :: "A"(*p));
}

22 changes: 11 additions & 11 deletions clang/test/CodeGen/SystemZ/systemz-inline-asm.c
@@ -6,77 +6,77 @@ unsigned long gl;
void test_store_m(unsigned int i) {
asm("st %1, %0" : "=m" (gi) : "r" (i));
// CHECK-LABEL: define{{.*}} void @test_store_m(i32 zeroext %i)
// CHECK: call void asm "st $1, $0", "=*m,r"(i32* nonnull @gi, i32 %i)
// CHECK: call void asm "st $1, $0", "=*m,r"(i32* nonnull elementtype(i32) @gi, i32 %i)
}

void test_store_Q(unsigned int i) {
asm("st %1, %0" : "=Q" (gi) : "r" (i));
// CHECK-LABEL: define{{.*}} void @test_store_Q(i32 zeroext %i)
// CHECK: call void asm "st $1, $0", "=*Q,r"(i32* nonnull @gi, i32 %i)
// CHECK: call void asm "st $1, $0", "=*Q,r"(i32* nonnull elementtype(i32) @gi, i32 %i)
}

void test_store_R(unsigned int i) {
asm("st %1, %0" : "=R" (gi) : "r" (i));
// CHECK-LABEL: define{{.*}} void @test_store_R(i32 zeroext %i)
// CHECK: call void asm "st $1, $0", "=*R,r"(i32* nonnull @gi, i32 %i)
// CHECK: call void asm "st $1, $0", "=*R,r"(i32* nonnull elementtype(i32) @gi, i32 %i)
}

void test_store_S(unsigned int i) {
asm("st %1, %0" : "=S" (gi) : "r" (i));
// CHECK-LABEL: define{{.*}} void @test_store_S(i32 zeroext %i)
// CHECK: call void asm "st $1, $0", "=*S,r"(i32* nonnull @gi, i32 %i)
// CHECK: call void asm "st $1, $0", "=*S,r"(i32* nonnull elementtype(i32) @gi, i32 %i)
}

void test_store_T(unsigned int i) {
asm("st %1, %0" : "=T" (gi) : "r" (i));
// CHECK-LABEL: define{{.*}} void @test_store_T(i32 zeroext %i)
// CHECK: call void asm "st $1, $0", "=*T,r"(i32* nonnull @gi, i32 %i)
// CHECK: call void asm "st $1, $0", "=*T,r"(i32* nonnull elementtype(i32) @gi, i32 %i)
}

int test_load_m() {
unsigned int i;
asm("l %0, %1" : "=r" (i) : "m" (gi));
return i;
// CHECK-LABEL: define{{.*}} signext i32 @test_load_m()
// CHECK: call i32 asm "l $0, $1", "=r,*m"(i32* nonnull @gi)
// CHECK: call i32 asm "l $0, $1", "=r,*m"(i32* nonnull elementtype(i32) @gi)
}

int test_load_Q() {
unsigned int i;
asm("l %0, %1" : "=r" (i) : "Q" (gi));
return i;
// CHECK-LABEL: define{{.*}} signext i32 @test_load_Q()
// CHECK: call i32 asm "l $0, $1", "=r,*Q"(i32* nonnull @gi)
// CHECK: call i32 asm "l $0, $1", "=r,*Q"(i32* nonnull elementtype(i32) @gi)
}

int test_load_R() {
unsigned int i;
asm("l %0, %1" : "=r" (i) : "R" (gi));
return i;
// CHECK-LABEL: define{{.*}} signext i32 @test_load_R()
// CHECK: call i32 asm "l $0, $1", "=r,*R"(i32* nonnull @gi)
// CHECK: call i32 asm "l $0, $1", "=r,*R"(i32* nonnull elementtype(i32) @gi)
}

int test_load_S() {
unsigned int i;
asm("l %0, %1" : "=r" (i) : "S" (gi));
return i;
// CHECK-LABEL: define{{.*}} signext i32 @test_load_S()
// CHECK: call i32 asm "l $0, $1", "=r,*S"(i32* nonnull @gi)
// CHECK: call i32 asm "l $0, $1", "=r,*S"(i32* nonnull elementtype(i32) @gi)
}

int test_load_T() {
unsigned int i;
asm("l %0, %1" : "=r" (i) : "T" (gi));
return i;
// CHECK-LABEL: define{{.*}} signext i32 @test_load_T()
// CHECK: call i32 asm "l $0, $1", "=r,*T"(i32* nonnull @gi)
// CHECK: call i32 asm "l $0, $1", "=r,*T"(i32* nonnull elementtype(i32) @gi)
}

void test_mI(unsigned char *c) {
asm volatile("cli %0, %1" :: "Q" (*c), "I" (100));
// CHECK-LABEL: define{{.*}} void @test_mI(i8* %c)
// CHECK: call void asm sideeffect "cli $0, $1", "*Q,I"(i8* %c, i32 100)
// CHECK: call void asm sideeffect "cli $0, $1", "*Q,I"(i8* elementtype(i8) %c, i32 100)
}

unsigned int test_dJa(unsigned int i, unsigned int j) {
