From ee36691bad001fed52f79e040acab98e34ee7372 Mon Sep 17 00:00:00 2001
From: gitoleg
Date: Thu, 14 Sep 2023 20:12:53 +0300
Subject: [PATCH] [CIR][CIRGen] CIR generation for bitfields. Fixes #13 (#233)

This PR introduces bitfields support. This now works:
```
#include <stdio.h>

typedef struct {
  int a1 : 4;
  int a2 : 28;
  int a3 : 16;
  int a4 : 3;
  int a5 : 17;
  int a6 : 25;
} A;

void init(A* a) {
  a->a1 = 1;
  a->a2 = 321;
  a->a3 = 15;
  a->a4 = -2;
  a->a5 = -123;
  a->a6 = 1234;
}

void print(A* a) {
  printf("%d %d %d %d %d %d\n",
         a->a1, a->a2, a->a3, a->a4, a->a5, a->a6 );
}

int main() {
  A a;
  init(&a);
  print(&a);
  return 0;
}
```
the output is: `1 321 15 -2 -123 1234`
---
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |  65 ++++-
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          | 229 +++++++++++++++++-
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    | 138 +++++------
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |  15 +-
 clang/lib/CIR/CodeGen/CIRGenRecordLayout.h    |  10 +
 clang/lib/CIR/CodeGen/CIRGenValue.h           |  34 +++
 .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp    |   8 +-
 .../CodeGen/UnimplementedFeatureGuarding.h    |   1 +
 clang/test/CIR/CodeGen/bitfields.c            |  83 +++++++
 clang/test/CIR/CodeGen/bitfields.cpp          |  65 +++++
 10 files changed, 563 insertions(+), 85 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/bitfields.c

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index b5ab97f19599..5c143ae1b5f2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -460,6 +460,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create(loc, typ, + getAttr(typ, val)); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -677,6 +682,65 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = lhs.getType().dyn_cast().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { 
return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// @@ -727,6 +791,5 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } }; - } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index d82b610520aa..0077981ddf78 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -21,6 +21,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E, if (PtrTy->getPointeeType()->isVoidType()) break; assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); @@ -211,13 +213,78 @@ static Address buildPointerWithAlignment(const Expr *E, return Address(CGF.buildScalarExpr(E), Align); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &TargetInfo) { + return TargetInfo.getABI().starts_with("aapcs"); +} + +Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, + unsigned index) { + if (index == 0) + return base.getAddress(); + + auto loc = getLoc(field->getLocation()); + auto fieldType = convertType(field->getType()); + auto fieldPtr = + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto sea = getBuilder().createGetMember( + loc, fieldPtr, base.getPointer(), field->getName(), index); + + return Address(sea, CharUnits::One()); +} + +static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, + const CIRGenBitFieldInfo &info, + const FieldDecl *field) { + return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth && + info.VolatileStorageSize != 0 && + field->getType() + .withCVRQualifiers(base.getVRQualifiers()) + .isVolatileQualified(); +} + +LValue CIRGenFunction::buildLValueForBitField(LValue base, + const FieldDecl *field) { + + LValueBaseInfo BaseInfo = base.getBaseInfo(); + const RecordDecl *rec = field->getParent(); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + auto &info = layout.getBitFieldInfo(field); + auto useVolatile = useVolatileForBitField(CGM, base, info, field); + unsigned Idx = layout.getCIRFieldNo(field); + + if (useVolatile || + (IsInPreservedAIRegion || + (getDebugInfo() && rec->hasAttr()))) { + llvm_unreachable("NYI"); + } + + Address Addr = getAddrOfField(base, field, Idx); + + const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; + + // Get the access type. 
+ mlir::Type FieldIntTy = builder.getUIntNTy(SS); + + auto loc = getLoc(field->getLocation()); + if (Addr.getElementType() != FieldIntTy) + Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + + QualType fieldType = + field->getType().withCVRQualifiers(base.getVRQualifiers()); + + assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); + LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { + LValueBaseInfo BaseInfo = base.getBaseInfo(); - if (field->isBitField()) { - llvm_unreachable("NYI"); - } + if (field->isBitField()) + return buildLValueForBitField(base, field); // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -520,12 +587,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { - assert(LV.isSimple() && "not implemented"); assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); - // Everything needs a load. - return RValue::get(buildLoadOfScalar(LV, Loc)); + if (LV.isBitField()) + return buildLoadOfBitfieldLValue(LV, Loc); + + if (LV.isSimple()) + return RValue::get(buildLoadOfScalar(LV, Loc)); + llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, + SourceLocation Loc) { + const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); + + // Get the output type. + mlir::Type ResLTy = convertType(LV.getType()); + Address Ptr = LV.getBitFieldAddress(); + mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); + auto ValWidth = Val.getType().cast().getWidth(); + + bool UseVolatile = LV.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + + if (Info.IsSigned) { + assert(static_cast(Offset + Info.Size) <= StorageSize); + + mlir::Type typ = builder.getSIntNTy(ValWidth); + Val = builder.createIntCast(Val, typ); + + unsigned HighBits = StorageSize - Offset - Info.Size; + if (HighBits) + Val = builder.createShiftLeft(Val, HighBits); + if (Offset + HighBits) + Val = builder.createShiftRight(Val, Offset + HighBits); + } else { + if (Offset) + Val = builder.createShiftRight(Val, Offset); + + if (static_cast(Offset) + Info.Size < StorageSize) + Val = builder.createAnd(Val, + llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); + } + Val = builder.createIntCast(Val, ResLTy); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); + return RValue::get(Val); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -548,6 +658,81 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { buildStoreOfScalar(Src.getScalarVal(), Dst); } +void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result) { + const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); + mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); + Address Ptr = Dst.getBitFieldAddress(); + + // Get the source value, truncated to the width of the bit-field. 
+ mlir::Value SrcVal = Src.getScalarVal(); + + // Cast the source to the storage type and shift it into place. + SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); + auto SrcWidth = SrcVal.getType().cast().getWidth(); + mlir::Value MaskedVal = SrcVal; + + const bool UseVolatile = + CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + // See if there are other bits in the bitfield's storage we'll need to load + // and mask together with source before storing. + if (StorageSize != Info.Size) { + assert(StorageSize > Info.Size && "Invalid bitfield size."); + + mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); + + // Mask the source value as needed. + if (!hasBooleanRepresentation(Dst.getType())) + SrcVal = builder.createAnd( + SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + + MaskedVal = SrcVal; + if (Offset) + SrcVal = builder.createShiftLeft(SrcVal, Offset); + + // Mask out the original value. + Val = builder.createAnd( + Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + + // Or together the unchanged values and the source value. + SrcVal = builder.createOr(Val, SrcVal); + + } else { + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + llvm_unreachable("volatile bit-field is not implemented for the AACPS"); + } + + // Write the new value back out. + // TODO: constant matrix type, volatile, no init, non temporal, TBAA + buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), + Dst.getBaseInfo(), false, false); + + // Return the new value of the bit-field. + mlir::Value ResultVal = MaskedVal; + ResultVal = builder.createIntCast(ResultVal, ResLTy); + + // Sign extend the value if needed. 
+ if (Info.IsSigned) { + assert(Info.Size <= StorageSize); + unsigned HighBits = StorageSize - Info.Size; + + if (HighBits) { + ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftRight(ResultVal, HighBits); + } + } + + Result = buildFromMemory(ResultVal, Dst.getType()); +} + static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); @@ -771,7 +956,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buildStoreThroughLValue(RV, LV); + if (LV.isBitField()) { + mlir::Value result; + buildStoreThroughBitfieldLValue(RV, LV, result); + } else { + buildStoreThroughLValue(RV, LV); + } + assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -2207,6 +2398,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { + return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), + lvalue.isNontemporal()); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, + mlir::Location Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.isNontemporal()); @@ -2224,6 +2422,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { + return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, + isNontemporal); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, + QualType Ty, mlir::Location Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal) { if (!CGM.getCodeGenOpts().PreserveVec3Type) { if (Ty->isVectorType()) { llvm_unreachable("NYI"); @@ -2237,15 +2443,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - getLoc(Loc), Addr.getElementType(), Addr.getPointer()); + Loc, Addr.getElementType(), Addr.getPointer()); if (isNontemporal) { llvm_unreachable("NYI"); } - - // TODO: TBAA - - // TODO: buildScalarRangeCheck + + assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 5a48e44f61eb..f4a76958bcf2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1060,9 +1060,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); - - auto &DL = CGF.CGM.getDataLayout(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) @@ -1863,7 +1861,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. 
if (LHS.isBitField()) { - llvm_unreachable("NYI"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, @@ -1964,25 +1962,27 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( auto condV = CGF.evaluateExprAsBool(condExpr); assert(!UnimplementedFeature::incrementProfileCounter()); - return builder.create( - loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = Visit(lhsExpr); - if (!lhs) { - lhs = builder.getNullValue(CGF.VoidTy, loc); - lhsIsVoid = true; - } - builder.create(loc, lhs); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = Visit(rhsExpr); - if (lhsIsVoid) { - assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(CGF.VoidTy, loc); - } - builder.create(loc, rhs); - }).getResult(); + return builder + .create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }) + .getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); @@ -2012,51 +2012,53 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } }; - return builder.create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto lhs = Visit(lhsExpr); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto rhs = Visit(rhsExpr); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }).getResult(); + return builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. 
+ insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 819a99f81ec7..ea5bf59d92c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -869,6 +869,12 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + mlir::Location Loc, LValueBaseInfo BaseInfo, + bool isNontemporal = false); + + RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); + /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, @@ -883,6 +889,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); @@ -1237,6 +1244,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); + void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value &Result); + mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is @@ -1514,7 +1524,8 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - + LValue buildLValueForBitField(LValue base, const FieldDecl *field); + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. @@ -1543,6 +1554,8 @@ class CIRGenFunction : public CIRGenTypeCache { return it->second; } + Address getAddrOfField(LValue base, const clang::FieldDecl *field, unsigned index); + /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. 
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index b1ded0017d59..0a686181db61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -187,6 +187,16 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } + + /// Return the BitFieldInfo that corresponds to the field FD. + const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const { + FD = FD->getCanonicalDecl(); + assert(FD->isBitField() && "Invalid call for non-bit-field decl!"); + llvm::DenseMap::const_iterator + it = BitFields.find(FD); + assert(it != BitFields.end() && "Unable to find bitfield info"); + return it->second; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index f84c20c4b136..c6edeb4d4fe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" +#include "CIRGenRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -207,6 +208,7 @@ class LValue { mlir::Value V; mlir::Type ElementType; LValueBaseInfo BaseInfo; + const CIRGenBitFieldInfo *BitFieldInfo{0}; public: bool isSimple() const { return LVType == Simple; } @@ -298,6 +300,38 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } + + // bitfield lvalue + Address getBitFieldAddress() const { + return Address(getBitFieldPointer(), ElementType, getAlignment()); + } + + mlir::Value getBitFieldPointer() const { + assert(isBitField()); + return V; + } + + const CIRGenBitFieldInfo &getBitFieldInfo() const { + assert(isBitField()); + return *BitFieldInfo; + } + + /// Create a new object to represent a bit-field access. + /// + /// \param Addr - The base address of the bit-field sequence this + /// bit-field refers to. + /// \param Info - The information describing how to perform the bit-field + /// access. + static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = BitField; + R.V = Addr.getPointer(); + R.ElementType = Addr.getElementType(); + R.BitFieldInfo = &Info; + R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); + return R; + } }; /// An aggregate value slot. diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 52fc6069f7b8..44e899871418 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -136,7 +136,7 @@ struct CIRRecordLowering final { /// Wraps mlir::cir::IntType with some implicit arguments. 
mlir::Type getUIntNType(uint64_t NumBits) { - unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); + unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, /*isSigned=*/false); } @@ -214,8 +214,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -499,6 +499,8 @@ void CIRRecordLowering::accumulateBitFields( // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset) { + if (OffsetInRecord >= 64) // See IntType::verify + return true; if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d39bb3c1b48d..5a857a2db39f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -138,6 +138,7 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } + static bool emitScalarRangeCheck() { return false; } }; } // namespace cir diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c new file mode 100644 index 000000000000..3be014e50ede --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -0,0 +1,83 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + struct __long l; +} + +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; +} S; // 65 bits in total, i.e. 
more than 64 + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func {{.*@store_neg_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S* s) { + return s->d; +} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 48eec3bd093b..27e24f30d582 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir // RUN: FileCheck --input-file=%t.cir %s +// XFAIL: * struct __long { struct __attribute__((__packed__)) { @@ -16,3 +17,67 @@ void m() { // CHECK: !ty_22anon22 = !cir.struct // CHECK: !ty_22__long22 = !cir.struct}> + +struct S { + int a : 4; + int b : 
27; + int c : 17; + int d : 2; + int e : 15; +}; // 65 bits in total, i.e. more than 64 + +// CHECK: cir.func @_Z11store_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func @_Z15store_neg_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func @_Z10load_field +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S& s) { + return s.d; +}
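
For reference, the access sequences checked in the tests above reduce to ordinary mask-and-shift arithmetic. Below is a minimal C sketch of what the generated CIR computes for the 2-bit signed field `d` (stored at bits 17-18 of a 32-bit storage unit, matching the shift amounts 13/30 and the mask constant 4294574079 in the CHECK lines); the helper names `load_d` and `store_d` are illustrative only and not part of the patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Load of S::d: shift the field up so its top bit becomes the sign bit,
 * then shift back down to sign-extend. HighBits = 32 - 17 - 2 = 13 and
 * Offset + HighBits = 30, the constants seen in the load_field CHECKs. */
static int load_d(uint32_t storage) {
  int32_t v = (int32_t)(storage << 13);
  /* >> on a negative int is arithmetic on the targets modeled here,
   * mirroring the cir.shift(right, ...) op above. */
  return v >> 30;
}

/* Store of S::d: truncate the source to Size bits, shift it to Offset,
 * clear the old bits with the inverted mask (~(0x3 << 17) == 4294574079,
 * the constant in the store_neg_field CHECKs), then OR the pieces together. */
static uint32_t store_d(uint32_t storage, int value) {
  uint32_t src = (uint32_t)value & 0x3u; /* low-bits mask */
  storage &= ~(0x3u << 17);              /* mask out the original field */
  storage |= src << 17;                  /* or in the new value */
  return storage;
}

int main(void) {
  uint32_t unit = 0;
  unit = store_d(unit, -1);
  printf("%d\n", load_d(unit)); /* prints -1, as in store_neg_field/load_field */
  return 0;
}
```

Storing -1 and reading it back yields -1, which is the round-trip behavior the `store_neg_field` and `load_field` tests exercise.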