
[clang] Move tailclipping to bitfield allocation (#87090)
Move bitfield access clipping to bitfield access computation.
urnathan committed Apr 15, 2024
1 parent 905d2ec commit 69d861e
Showing 2 changed files with 62 additions and 40 deletions.
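For readers unfamiliar with the term, "tail clipping" means shrinking a bitfield's storage type so it does not cover padding bytes that other data may occupy. A minimal illustration, not taken from this commit (the struct and comments below are hypothetical, assuming a typical LP64 target):

    // Hypothetical example, not part of the patch.
    struct S {
      unsigned a : 24; // natural storage type would be i24
      char b;          // laid out at offset 3, inside i24's padding byte
    };
    // i24 has a storage size of 4 bytes, so lowering 'a' as i24 would let
    // accesses to 'a' touch the byte holding 'b'; the storage is therefore
    // "clipped" to a 3-byte array, [3 x i8].

Previously this clipping was done in a separate pass (clipTailPadding); with this commit it is decided up front while bitfield access units are accumulated, and the old pass is reduced to a debug-only consistency check.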
84 changes: 44 additions & 40 deletions clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -41,10 +41,11 @@ namespace {
/// contains enough information to determine where the runs break. Microsoft
/// and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
/// i24. This isn't always possible because i24 has storage size of 32 bit
/// and if it is possible to use that extra byte of padding we must use
/// [i8 x 3] instead of i24. The function clipTailPadding does this.
/// and if it is possible to use that extra byte of padding we must use [i8 x
/// 3] instead of i24. This is computed when accumulating bitfields in
/// accumulateBitfields.
/// C++ examples that require clipping:
/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
/// struct A { int a : 24; ~A(); }; // a must be clipped because:
@@ -62,11 +63,7 @@ namespace {
/// that the tail padding is not used in the complete class.) However,
/// because LLVM reads from the complete type it can generate incorrect code
/// if we do not clip the tail padding off of the bitfield in the complete
/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
/// The location of the clip is stored internally as a sentinel of type
/// SCISSOR. If LLVM were updated to read base types (which it probably
/// should because locations of things such as VBases are bogus in the llvm
/// type anyway) then we could eliminate the SCISSOR.
/// layout.
/// * Itanium allows nearly empty primary virtual bases. These bases don't get
/// get their own storage because they're laid out as part of another base
/// or at the beginning of the structure. Determining if a VBase actually
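The clipping scenario described in the comment above is easiest to see with a concrete pair of types. The following illustration is not part of the patch and assumes the Itanium C++ ABI:

    // Illustration only; assumes the Itanium C++ ABI.
    struct A {
      int a : 24;
      ~A();            // non-trivial destructor: A's tail padding (byte 3)
                       // may be reused by derived classes
    };
    struct B : A {
      char b;          // permitted to live at offset 3, in A's tail padding
    };
    // LLVM loads and stores the bitfield through A's complete-object type, so
    // if that type kept an i24 (4 bytes of storage) for 'a', a store to 'a'
    // inside a B could clobber 'b'. The complete-object layout must therefore
    // clip the storage to [3 x i8], even though a standalone A never uses its
    // tail padding.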
@@ -200,9 +197,7 @@ struct CGRecordLowering {
const CXXRecordDecl *Query) const;
void calculateZeroInit();
CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const;
/// Lowers bitfield storage types to I8 arrays for bitfields with tail
/// padding that is or can potentially be used.
void clipTailPadding();
void checkBitfieldClipping() const;
/// Determines if we need a packed llvm struct.
void determinePacked(bool NVBaseType);
/// Inserts padding everywhere it's needed.
@@ -305,7 +300,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
}
llvm::stable_sort(Members);
Members.push_back(StorageInfo(Size, getIntNType(8)));
clipTailPadding();
checkBitfieldClipping();
determinePacked(NVBaseType);
insertPadding();
Members.pop_back();
@@ -531,6 +526,7 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
// available padding characters.
RecordDecl::field_iterator BestEnd = Begin;
CharUnits BestEndOffset;
bool BestClipped; // Whether the representation must be in a byte array.

for (;;) {
// AtAlignedBoundary is true iff Field is the (potential) start of a new
@@ -593,10 +589,9 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
// this is the best seen so far.
BestEnd = Field;
BestEndOffset = BeginOffset + AccessSize;
if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
// Fine-grained access, so no merging of spans.
InstallBest = true;
else if (!BitSizeSinceBegin)
// Assume clipped until proven not below.
BestClipped = true;
if (!BitSizeSinceBegin)
// A zero-sized initial span -- this will install nothing and reset
// for another.
InstallBest = true;
@@ -624,6 +619,12 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
// The access unit is not at a naturally aligned offset within the
// structure.
InstallBest = true;

if (InstallBest && BestEnd == Field)
// We're installing the first span, whose clipping was presumed
// above. Compute it correctly.
if (getSize(Type) == AccessSize)
BestClipped = false;
}

if (!InstallBest) {
@@ -656,11 +657,15 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
// access unit.
BestEndOffset = BeginOffset + TypeSize;
BestEnd = Field;
BestClipped = false;
}

if (Barrier)
// The next field is a barrier that we cannot merge across.
InstallBest = true;
else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
// Fine-grained access, so no merging of spans.
InstallBest = true;
else
// Otherwise, we're not installing. Update the bit size
// of the current span to go all the way to LimitOffset, which is
@@ -679,7 +684,17 @@ CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
// Add the storage member for the access unit to the record. The
// bitfields get the offset of their storage but come afterward and
// remain there after a stable sort.
llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
llvm::Type *Type;
if (BestClipped) {
assert(getSize(getIntNType(Context.toBits(AccessSize))) >
AccessSize &&
"Clipped access need not be clipped");
Type = getByteArrayType(AccessSize);
} else {
Type = getIntNType(Context.toBits(AccessSize));
assert(getSize(Type) == AccessSize &&
"Unclipped access must be clipped");
}
Members.push_back(StorageInfo(BeginOffset, Type));
for (; Begin != BestEnd; ++Begin)
if (!Begin->isZeroLengthBitField(Context))
@@ -934,32 +949,21 @@ void CGRecordLowering::calculateZeroInit() {
}
}

void CGRecordLowering::clipTailPadding() {
std::vector<MemberInfo>::iterator Prior = Members.begin();
CharUnits Tail = getSize(Prior->Data);
for (std::vector<MemberInfo>::iterator Member = Prior + 1,
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
// Verify accumulateBitfields computed the correct storage representations.
void CGRecordLowering::checkBitfieldClipping() const {
#ifndef NDEBUG
auto Tail = CharUnits::Zero();
for (const auto &M : Members) {
// Only members with data and the scissor can cut into tail padding.
if (!Member->Data && Member->Kind != MemberInfo::Scissor)
if (!M.Data && M.Kind != MemberInfo::Scissor)
continue;
if (Member->Offset < Tail) {
assert(Prior->Kind == MemberInfo::Field &&
"Only storage fields have tail padding!");
if (!Prior->FD || Prior->FD->isBitField())
Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
else {
assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
"should not have reused this field's tail padding");
Prior->Data = getByteArrayType(
Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
}
}
if (Member->Data)
Prior = Member;
Tail = Prior->Offset + getSize(Prior->Data);

assert(M.Offset >= Tail && "Bitfield access unit is not clipped");
Tail = M.Offset;
if (M.Data)
Tail += getSize(M.Data);
}
#endif
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
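To summarize the new flow: accumulateBitFields now records, for each installed access unit, whether the natural iN type would over-allocate (in which case the unit is lowered as a byte array), and checkBitfieldClipping only re-verifies in debug builds that the chosen storage members never overlap. The helper below is a standalone paraphrase, not clang code; it models iN allocation as rounding up to a power of two bytes, which matches common DataLayouts but is an assumption here (the real code asks llvm::DataLayout::getTypeAllocSize via getSize).

    // Standalone paraphrase of the clipping rule; not part of the patch.
    #include <cstdint>
    #include <cstdio>

    // Assumed allocation model: an iN integer occupies ceil(N/8) bytes rounded
    // up to the next power of two.
    static uint64_t allocBytesForIntN(uint64_t bits) {
      uint64_t bytes = (bits + 7) / 8;
      uint64_t alloc = 1;
      while (alloc < bytes)
        alloc *= 2;
      return alloc;
    }

    // An access unit of `unitBytes` chars must be "clipped" -- lowered as
    // [unitBytes x i8] instead of iN -- when iN's allocation would spill past
    // the unit's end and could overlap neighbouring data.
    static bool mustClip(uint64_t unitBytes) {
      return allocBytesForIntN(unitBytes * 8) > unitBytes;
    }

    int main() {
      std::printf("3-byte unit: %s\n", mustClip(3) ? "clipped" : "iN"); // i24 allocates 4 bytes
      std::printf("4-byte unit: %s\n", mustClip(4) ? "clipped" : "iN"); // i32 fits exactly
      std::printf("6-byte unit: %s\n", mustClip(6) ? "clipped" : "iN"); // i48 allocates 8 bytes
    }

In the patch itself the corresponding decision is BestClipped, cleared when getSize of the natural integer type equals AccessSize, and a clipped unit's storage is produced by getByteArrayType(AccessSize).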
18 changes: 18 additions & 0 deletions clang/test/CodeGen/bitfield-access-unit.c
@@ -222,6 +222,24 @@ struct G {
// LAYOUT-DWN32-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:7 IsSigned:1 StorageSize:8 StorageOffset:4
// CHECK-NEXT: ]>

struct __attribute__((aligned(8))) H {
char a;
unsigned b : 24; // on expensive alignment we want this to stay 24
unsigned c __attribute__((aligned(8))); // Think 'long long' or lp64 ptr
} h;
// CHECK-LABEL: LLVMType:%struct.H =
// LAYOUT-FLEX-SAME: type <{ i8, i32, [3 x i8], i32, [4 x i8] }>
// LAYOUT-STRICT-SAME: type { i8, [3 x i8], [4 x i8], i32, [4 x i8] }
// LAYOUT-DWN32-FLEX-SAME: type <{ i8, i32, [3 x i8], i32, [4 x i8] }>
// LAYOUT-DWN32-STRICT-SAME: type { i8, [3 x i8], [4 x i8], i32, [4 x i8] }
// CHECK: BitFields:[
// LAYOUT-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:32 StorageOffset:1
// LAYOUT-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:24 StorageOffset:1

// LAYOUT-DWN32-FLEX-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:32 StorageOffset:1
// LAYOUT-DWN32-STRICT-NEXT: <CGBitFieldInfo Offset:{{[0-9]+}} Size:24 IsSigned:0 StorageSize:24 StorageOffset:1
// CHECK-NEXT: ]>

#if _LP64
struct A64 {
int a : 16;
