30 changes: 23 additions & 7 deletions clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -410,21 +410,37 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
Address dstAddr, mlir::Type storageType,
mlir::Value src, const CIRGenBitFieldInfo &info,
bool isLvalueVolatile) {
bool isLvalueVolatile, bool useVolatile) {
unsigned offset = useVolatile ? info.volatileOffset : info.offset;

// If using AAPCS and the field is volatile, access it with the size of the
// declared field.
storageType =
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
return create<cir::SetBitfieldOp>(
loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
info.size, info.offset, info.isSigned, isLvalueVolatile,
info.size, offset, info.isSigned, isLvalueVolatile,
dstAddr.getAlignment().getAsAlign().value());
}

mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
Address addr, mlir::Type storageType,
const CIRGenBitFieldInfo &info,
bool isLvalueVolatile) {
return create<cir::GetBitfieldOp>(
loc, resultType, addr.getPointer(), storageType, info.name, info.size,
info.offset, info.isSigned, isLvalueVolatile,
addr.getAlignment().getAsAlign().value());
bool isLvalueVolatile, bool useVolatile) {
unsigned offset = useVolatile ? info.volatileOffset : info.offset;

// If using AAPCS and the field is volatile, load it with the size of the
// declared field.
storageType =
useVolatile ? cir::IntType::get(storageType.getContext(),
info.volatileStorageSize, info.isSigned)
: storageType;
return create<cir::GetBitfieldOp>(loc, resultType, addr.getPointer(),
storageType, info.name, info.size, offset,
info.isSigned, isLvalueVolatile,
addr.getAlignment().getAsAlign().value());
}
};

26 changes: 17 additions & 9 deletions clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -322,22 +322,28 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
assert(!cir::MissingFeatures::opTBAA());
}

// TODO: Replace this with a proper TargetInfo function call.
/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &targetInfo) {
Contributor:

This seems like something we should be getting through a TargetInfo function call. I'm not asking you to change that in this PR, but can you add a TODO comment? This will be the fourth place in the code (two in CIR, two in classic codegen) that performs the check this way.

Contributor Author:

Done

return targetInfo.getABI().starts_with("aapcs");
}
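
A hypothetical sketch of the refactor the TODO points at (isAAPCS is not an existing TargetInfo member; the standalone type and names below are assumptions for illustration, not the real clang API):

    #include <string>

    // Stand-in for clang's TargetInfo; only the ABI string matters here.
    struct TargetInfoSketch {
      std::string abi; // e.g. "aapcs" or "aapcs-linux"
      // One shared member would replace the four duplicated string checks.
      bool isAAPCS() const { return abi.rfind("aapcs", 0) == 0; } // starts_with
    };

With a helper like this, the function above would reduce to a single call such as targetInfo.isAAPCS().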

mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
LValue dst) {

assert(!cir::MissingFeatures::armComputeVolatileBitfields());

const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
mlir::Type resLTy = convertTypeForMem(dst.getType());
Address ptr = dst.getBitFieldAddress();

assert(!cir::MissingFeatures::armComputeVolatileBitfields());
bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
dst.isVolatileQualified() &&
info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());

mlir::Value dstAddr = dst.getAddress().getPointer();

return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
ptr.getElementType(), src.getValue(), info,
dst.isVolatileQualified());
dst.isVolatileQualified(), useVolatile);
}

RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
@@ -347,10 +353,12 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
mlir::Type resLTy = convertType(lv.getType());
Address ptr = lv.getBitFieldAddress();

assert(!cir::MissingFeatures::armComputeVolatileBitfields());
bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
isAAPCS(cgm.getTarget());

mlir::Value field = builder.createGetBitfield(
getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile());
mlir::Value field =
builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
info, lv.isVolatile(), useVolatile);
assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
return RValue::get(field);
}
@@ -375,10 +383,10 @@ LValue CIRGenFunction::emitLValueForBitField(LValue base,
const CIRGenRecordLayout &layout =
cgm.getTypes().getCIRGenRecordLayout(field->getParent());
const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
assert(!cir::MissingFeatures::armComputeVolatileBitfields());

assert(!cir::MissingFeatures::preservedAccessIndexRegion());
unsigned idx = layout.getCIRFieldNo(field);

unsigned idx = layout.getCIRFieldNo(field);
Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);

mlir::Location loc = getLoc(field->getLocation());
5 changes: 3 additions & 2 deletions clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -847,8 +847,9 @@ void CIRRecordLowering::computeVolatileBitfields()

const CharUnits fEnd =
fOffset +
astContext.toCharUnitsFromBits(astContext.toBits(
getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
astContext.toCharUnitsFromBits(
getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
.getQuantity()) -
CharUnits::One();
// If no overlap, continue.
if (end < fOffset || fEnd < storageOffset)
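
A note on the fix above (an inference from the surrounding code, assuming getSizeInBits reports its result as a CharUnits whose quantity is a bit count): the old expression toCharUnitsFromBits(toBits(...)) first multiplied that quantity by the char width and then divided it back out, so a 32-bit field contributed 32 "bytes" to fEnd, eight times too large. The replacement passes the raw bit count to toCharUnitsFromBits, which divides by the char width once and yields the intended 4 bytes.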
238 changes: 225 additions & 13 deletions clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
@@ -1,8 +1,13 @@
// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir -fdump-record-layouts %s -o %t.cir 1> %t.cirlayout
// RUN: FileCheck --input-file=%t.cirlayout %s --check-prefix=CIR-LAYOUT
// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR

// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM

// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fdump-record-layouts %s -o %t.ll 1> %t.ogcglayout
// RUN: FileCheck --input-file=%t.ogcglayout %s --check-prefix=OGCG-LAYOUT
// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG

typedef struct {
unsigned int a : 9;
@@ -53,21 +58,228 @@ typedef struct{

typedef struct{
volatile unsigned int a : 3;
unsigned int z: 2;
volatile unsigned int b : 5;
unsigned int z;
volatile unsigned long b : 16;
} st4;

// CIR-LAYOUT: BitFields:[
// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:z offset:3 size:2 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:3 volatileStorageSize:32 volatileStorageOffset:0>
// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:5 size:5 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:5 volatileStorageSize:32 volatileStorageOffset:0>
// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:8 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:0 size:16 isSigned:0 storageSize:16 storageOffset:8 volatileOffset:0 volatileStorageSize:64 volatileStorageOffset:1>

// OGCG-LAYOUT: BitFields:[
// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:3 Size:2 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:3 VolatileStorageSize:32 VolatileStorageOffset:0>
// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:5 Size:5 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:5 VolatileStorageSize:32 VolatileStorageOffset:0>

st1 s1;
st2 s2;
st3 s3;
st4 s4;
// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:16 IsSigned:0 StorageSize:16 StorageOffset:8 VolatileOffset:0 VolatileStorageSize:64 VolatileStorageOffset:1>


void def () {
st1 s1;
st2 s2;
st3 s3;
st4 s4;
}

int check_load(st1 *s1) {
return s1->b;
}

// CIR: cir.func dso_local @check_load
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st1>>, !cir.ptr<!rec_st1>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr<!rec_st1> -> !cir.ptr<!u16i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr<!u16i>) -> !u32i
// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return [[RET]] : !s32i

// LLVM: define dso_local i32 @check_load
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st1, ptr [[LOAD]], i32 0, i32 0
// LLVM: [[LOADVOL:%.*]] = load volatile i32, ptr [[MEMBER]], align 4
// LLVM: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
// LLVM: [[CLEAR:%.*]] = and i32 [[LSHR]], 1
// LLVM: store i32 [[CLEAR]], ptr [[RETVAL:%.*]], align 4
// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
// LLVM: ret i32 [[RET]]

// OGCG: define dso_local i32 @check_load
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[LOADVOL:%.*]] = load volatile i32, ptr [[LOAD]], align 4
// OGCG: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9
// OGCG: [[CLEAR:%.*]] = and i32 [[LSHR]], 1
// OGCG: ret i32 [[CLEAR]]

// this volatile bit-field container overlaps with a zero-length bit-field,
// so it may be accessed without using the container's width.
int check_load_exception(st3 *s3) {
return s3->b;
}

// CIR: cir.func dso_local @check_load_exception
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr<!u8i>) -> !u32i
// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i
// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return [[RET]] : !s32i

// LLVM: define dso_local i32 @check_load_exception
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
// LLVM: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
// LLVM: ret i32 [[RET]]

// OGCG: define dso_local i32 @check_load_exception
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31
// OGCG: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32
// OGCG: ret i32 [[CAST]]

typedef struct {
volatile int a : 24;
char b;
volatile int c: 30;
} clip;

int clip_load_exception2(clip *c) {
return c->a;
}

// CIR: cir.func dso_local @clip_load_exception2
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_a1, [[MEMBER]] {is_volatile} : !cir.ptr<!cir.array<!u8i x 3>>) -> !s32i
// CIR: cir.store [[BITFI]], [[RETVAL:%.*]] : !s32i, !cir.ptr<!s32i>
// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.return [[RET]] : !s32i

// LLVM: define dso_local i32 @clip_load_exception2
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
// LLVM: [[LOADVOL:%.*]] = load volatile i24, ptr [[MEMBER]], align 4
// LLVM: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4
// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4
// LLVM: ret i32 [[RET]]

// OGCG: define dso_local i32 @clip_load_exception2
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[LOADVOL:%.*]] = load volatile i24, ptr [[LOAD]], align 4
// OGCG: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32
// OGCG: ret i32 [[CAST]]

void check_store(st2 *s2) {
s2->a = 1;
}

// CIR: cir.func dso_local @check_store
// CIR: [[CONST:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st2>>, !cir.ptr<!rec_st2>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_st2> -> !cir.ptr<!u32i>
// CIR: [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr<!u32i>, [[CAST]] : !s16i) {is_volatile} -> !s16i
// CIR: cir.return

// LLVM: define dso_local void @check_store
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st2, ptr [[LOAD]], i32 0, i32 0
// LLVM: [[LOADVOL:%.*]] = load volatile i16, ptr [[MEMBER]], align 8
// LLVM: [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
// LLVM: [[SET:%.*]] = or i16 [[CLEAR]], 1
// LLVM: store volatile i16 [[SET]], ptr [[MEMBER]], align 8
// LLVM: ret void

// OGCG: define dso_local void @check_store
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[LOADVOL:%.*]] = load volatile i16, ptr [[LOAD]], align 8
// OGCG: [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8
// OGCG: [[SET:%.*]] = or i16 [[CLEAR]], 1
// OGCG: store volatile i16 [[SET]], ptr [[LOAD]], align 8
// OGCG: ret void

// this volatile bit-field container overlaps with a zero-length bit-field,
// so it may be accessed without using the container's width.
void check_store_exception(st3 *s3) {
s3->b = 2;
}

// CIR: cir.func dso_local @check_store_exception
// CIR: [[CONST:%.*]] = cir.const #cir.int<2> : !s32i
// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st3>>, !cir.ptr<!rec_st3>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st3> -> !cir.ptr<!u8i>
// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr<!u8i>, [[CAST]] : !u32i) {is_volatile} -> !u32i
// CIR: cir.return

// LLVM: define dso_local void @check_store_exception
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2
// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
// LLVM: [[SET:%.*]] = or i8 [[CLEAR]], 2
// LLVM: store volatile i8 [[SET]], ptr [[MEMBER]], align 4
// LLVM: ret void

// OGCG: define dso_local void @check_store_exception
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2
// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4
// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32
// OGCG: [[SET:%.*]] = or i8 [[CLEAR]], 2
// OGCG: store volatile i8 [[SET]], ptr [[MEMBER]], align 4
// OGCG: ret void

void clip_store_exception2(clip *c) {
c->a = 3;
}

// CIR: cir.func dso_local @clip_store_exception2
// CIR: [[CONST:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_clip>>, !cir.ptr<!rec_clip>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr<!rec_clip> -> !cir.ptr<!cir.array<!u8i x 3>>
// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_a1, [[MEMBER]] : !cir.ptr<!cir.array<!u8i x 3>>, [[CONST]] : !s32i) {is_volatile} -> !s32i
// CIR: cir.return

// LLVM: define dso_local void @clip_store_exception2
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0
// LLVM: store volatile i24 3, ptr [[MEMBER]], align 4
// LLVM: ret void

// OGCG: define dso_local void @clip_store_exception2
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: store volatile i24 3, ptr [[LOAD]], align 4
// OGCG: ret void

void check_store_second_member (st4 *s4) {
s4->b = 1;
}

// CIR: cir.func dso_local @check_store_second_member
// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i
// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr<!cir.ptr<!rec_st4>>, !cir.ptr<!rec_st4>
// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr<!rec_st4> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr<!u16i>, [[CAST]] : !u64i) {is_volatile} -> !u64i

// LLVM: define dso_local void @check_store_second_member
// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st4, ptr [[LOAD]], i32 0, i32 2
// LLVM: [[VAL:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
// LLVM: [[CLEAR:%.*]] = and i64 [[VAL]], -65536
// LLVM: [[SET:%.*]] = or i64 [[CLEAR]], 1
// LLVM: store volatile i64 [[SET]], ptr [[MEMBER]], align 8

// OGCG: define dso_local void @check_store_second_member
// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8
// OGCG: [[MEMBER:%.*]] = getelementptr inbounds i64, ptr [[LOAD]], i64 1
Comment on lines +273 to +281

Contributor Author:
There is one difference from classic CodeGen in how member access is handled in the case of volatile loads. In the classic implementation, a GEP is used based on the size of the declared field, as shown here:

if (UseVolatile) {
  const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
  if (VolatileOffset)
    Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
}

Here's an example demonstrating that behavior:
https://godbolt.org/z/h98eEnd6f
In this example, the GEP is computed using the declared field type.

In CIR, however, we currently use a structure-type-based GEP instead. I believe this is semantically equivalent, but I'd like to confirm: is this correct?

Contributor:
A GEP using an integer type and a GEP using a structure type can be equivalent if they both end up computing the same pointer. In this case, you have %struct.st4 = type { i8, i32, i16 }. Assuming this is padded so that the third element occurs at an offset of 64 bits from the start of the structure, then getelementptr inbounds i64, ptr [[LOAD]], i64 1 will be equivalent to getelementptr %struct.st4, ptr [[LOAD]], i32 0, i32 2.
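
To make that equivalence concrete, here is a minimal sketch (assuming the aarch64 layout dumped above, where the container of b sits at byte offset 8); both GEP forms reduce to base + 8 bytes:

    #include <cassert>
    #include <cstddef>

    // The test struct from above; on aarch64 it lowers to { i8, i32, i16 }.
    typedef struct {
      volatile unsigned int a : 3;   // 8-bit container at byte offset 0
      unsigned int z;                // 4-byte member at byte offset 4
      volatile unsigned long b : 16; // 16-bit container at byte offset 8
    } st4;

    int main() {
      // classic codegen: getelementptr i64, ptr %p, i64 1 -> 1 * 8 bytes
      std::size_t intGep = 1 * sizeof(unsigned long);
      // CIR: getelementptr %struct.st4, ptr %p, i32 0, i32 2 -> b's
      // container starts right after the 4-byte member z
      std::size_t structGep = offsetof(st4, z) + sizeof(unsigned int);
      assert(intGep == structGep); // both are 8 on this target
      return 0;
    }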

// OGCG: [[LOADBF:%.*]] = load volatile i64, ptr [[MEMBER]], align 8
// OGCG: [[CLR:%.*]] = and i64 [[LOADBF]], -65536
// OGCG: [[SET:%.*]] = or i64 [[CLR]], 1
// OGCG: store volatile i64 [[SET]], ptr [[MEMBER]], align 8