From 6ff88bb5ea307ca083bbc408f0d7cc3775ff63ac Mon Sep 17 00:00:00 2001 From: Andres Salamanca Date: Sun, 3 Aug 2025 12:21:55 -0500 Subject: [PATCH 1/2] [CIR] Implemented get/set for volatile bitfields --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 30 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 25 +- .../CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 5 +- .../CIR/CodeGen/aapcs-volatile-bitfields.c | 238 +++++++++++++++++- 4 files changed, 267 insertions(+), 31 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 73c9fb924f682..ff8e12190c972 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -410,21 +410,37 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, Address dstAddr, mlir::Type storageType, mlir::Value src, const CIRGenBitFieldInfo &info, - bool isLvalueVolatile) { + bool isLvalueVolatile, bool useVolatile) { + unsigned offset = useVolatile ? info.volatileOffset : info.offset; + + // If using AAPCS and the field is volatile, load with the size of the + // declared field + storageType = + useVolatile ? cir::IntType::get(storageType.getContext(), + info.volatileStorageSize, info.isSigned) + : storageType; return create( loc, resultType, dstAddr.getPointer(), storageType, src, info.name, - info.size, info.offset, info.isSigned, isLvalueVolatile, + info.size, offset, info.isSigned, isLvalueVolatile, dstAddr.getAlignment().getAsAlign().value()); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, Address addr, mlir::Type storageType, const CIRGenBitFieldInfo &info, - bool isLvalueVolatile) { - return create( - loc, resultType, addr.getPointer(), storageType, info.name, info.size, - info.offset, info.isSigned, isLvalueVolatile, - addr.getAlignment().getAsAlign().value()); + bool isLvalueVolatile, bool useVolatile) { + unsigned offset = useVolatile ? 
info.volatileOffset : info.offset; + + // If using AAPCS and the field is volatile, load with the size of the + // declared field + storageType = + useVolatile ? cir::IntType::get(storageType.getContext(), + info.volatileStorageSize, info.isSigned) + : storageType; + return create(loc, resultType, addr.getPointer(), + storageType, info.name, info.size, offset, + info.isSigned, isLvalueVolatile, + addr.getAlignment().getAsAlign().value()); } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index cd37a2bd276bc..574a46715c2ee 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -322,22 +322,27 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, assert(!cir::MissingFeatures::opTBAA()); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &targetInfo) { + return targetInfo.getABI().starts_with("aapcs"); +} + mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src, LValue dst) { - assert(!cir::MissingFeatures::armComputeVolatileBitfields()); - const CIRGenBitFieldInfo &info = dst.getBitFieldInfo(); mlir::Type resLTy = convertTypeForMem(dst.getType()); Address ptr = dst.getBitFieldAddress(); - assert(!cir::MissingFeatures::armComputeVolatileBitfields()); + bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth && + dst.isVolatileQualified() && + info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget()); mlir::Value dstAddr = dst.getAddress().getPointer(); return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr, ptr.getElementType(), src.getValue(), info, - dst.isVolatileQualified()); + dst.isVolatileQualified(), useVolatile); } RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) { @@ -347,10 +352,12 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) { mlir::Type resLTy = convertType(lv.getType()); Address ptr = lv.getBitFieldAddress(); - 
assert(!cir::MissingFeatures::armComputeVolatileBitfields()); + bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 && + isAAPCS(cgm.getTarget()); - mlir::Value field = builder.createGetBitfield( - getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile()); + mlir::Value field = + builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(), + info, lv.isVolatile(), useVolatile); assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI"); return RValue::get(field); } @@ -375,10 +382,10 @@ LValue CIRGenFunction::emitLValueForBitField(LValue base, const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(field->getParent()); const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field); - assert(!cir::MissingFeatures::armComputeVolatileBitfields()); + assert(!cir::MissingFeatures::preservedAccessIndexRegion()); - unsigned idx = layout.getCIRFieldNo(field); + unsigned idx = layout.getCIRFieldNo(field); Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx); mlir::Location loc = getLoc(field->getLocation()); diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp index ecf31a7024987..1764967329969 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp @@ -847,8 +847,9 @@ void CIRRecordLowering::computeVolatileBitfields() { const CharUnits fEnd = fOffset + - astContext.toCharUnitsFromBits(astContext.toBits( - getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) - + astContext.toCharUnitsFromBits( + getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())) + .getQuantity()) - CharUnits::One(); // If no overlap, continue. 
if (end < fOffset || fEnd < storageOffset) diff --git a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c index 3643cf257933e..00378f725d76a 100644 --- a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c +++ b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c @@ -1,8 +1,13 @@ // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir -fdump-record-layouts %s -o %t.cir 1> %t.cirlayout // RUN: FileCheck --input-file=%t.cirlayout %s --check-prefix=CIR-LAYOUT +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR + +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM // RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fdump-record-layouts %s -o %t.ll 1> %t.ogcglayout // RUN: FileCheck --input-file=%t.ogcglayout %s --check-prefix=OGCG-LAYOUT +// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG typedef struct { unsigned int a : 9; @@ -53,21 +58,228 @@ typedef struct{ typedef struct{ volatile unsigned int a : 3; - unsigned int z: 2; - volatile unsigned int b : 5; + unsigned int z; + volatile unsigned long b : 16; } st4; // CIR-LAYOUT: BitFields:[ -// CIR-LAYOUT-NEXT: -// CIR-LAYOUT-NEXT: -// CIR-LAYOUT-NEXT: +// CIR-LAYOUT-NEXT: +// CIR-LAYOUT-NEXT: // OGCG-LAYOUT: BitFields:[ -// OGCG-LAYOUT-NEXT: -// OGCG-LAYOUT-NEXT: -// OGCG-LAYOUT-NEXT: - -st1 s1; -st2 s2; -st3 s3; -st4 s4; +// OGCG-LAYOUT-NEXT: +// OGCG-LAYOUT-NEXT: + + +void def () { + st1 s1; + st2 s2; + st3 s3; + st4 s4; +} + +int check_load(st1 *s1) { + return s1->b; +} + +// CIR: cir.func dso_local @check_load +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr) -> !u32i +// CIR: [[CAST:%.*]] = cir.cast(integral, 
[[BITFI]] : !u32i), !s32i +// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr, !s32i +// CIR: cir.return [[RET]] : !s32i + +// LLVM:define dso_local i32 @check_load +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st1, ptr [[LOAD]], i32 0, i32 0 +// LLVM: [[LOADVOL:%.*]] = load volatile i32, ptr [[MEMBER]], align 4 +// LLVM: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9 +// LLVM: [[CLEAR:%.*]] = and i32 [[LSHR]], 1 +// LLVM: store i32 [[CLEAR]], ptr [[RETVAL:%.*]], align 4 +// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4 +// LLVM: ret i32 [[RET]] + +// OGCG: define dso_local i32 @check_load +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[LOADVOL:%.*]] = load volatile i32, ptr [[LOAD]], align 4 +// OGCG: [[LSHR:%.*]] = lshr i32 [[LOADVOL]], 9 +// OGCG: [[CLEAR:%.*]] = and i32 [[LSHR]], 1 +// OGCG: ret i32 [[CLEAR]] + +// this volatile bit-field container overlaps with a zero-length bit-field, +// so it may be accessed without using the container's width. 
+int check_load_exception(st3 *s3) { + return s3->b; +} + +// CIR: cir.func dso_local @check_load_exception +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr) -> !u32i +// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i +// CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr, !s32i +// CIR: cir.return [[RET]] : !s32i + +// LLVM:define dso_local i32 @check_load_exception +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2 +// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4 +// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31 +// LLVM: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32 +// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4 +// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4 +// LLVM: ret i32 [[RET]] + +// OGCG: define dso_local i32 @check_load_exception +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2 +// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4 +// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], 31 +// OGCG: [[CAST:%.*]] = zext i8 [[CLEAR]] to i32 +// OGCG: ret i32 [[CAST]] + +typedef struct { + volatile int a : 24; + char b; + volatile int c: 30; + } clip; + +int clip_load_exception2(clip *c) { + return c->a; +} + +// CIR: cir.func dso_local @clip_load_exception2 +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr -> !cir.ptr> +// CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_a1, [[MEMBER]] {is_volatile} : !cir.ptr>) -> !s32i +// CIR: cir.store [[BITFI]], 
[[RETVAL:%.*]] : !s32i, !cir.ptr +// CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr, !s32i +// CIR: cir.return [[RET]] : !s32i + +// LLVM:define dso_local i32 @clip_load_exception2 +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0 +// LLVM: [[LOADVOL:%.*]] = load volatile i24, ptr [[MEMBER]], align 4 +// LLVM: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32 +// LLVM: store i32 [[CAST]], ptr [[RETVAL:%.*]], align 4 +// LLVM: [[RET:%.*]] = load i32, ptr [[RETVAL]], align 4 +// LLVM: ret i32 [[RET]] + +// OGCG: define dso_local i32 @clip_load_exception2 +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[LOADVOL:%.*]] = load volatile i24, ptr [[LOAD]], align 4 +// OGCG: [[CAST:%.*]] = sext i24 [[LOADVOL]] to i32 +// OGCG: ret i32 [[CAST]] + +void check_store(st2 *s2) { + s2->a = 1; +} + +// CIR: cir.func dso_local @check_store +// CIR: [[CONST:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr -> !cir.ptr +// CIR: [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr, [[CAST]] : !s16i) {is_volatile} -> !s16i +// CIR: cir.return + +// LLVM:define dso_local void @check_store +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st2, ptr [[LOAD]], i32 0, i32 0 +// LLVM: [[LOADVOL:%.*]] = load volatile i16, ptr [[MEMBER]], align 8 +// LLVM: [[CLEAR:%.*]] = and i16 [[LOADVOL]], -8 +// LLVM: [[SET:%.*]] = or i16 [[CLEAR]], 1 +// LLVM: store volatile i16 [[SET]], ptr [[MEMBER]], align 8 +// LLVM: ret void + +// OGCG: define dso_local void @check_store +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[LOADVOL:%.*]] = load volatile i16, ptr [[LOAD]], align 8 +// OGCG: [[CLEAR:%.*]] = and i16 
[[LOADVOL]], -8 +// OGCG: [[SET:%.*]] = or i16 [[CLEAR]], 1 +// OGCG: store volatile i16 [[SET]], ptr [[LOAD]], align 8 +// OGCG: ret void + +// this volatile bit-field container overlaps with a zero-length bit-field, +// so it may be accessed without using the container's width. +void check_store_exception(st3 *s3) { + s3->b = 2; +} + +// CIR: cir.func dso_local @check_store_exception +// CIR: [[CONST:%.*]] = cir.const #cir.int<2> : !s32i +// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr, [[CAST]] : !u32i) {is_volatile} -> !u32i +// CIR: cir.return + +// LLVM:define dso_local void @check_store_exception +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st3, ptr [[LOAD]], i32 0, i32 2 +// LLVM: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4 +// LLVM: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32 +// LLVM: [[SET:%.*]] = or i8 [[CLEAR]], 2 +// LLVM: store volatile i8 [[SET]], ptr [[MEMBER]], align 4 +// LLVM: ret void + +// OGCG: define dso_local void @check_store_exception +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[MEMBER:%.*]] = getelementptr inbounds nuw %struct.st3, ptr [[LOAD]], i32 0, i32 2 +// OGCG: [[LOADVOL:%.*]] = load volatile i8, ptr [[MEMBER]], align 4 +// OGCG: [[CLEAR:%.*]] = and i8 [[LOADVOL]], -32 +// OGCG: [[SET:%.*]] = or i8 [[CLEAR]], 2 +// OGCG: store volatile i8 [[SET]], ptr [[MEMBER]], align 4 +// OGCG: ret void + +void clip_store_exception2(clip *c) { + c->a = 3; +} + +// CIR: cir.func dso_local @clip_store_exception2 +// CIR: [[CONST:%.*]] = cir.const #cir.int<3> : !s32i +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = 
"a"} : !cir.ptr -> !cir.ptr> +// CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_a1, [[MEMBER]] : !cir.ptr>, [[CONST]] : !s32i) {is_volatile} -> !s32i +// CIR: cir.return + +// LLVM:define dso_local void @clip_store_exception2 +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.clip, ptr [[LOAD]], i32 0, i32 0 +// LLVM: store volatile i24 3, ptr [[MEMBER]], align 4 +// LLVM: ret void + +// OGCG: define dso_local void @clip_store_exception2 +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: store volatile i24 3, ptr [[LOAD]], align 4 +// OGCG: ret void + +void check_store_second_member (st4 *s4) { + s4->b = 1; +} + +// CIR: cir.func dso_local @check_store_second_member +// CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i +// CIR: [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i +// CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr +// CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr +// CIR: cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr, [[CAST]] : !u64i) {is_volatile} -> !u64i + +// LLVM: define dso_local void @check_store_second_member +// LLVM: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// LLVM: [[MEMBER:%.*]] = getelementptr %struct.st4, ptr [[LOAD]], i32 0, i32 2 +// LLVM: [[VAL:%.*]] = load volatile i64, ptr [[MEMBER]], align 8 +// LLVM: [[CLEAR:%.*]] = and i64 [[VAL]], -65536 +// LLVM: [[SET:%.*]] = or i64 [[CLEAR]], 1 +// LLVM: store volatile i64 [[SET]], ptr [[MEMBER]], align 8 + +// OGCG: define dso_local void @check_store_second_member +// OGCG: [[LOAD:%.*]] = load ptr, ptr {{.*}}, align 8 +// OGCG: [[MEMBER:%.*]] = getelementptr inbounds i64, ptr [[LOAD]], i64 1 +// OGCG: [[LOADBF:%.*]] = load volatile i64, ptr [[MEMBER]], align 8 +// OGCG: [[CLR:%.*]] = and i64 [[LOADBF]], -65536 +// OGCG: [[SET:%.*]] = or i64 [[CLR]], 1 +// OGCG: store volatile i64 [[SET]], ptr [[MEMBER]], align 8 From 
95c427d3974812349f43bad061c0dc2c2dbcfa27 Mon Sep 17 00:00:00 2001 From: Andres Salamanca Date: Mon, 4 Aug 2025 15:00:31 -0500 Subject: [PATCH 2/2] Add TODO comment for AAPCS ABI check as requested --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 574a46715c2ee..8ae63bf6f952e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -322,6 +322,7 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, assert(!cir::MissingFeatures::opTBAA()); } +// TODO: Replace this with a proper TargetInfo function call. /// Helper method to check if the underlying ABI is AAPCS static bool isAAPCS(const TargetInfo &targetInfo) { return targetInfo.getABI().starts_with("aapcs");