Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 17 additions & 13 deletions clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2344,25 +2344,29 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
cgf.getCIRGenModule().errorNYI(
e->getSourceRange(),
"sizeof operator for VariableArrayType & evaluateExtent "
"ignoredExpr",
e->getStmtClassName());
return {};
cgf.emitIgnoredExpr(e->getArgumentExpr());
}

// For _Countof, we just want to return the size of a single dimension.
if (kind == UETT_CountOf)
return cgf.getVLAElements1D(vat).numElts;

cgf.getCIRGenModule().errorNYI(
e->getSourceRange(),
"sizeof operator for VariableArrayType & evaluateExtent",
e->getStmtClassName());
return builder.getConstant(
loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
-llvm::APSInt(llvm::APInt(64, 1), true)));
// For sizeof and __datasizeof, we need to scale the number of elements
// by the size of the array element type.
auto vlaSize = cgf.getVLASize(vat);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
auto vlaSize = cgf.getVLASize(vat);
VlaSizePair vlaSize = cgf.getVLASize(vat);

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

VlaSizePair is a struct inside CIRGenFunction to use the name we need to write it like

CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);

not sure if that's helpful 🤔

mlir::Value numElts = vlaSize.numElts;

// Scale the number of non-VLA elements by the non-VLA element size.
CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
if (!eltSize.isOne()) {
mlir::Location loc = cgf.getLoc(e->getSourceRange());
mlir::Value eltSizeValue =
builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
cgf.cgm.getSize(eltSize).getValue());
return builder.createMul(loc, eltSizeValue, numElts);
}

return numElts;
}
}
} else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
Expand Down
156 changes: 156 additions & 0 deletions clang/test/CIR/CodeGen/size-of-vla.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG

// sizeof on a VLA *type* whose element type (bool) has size 1.
// Per the CHECK lines below, no multiply is emitted: the size is just the
// loaded runtime element count n.
void vla_type_with_element_type_of_size_1() {
unsigned long n = 10ul;
// sizeof(bool[n]) — element size 1, so the result is n itself.
unsigned long size = sizeof(bool[n]);
}

// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
// CIR: cir.store {{.*}} %[[TMP_N]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>

// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// LLVM: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8

// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// OGCG: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8

// sizeof on a VLA *type* with a multi-byte element type (int).
// Per the CHECK lines below, the element count n is scaled by the element
// size via a mul by the constant 4.
void vla_type_with_element_type_int() {
unsigned long n = 10ul;
// sizeof(int[n]) — emitted as 4 * n.
unsigned long size = sizeof(int[n]);
}

// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %3 = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
// CIR: %[[SIZE:.*]] = cir.binop(mul, %[[CONST_4]], %3) : !u64i
// CIR: cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>

// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// LLVM: %[[SIZE:.*]] = mul i64 4, %[[TMP_N]]
// LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8

// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// OGCG: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
// OGCG: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8

// sizeof applied to a declared VLA object with a size-1 element type (bool).
// Per the CHECK lines below, the VLA declaration triggers stacksave/
// stackrestore around the dynamic alloca, and sizeof(arr) stores the saved
// element count n directly (no scaling).
void vla_expr_element_type_of_size_1() {
unsigned long n = 10ul;
bool arr[n];
// Element size is 1, so sizeof(arr) is just n.
unsigned long size = sizeof(arr);
}

// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
// CIR: %[[SAVED_STACK_ADDR:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
// CIR: %[[STACK_SAVE:.*]] = cir.stacksave : !cir.ptr<!u8i>
// CIR: cir.store {{.*}} %[[STACK_SAVE]], %[[SAVED_STACK_ADDR]] : !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>
// CIR: %[[ARR_ADDR:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, %[[TMP_N]] : !u64i, ["arr"]
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
// CIR: cir.store {{.*}} %[[TMP_N]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %[[TMP_SAVED_STACK:.*]] = cir.load {{.*}} %[[SAVED_STACK_ADDR]] : !cir.ptr<!cir.ptr<!u8i>>, !cir.ptr<!u8i>
// CIR: cir.stackrestore %[[TMP_SAVED_STACK]] : !cir.ptr<!u8i>

// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, i64 1, align 8
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// LLVM: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
// LLVM: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
// LLVM: %[[ARR_ADDR:.*]] = alloca i8, i64 %[[TMP_N]], align 16
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
// LLVM: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
// LLVM: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])

// Note: VLA_EXPR0 below is emitted to capture debug info.

// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
// OGCG: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, align 8
// OGCG: %[[VLA_EXPR0:.*]] = alloca i64, align 8
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// OGCG: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
// OGCG: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
// OGCG: %[[ARR_ADDR:.*]] = alloca i8, i64 %[[TMP_N]], align 16
// OGCG: store i64 %[[TMP_N]], ptr %[[VLA_EXPR0]], align 8
// OGCG: store i64 %[[TMP_N]], ptr %[[SIZE_ADDR]], align 8
// OGCG: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
// OGCG: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])

// sizeof applied to a declared VLA object with a multi-byte element type
// (int). Per the CHECK lines below, the dynamic alloca is bracketed by
// stacksave/stackrestore, and sizeof(arr) is emitted as 4 * n.
void vla_expr_element_type_int() {
unsigned long n = 10ul;
int arr[n];
// sizeof(arr) scales the saved element count n by the int size (4).
unsigned long size = sizeof(arr);
}

// CIR: %[[N_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["n", init]
// CIR: %[[SAVED_STACK_ADDR:.*]] = cir.alloca !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>, ["saved_stack"]
// CIR: %[[CONST_10:.*]] = cir.const #cir.int<10> : !u64i
// CIR: cir.store {{.*}} %[[CONST_10]], %[[N_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %[[TMP_N:.*]] = cir.load {{.*}} %[[N_ADDR]] : !cir.ptr<!u64i>, !u64i
// CIR: %[[STACK_SAVE:.*]] = cir.stacksave : !cir.ptr<!u8i>
// CIR: cir.store {{.*}} %[[STACK_SAVE]], %[[SAVED_STACK_ADDR]] : !cir.ptr<!u8i>, !cir.ptr<!cir.ptr<!u8i>>
// CIR: %[[ARR_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, %[[TMP_N]] : !u64i, ["arr"]
// CIR: %[[SIZE_ADDR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["size", init]
// CIR: %[[CONST_4:.*]] = cir.const #cir.int<4> : !u64i
// CIR: %[[SIZE:.*]] = cir.binop(mul, %[[CONST_4]], %[[TMP_N]]) : !u64i
// CIR: cir.store {{.*}} %[[SIZE]], %[[SIZE_ADDR]] : !u64i, !cir.ptr<!u64i>
// CIR: %[[TMP_SAVED_STACK:.*]] = cir.load {{.*}} %[[SAVED_STACK_ADDR]] : !cir.ptr<!cir.ptr<!u8i>>, !cir.ptr<!u8i>
// CIR: cir.stackrestore %[[TMP_SAVED_STACK]] : !cir.ptr<!u8i>

// LLVM: %[[N_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, i64 1, align 8
// LLVM: store i64 10, ptr %[[N_ADDR]], align 8
// LLVM: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// LLVM: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
// LLVM: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
// LLVM: %[[ARR_ADDR:.*]] = alloca i32, i64 %[[TMP_N]], align 16
// LLVM: %[[SIZE_ADDR:.*]] = alloca i64, i64 1, align 8
// LLVM: %[[SIZE:.*]] = mul i64 4, %[[TMP_N]]
// LLVM: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
// LLVM: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
// LLVM: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])

// Note: VLA_EXPR0 below is emitted to capture debug info.

// OGCG: %[[N_ADDR:.*]] = alloca i64, align 8
// OGCG: %[[SAVED_STACK_ADDR:.*]] = alloca ptr, align 8
// OGCG: %[[VLA_EXPR0:.*]] = alloca i64, align 8
// OGCG: %[[SIZE_ADDR:.*]] = alloca i64, align 8
// OGCG: store i64 10, ptr %[[N_ADDR]], align 8
// OGCG: %[[TMP_N:.*]] = load i64, ptr %[[N_ADDR]], align 8
// OGCG: %[[STACK_SAVE:.*]] = call ptr @llvm.stacksave.p0()
// OGCG: store ptr %[[STACK_SAVE]], ptr %[[SAVED_STACK_ADDR]], align 8
// OGCG: %[[ARR_ADDR:.*]] = alloca i32, i64 %[[TMP_N]], align 16
// OGCG: store i64 %[[TMP_N]], ptr %[[VLA_EXPR0]], align 8
// OGCG: %[[SIZE:.*]] = mul nuw i64 4, %[[TMP_N]]
// OGCG: store i64 %[[SIZE]], ptr %[[SIZE_ADDR]], align 8
// OGCG: %[[TMP_SAVED_STACK:.*]] = load ptr, ptr %[[SAVED_STACK_ADDR]], align 8
// OGCG: call void @llvm.stackrestore.p0(ptr %[[TMP_SAVED_STACK]])
Loading