Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 34 additions & 0 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -4166,6 +4166,40 @@ def CIR_ThrowOp : CIR_Op<"throw"> {
let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// AllocExceptionOp
//===----------------------------------------------------------------------===//

def CIR_AllocExceptionOp : CIR_Op<"alloc.exception"> {
  let summary = "Allocates an exception according to Itanium ABI";
  let description = [{
    Implements a slightly higher level __cxa_allocate_exception:

    `void *__cxa_allocate_exception(size_t thrown_size);`

    If the allocation fails, the program terminates rather than throwing.

    Example:

    ```mlir
    // if (b == 0) {
    //   ...
    //   throw "...";
    cir.if %10 {
      %11 = cir.alloc.exception 8 -> !cir.ptr<!void>
      ... // store exception content into %11
      cir.throw %11 : !cir.ptr<!cir.ptr<!u8i>>, ...
    ```
  }];

  let arguments = (ins I64Attr:$size);
  let results = (outs Res<CIR_PointerType, "", [MemAlloc<DefaultResource>]>:$addr);

  let assemblyFormat = [{
    $size `->` qualified(type($addr)) attr-dict
  }];
}

//===----------------------------------------------------------------------===//
// Atomic operations
//===----------------------------------------------------------------------===//
Expand Down
1 change: 1 addition & 0 deletions clang/lib/CIR/CodeGen/CIRGenCXXABI.h
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ class CIRGenCXXABI {
CIRGenFunction &cgf) = 0;

virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
virtual void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) = 0;

virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
QualType ty) = 0;
Expand Down
33 changes: 33 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,3 +178,36 @@ void CIRGenFunction::popCleanupBlocks(
popCleanupBlock();
}
}

void CIRGenFunction::deactivateCleanupBlock(
    EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP) {
  assert(cleanup != ehStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.find(cleanup));
  assert(scope.isActive() && "double deactivation");

  // Only the fast path is implemented: the cleanup sits on top of the stack
  // and belongs to the current RunCleanupsScope, so it can simply be popped.
  bool poppable = cleanup == ehStack.stable_begin() &&
                  currentCleanupStackDepth.strictlyEncloses(cleanup);
  if (!poppable) {
    // Otherwise, follow the general case.
    cgm.errorNYI("deactivateCleanupBlock: the general case");
    return;
  }

  // Checking EHAsynch is not strictly necessary; it is there to guarantee
  // zero impact when the EHAsynch option is off.
  if (!scope.isNormalCleanup() && getLangOpts().EHAsynch) {
    cgm.errorNYI("deactivateCleanupBlock: EHAsynch & non-normal cleanup");
    return;
  }

  // From LLVM: if it's a normal cleanup, we need to pretend that the
  // fallthrough is unreachable.
  // CIR remarks: LLVM uses an empty insertion point to signal a behavior
  // change to other codegen paths (triggered by PopCleanupBlock). CIRGen
  // doesn't do that yet, but mimic it just in case.
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.clearInsertionPoint();
  popCleanupBlock();
}
8 changes: 8 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenCleanup.h
Original file line number Diff line number Diff line change
Expand Up @@ -138,5 +138,13 @@ inline EHScopeStack::iterator EHScopeStack::begin() const {
return iterator(startOfData);
}

inline EHScopeStack::iterator
EHScopeStack::find(stable_iterator savePoint) const {
  // A stable iterator records its depth as an offset from the end of the
  // buffer; translate that back into a raw position on the EH stack.
  assert(savePoint.isValid() && "finding invalid savepoint");
  assert(savePoint.size <= stable_begin().size &&
         "finding savepoint after pop");
  auto *rawPosition = endOfBuffer - savePoint.size;
  return iterator(rawPosition);
}

} // namespace clang::CIRGen
#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
35 changes: 30 additions & 5 deletions clang/lib/CIR/CodeGen/CIRGenException.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,36 @@ void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *e) {
if (throwType->isObjCObjectPointerType()) {
cgm.errorNYI("emitCXXThrowExpr ObjCObjectPointerType");
return;
} else {
cgm.errorNYI("emitCXXThrowExpr with subExpr");
return;
}
} else {
cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);

cgm.getCXXABI().emitThrow(*this, e);
return;
}

cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
}

void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
assert(!cir::MissingFeatures::ehCleanupScope());

// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
mlir::Type ty = convertTypeForMem(e->getType());
Address typedAddr = addr.withElementType(builder, ty);

// From LLVM's codegen:
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
// must call std::terminate() if that constructor throws, because
// technically that copy occurs after the exception expression is
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
emitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
/*isInitializer=*/true);

// Deactivate the cleanup block.
assert(!cir::MissingFeatures::ehCleanupScope());
}
5 changes: 5 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenFunction.h
Original file line number Diff line number Diff line change
Expand Up @@ -1090,6 +1090,11 @@ class CIRGenFunction : public CIRGenTypeCache {
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);

void emitAnyExprToExn(const Expr *e, Address addr);

void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup,
mlir::Operation *dominatingIP);

void emitArrayDestroy(mlir::Value begin, mlir::Value numElements,
QualType elementType, CharUnits elementAlign,
Destroyer *destroyer);
Expand Down
55 changes: 55 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
QualType thisTy) override;

void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;

bool useThunkForDtorVariant(const CXXDestructorDecl *dtor,
CXXDtorType dt) const override {
Expand Down Expand Up @@ -1544,6 +1545,60 @@ void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &cgf, bool isNoReturn) {
}
}

void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &cgf,
                                    const CXXThrowExpr *e) {
  // This differs a bit from LLVM codegen: CIR has native operations for some
  // cxa functions, defers allocation-size computation, always passes the dtor
  // symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn.

  // Now allocate the exception object.
  CIRGenBuilderTy &builder = cgf.getBuilder();
  QualType thrownTy = e->getSubExpr()->getType();
  cir::PointerType exnPtrTy = builder.getPointerTo(cgf.convertType(thrownTy));
  uint64_t thrownSize =
      cgf.getContext().getTypeSizeInChars(thrownTy).getQuantity();
  mlir::Location exprLoc = cgf.getLoc(e->getSubExpr()->getSourceRange());

  // Defer computing the allocation size to some later lowering pass.
  mlir::TypedValue<cir::PointerType> exnPtr =
      cir::AllocExceptionOp::create(builder, exprLoc, exnPtrTy,
                                    builder.getI64IntegerAttr(thrownSize))
          .getAddr();

  // Evaluate the thrown expression and store its result into exnPtr.
  CharUnits exnAlignment = cgf.getContext().getExnObjectAlignment();
  cgf.emitAnyExprToExn(e->getSubExpr(), Address(exnPtr, exnAlignment));

  // Get the RTTI symbol address.
  auto rttiView = mlir::dyn_cast_if_present<cir::GlobalViewAttr>(
      cgm.getAddrOfRTTIDescriptor(exprLoc, thrownTy,
                                  /*forEH=*/true));
  assert(rttiView && "expected GlobalViewAttr typeinfo");
  assert(!rttiView.getIndices() && "expected no indirection");

  // The address of the destructor.
  //
  // Note: LLVM codegen already optimizes out the dtor if the
  // type is a record with trivial dtor (by passing down a
  // null dtor). In CIR, we forward this info and allow the
  // lowering pass to skip passing the trivial function.
  //
  if (const RecordType *recTy = thrownTy->getAs<RecordType>()) {
    CXXRecordDecl *recDecl =
        cast<CXXRecordDecl>(recTy->getOriginalDecl()->getDefinition());
    assert(!cir::MissingFeatures::isTrivialCtorOrDtor());
    if (!recDecl->hasTrivialDestructor()) {
      cgm.errorNYI("emitThrow: non-trivial destructor");
      return;
    }
  }

  // Now throw the exception.
  mlir::Location throwLoc = cgf.getLoc(e->getSourceRange());
  insertThrowAndSplit(builder, throwLoc, exnPtr, rttiView.getSymbol());
}

CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
switch (cgm.getASTContext().getCXXABIKind()) {
case TargetCXXABI::GenericItanium:
Expand Down
4 changes: 4 additions & 0 deletions clang/lib/CIR/CodeGen/EHScopeStack.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,10 @@ class EHScopeStack {
return stable_iterator(endOfBuffer - startOfData);
}

/// Turn a stable reference to a scope depth into an unstable pointer
/// to the EH stack.
iterator find(stable_iterator savePoint) const;

/// Create a stable reference to the bottom of the EH stack.
static stable_iterator stable_end() { return stable_iterator(0); }
};
Expand Down
65 changes: 56 additions & 9 deletions clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2581,22 +2581,69 @@ void createLLVMFuncOpIfNotExist(mlir::ConversionPatternRewriter &rewriter,
mlir::LogicalResult CIRToLLVMThrowOpLowering::matchAndRewrite(
cir::ThrowOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
if (op.rethrows()) {
auto voidTy = mlir::LLVM::LLVMVoidType::get(getContext());
auto funcTy =
mlir::LLVM::LLVMFunctionType::get(getContext(), voidTy, {}, false);
mlir::Location loc = op.getLoc();
auto voidTy = mlir::LLVM::LLVMVoidType::get(getContext());

auto mlirModule = op->getParentOfType<mlir::ModuleOp>();
rewriter.setInsertionPointToStart(&mlirModule.getBodyRegion().front());
if (op.rethrows()) {
auto funcTy = mlir::LLVM::LLVMFunctionType::get(voidTy, {});

// Get or create `declare void @__cxa_rethrow()`
const llvm::StringRef functionName = "__cxa_rethrow";
createLLVMFuncOpIfNotExist(rewriter, op, functionName, funcTy);

rewriter.setInsertionPointAfter(op.getOperation());
rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
op, mlir::TypeRange{}, functionName, mlir::ValueRange{});
auto cxaRethrow = mlir::LLVM::CallOp::create(
rewriter, loc, mlir::TypeRange{}, functionName);

rewriter.replaceOp(op, cxaRethrow);
return mlir::success();
}

auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext());
auto fnTy = mlir::LLVM::LLVMFunctionType::get(
voidTy, {llvmPtrTy, llvmPtrTy, llvmPtrTy});

// Get or create `declare void @__cxa_throw(ptr, ptr, ptr)`
const llvm::StringRef fnName = "__cxa_throw";
createLLVMFuncOpIfNotExist(rewriter, op, fnName, fnTy);

mlir::Value typeInfo = mlir::LLVM::AddressOfOp::create(
rewriter, loc, mlir::LLVM::LLVMPointerType::get(rewriter.getContext()),
adaptor.getTypeInfoAttr());

mlir::Value dtor;
if (op.getDtor()) {
dtor = mlir::LLVM::AddressOfOp::create(rewriter, loc, llvmPtrTy,
adaptor.getDtorAttr());
} else {
dtor = mlir::LLVM::ZeroOp::create(rewriter, loc, llvmPtrTy);
}

auto cxaThrowCall = mlir::LLVM::CallOp::create(
rewriter, loc, mlir::TypeRange{}, fnName,
mlir::ValueRange{adaptor.getExceptionPtr(), typeInfo, dtor});

rewriter.replaceOp(op, cxaThrowCall);
return mlir::success();
}

mlir::LogicalResult CIRToLLVMAllocExceptionOpLowering::matchAndRewrite(
    cir::AllocExceptionOp op, OpAdaptor adaptor,
    mlir::ConversionPatternRewriter &rewriter) const {
  // Lower to a call of `declare ptr @__cxa_allocate_exception(i64)`,
  // declaring the runtime function first if the module lacks it.
  StringRef fnName = "__cxa_allocate_exception";
  mlir::MLIRContext *ctx = rewriter.getContext();
  auto ptrTy = mlir::LLVM::LLVMPointerType::get(ctx);
  auto i64Ty = mlir::IntegerType::get(ctx, 64);
  auto fnTy = mlir::LLVM::LLVMFunctionType::get(ptrTy, {i64Ty});

  createLLVMFuncOpIfNotExist(rewriter, op, fnName, fnTy);

  // Materialize the deferred size attribute as an i64 constant argument.
  auto sizeConst =
      mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(), adaptor.getSizeAttr());

  auto allocateExceptionCall = mlir::LLVM::CallOp::create(
      rewriter, op.getLoc(), mlir::TypeRange{ptrTy}, fnName,
      mlir::ValueRange{sizeConst});

  rewriter.replaceOp(op, allocateExceptionCall);
  return mlir::success();
}

Expand Down
44 changes: 42 additions & 2 deletions clang/test/CIR/CodeGen/throws.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG

void foo() {
void rethrow() {
throw;
}

Expand All @@ -18,7 +18,7 @@ void foo() {
// OGCG: call void @__cxa_rethrow()
// OGCG: unreachable

int foo1(int a, int b) {
int rethrow_from_block(int a, int b) {
if (b == 0)
throw;
return a / b;
Expand Down Expand Up @@ -83,3 +83,43 @@ int foo1(int a, int b) {
// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
// OGCG: %[[DIV_A_B:.*]] = sdiv i32 %[[TMP_A]], %[[TMP_B]]
// OGCG: ret i32 %[[DIV_A_B]]

void throw_scalar() {
throw 1;
}

// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr<!s32i>
// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<1> : !s32i
// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, @_ZTIi
// CIR: cir.unreachable

// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
// LLVM: store i32 1, ptr %[[EXCEPTION_ADDR]], align 16
// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null)
// LLVM: unreachable

// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
// OGCG: store i32 1, ptr %[[EXCEPTION_ADDR]], align 16
// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null)
// OGCG: unreachable

void paren_expr() { (throw 0, 1 + 2); }

// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr<!s32i>
// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<0> : !s32i
// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr<!s32i>, @_ZTIi
// CIR: cir.unreachable
// CIR: ^bb1:
// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i
// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i
// CIR: %[[ADD:.*]] = cir.binop(add, %[[CONST_1]], %[[CONST_2]]) nsw : !s32i

// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
// LLVM: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null)

// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4)
// OGCG: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16
// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null)