diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 7f2e55d14b51c..889d4ee29b538 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4166,6 +4166,40 @@ def CIR_ThrowOp : CIR_Op<"throw"> {
   let hasVerifier = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// AllocExceptionOp
+//===----------------------------------------------------------------------===//
+
+def CIR_AllocExceptionOp : CIR_Op<"alloc.exception"> {
+  let summary = "Allocates an exception according to the Itanium ABI";
+  let description = [{
+    Implements a slightly higher-level version of __cxa_allocate_exception:
+
+    `void *__cxa_allocate_exception(size_t thrown_size);`
+
+    If the operation fails, the program terminates rather than throwing.
+
+    Example:
+
+    ```mlir
+    // if (b == 0) {
+    //   ...
+    //   throw "...";
+    cir.if %10 {
+      %11 = cir.alloc.exception 8 -> !cir.ptr<!cir.ptr<!s8i>>
+      ... // store exception content into %11
+      cir.throw %11 : !cir.ptr<!cir.ptr<!s8i>>, ...
+    ```
+  }];
+
+  let arguments = (ins I64Attr:$size);
+  let results = (outs Res<CIR_PointerType, "", [MemAlloc<DefaultResource>]>:$addr);
+
+  let assemblyFormat = [{
+    $size `->` qualified(type($addr)) attr-dict
+  }];
+}
+
 //===----------------------------------------------------------------------===//
 // Atomic operations
 //===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index 1dee77425c30d..be66240c280ec 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -113,6 +113,7 @@ class CIRGenCXXABI {
                            CIRGenFunction &cgf) = 0;
 
   virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
+  virtual void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) = 0;
 
   virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
                                                   QualType ty) = 0;
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 4d4d10be40024..6deb3d6a8f1e7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -178,3 +178,36 @@ void CIRGenFunction::popCleanupBlocks(
     popCleanupBlock();
   }
 }
+
+void CIRGenFunction::deactivateCleanupBlock(
+    EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP) {
+  assert(cleanup != ehStack.stable_end() && "deactivating bottom of stack?");
+  EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.find(cleanup));
+  assert(scope.isActive() && "double deactivation");
+
+  // If it's the top of the stack, just pop it, but do so only if it belongs
+  // to the current RunCleanupsScope.
+  if (cleanup == ehStack.stable_begin() &&
+      currentCleanupStackDepth.strictlyEncloses(cleanup)) {
+
+    // Per the comment below, checking EHAsynch is not strictly necessary;
+    // it's there to ensure zero impact when the EHAsynch option is off.
+    if (!scope.isNormalCleanup() && getLangOpts().EHAsynch) {
+      cgm.errorNYI("deactivateCleanupBlock: EHAsynch & non-normal cleanup");
+      return;
+    }
+
+    // From LLVM: If it's a normal cleanup, we need to pretend that the
+    // fallthrough is unreachable.
+    // CIR remarks: LLVM uses an empty insertion point to signal a behavior
+    // change to other codegen paths (triggered by PopCleanupBlock).
+    // CIRGen doesn't do that yet, but let's mimic it just in case.
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.clearInsertionPoint();
+    popCleanupBlock();
+    return;
+  }
+
+  // Otherwise, follow the general case.
+  cgm.errorNYI("deactivateCleanupBlock: the general case");
+}
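Context for `deactivateCleanupBlock` above: the `ehCleanupScope()` placeholders in this patch stand in for the pattern classic CodeGen uses in `clang/lib/CodeGen/CGException.cpp`, where a `FreeException` cleanup is pushed before initializing the exception slot and deactivated once initialization succeeds. A minimal sketch of that intended usage, assuming CIRGen eventually mirrors the classic names (`pushFullExprCleanup`, `FreeException`, and `EHCleanup` are not part of this patch):

```cpp
// Sketch only: how deactivateCleanupBlock is expected to be used once the
// CIR equivalent of classic CodeGen's FreeException cleanup exists.
void emitAnyExprToExnWithCleanup(CIRGenFunction &cgf, const Expr *e,
                                 Address addr) {
  // Free the __cxa_allocate_exception storage if initialization throws.
  cgf.pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
  EHScopeStack::stable_iterator cleanup = cgf.ehStack.stable_begin();

  // Run the initializer into the exception slot.
  cgf.emitAnyExprToMem(e, addr, e->getType().getQualifiers(),
                       /*isInitializer=*/true);

  // Initialization succeeded: the free-on-unwind cleanup must not run.
  cgf.deactivateCleanupBlock(cleanup, /*dominatingIP=*/nullptr);
}
```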
+ cgm.errorNYI("deactivateCleanupBlock: the general case"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h index a4ec8ccebbd3b..33330a8e17533 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h +++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h @@ -138,5 +138,13 @@ inline EHScopeStack::iterator EHScopeStack::begin() const { return iterator(startOfData); } +inline EHScopeStack::iterator +EHScopeStack::find(stable_iterator savePoint) const { + assert(savePoint.isValid() && "finding invalid savepoint"); + assert(savePoint.size <= stable_begin().size && + "finding savepoint after pop"); + return iterator(endOfBuffer - savePoint.size); +} + } // namespace clang::CIRGen #endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp index 7fcb39a2b74c4..645384383711b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenException.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp @@ -31,11 +31,36 @@ void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *e) { if (throwType->isObjCObjectPointerType()) { cgm.errorNYI("emitCXXThrowExpr ObjCObjectPointerType"); return; - } else { - cgm.errorNYI("emitCXXThrowExpr with subExpr"); - return; } - } else { - cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true); + + cgm.getCXXABI().emitThrow(*this, e); + return; } + + cgm.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true); +} + +void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) { + // Make sure the exception object is cleaned up if there's an + // exception during initialization. + assert(!cir::MissingFeatures::ehCleanupScope()); + + // __cxa_allocate_exception returns a void*; we need to cast this + // to the appropriate type for the object. + mlir::Type ty = convertTypeForMem(e->getType()); + Address typedAddr = addr.withElementType(builder, ty); + + // From LLVM's codegen: + // FIXME: this isn't quite right! If there's a final unelided call + // to a copy constructor, then according to [except.terminate]p1 we + // must call std::terminate() if that constructor throws, because + // technically that copy occurs after the exception expression is + // evaluated but before the exception is caught. But the best way + // to handle that is to teach EmitAggExpr to do the final copy + // differently if it can't be elided. + emitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), + /*isInitializer=*/true); + + // Deactivate the cleanup block. + assert(!cir::MissingFeatures::ehCleanupScope()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index dfd9d2ccf0313..3bb975b1f8c8e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1090,6 +1090,11 @@ class CIRGenFunction : public CIRGenTypeCache { /// even if no aggregate location is provided. 
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index debea8af66b50..6d8e99c28395d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -70,6 +70,7 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
                           QualType thisTy) override;
 
   void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) override;
+  void emitThrow(CIRGenFunction &cgf, const CXXThrowExpr *e) override;
 
   bool useThunkForDtorVariant(const CXXDestructorDecl *dtor,
                               CXXDtorType dt) const override {
@@ -1544,6 +1545,60 @@ void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &cgf, bool isNoReturn) {
   }
 }
 
+void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &cgf,
+                                    const CXXThrowExpr *e) {
+  // This differs a bit from LLVM codegen: CIR has native operations for some
+  // __cxa_* functions, defers allocation-size computation, always passes the
+  // dtor symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn.
+
+  // Now allocate the exception object.
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  QualType clangThrowType = e->getSubExpr()->getType();
+  cir::PointerType throwTy =
+      builder.getPointerTo(cgf.convertType(clangThrowType));
+  uint64_t typeSize =
+      cgf.getContext().getTypeSizeInChars(clangThrowType).getQuantity();
+  mlir::Location subExprLoc = cgf.getLoc(e->getSubExpr()->getSourceRange());
+
+  // Defer computing the allocation size to a later lowering pass.
+  mlir::TypedValue<cir::PointerType> exceptionPtr =
+      cir::AllocExceptionOp::create(builder, subExprLoc, throwTy,
+                                    builder.getI64IntegerAttr(typeSize))
+          .getAddr();
+
+  // Build the expression and store its result into exceptionPtr.
+  CharUnits exnAlign = cgf.getContext().getExnObjectAlignment();
+  cgf.emitAnyExprToExn(e->getSubExpr(), Address(exceptionPtr, exnAlign));
+
+  // Get the RTTI symbol address.
+  auto typeInfo = mlir::dyn_cast_if_present<cir::GlobalViewAttr>(
+      cgm.getAddrOfRTTIDescriptor(subExprLoc, clangThrowType,
+                                  /*forEH=*/true));
+  assert(typeInfo && "expected GlobalViewAttr typeinfo");
+  assert(!typeInfo.getIndices() && "expected no indirection");
+
+  // The address of the destructor.
+  //
+  // Note: LLVM codegen already optimizes out the dtor if the
+  // type is a record with a trivial dtor (by passing down a
+  // null dtor). In CIR, we forward this info and allow the
+  // lowering pass to skip passing the trivial function.
+  //
+  if (const RecordType *recordTy = clangThrowType->getAs<RecordType>()) {
+    CXXRecordDecl *rec =
+        cast<CXXRecordDecl>(recordTy->getOriginalDecl()->getDefinition());
+    assert(!cir::MissingFeatures::isTrivialCtorOrDtor());
+    if (!rec->hasTrivialDestructor()) {
+      cgm.errorNYI("emitThrow: non-trivial destructor");
+      return;
+    }
+  }
+
+  // Now throw the exception.
+  mlir::Location loc = cgf.getLoc(e->getSourceRange());
+  insertThrowAndSplit(builder, loc, exceptionPtr, typeInfo.getSymbol());
+}
+
 CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) {
   switch (cgm.getASTContext().getCXXABIKind()) {
   case TargetCXXABI::GenericItanium:
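To make the destructor-forwarding note concrete, here are hypothetical inputs (not tests added by this patch) and the `emitThrow` paths they take:

```cpp
// Illustration only: which branches of emitThrow the inputs below exercise.
struct Trivial { int x; };     // trivial dtor: reaches insertThrowAndSplit,
void f() { throw Trivial{1}; } // and lowering passes a null dtor today

struct NonTrivial { ~NonTrivial(); }; // non-trivial dtor: currently stops at
void g() { throw NonTrivial{}; }      // errorNYI("emitThrow: non-trivial
                                      // destructor")
```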
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
index c87a6ef9660ad..66c1f76094c58 100644
--- a/clang/lib/CIR/CodeGen/EHScopeStack.h
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -175,6 +175,10 @@ class EHScopeStack {
     return stable_iterator(endOfBuffer - startOfData);
   }
 
+  /// Turn a stable reference to a scope depth into an unstable pointer
+  /// to the EH stack.
+  iterator find(stable_iterator savePoint) const;
+
   /// Create a stable reference to the bottom of the EH stack.
   static stable_iterator stable_end() { return stable_iterator(0); }
 };
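For reference, the Itanium C++ ABI runtime entry points that the lowerings below declare and call. These declarations follow the ABI's exception-handling specification (implemented by libc++abi/libsupc++); the comments are a paraphrase, not text from this patch:

```cpp
#include <cstddef>
#include <typeinfo>

// Itanium C++ ABI exception runtime (per the EH specification).
extern "C" {
// Allocates storage for an exception object; terminates on failure.
void *__cxa_allocate_exception(std::size_t thrown_size) throw();
// Raises the exception; dest may be null for trivially destructible types.
void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
                 void (*dest)(void *));
// Re-raises the exception currently being handled.
void __cxa_rethrow();
}
```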
StringRef fnName = "__cxa_allocate_exception"; + auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); + auto int64Ty = mlir::IntegerType::get(rewriter.getContext(), 64); + auto fnTy = mlir::LLVM::LLVMFunctionType::get(llvmPtrTy, {int64Ty}); + + createLLVMFuncOpIfNotExist(rewriter, op, fnName, fnTy); + auto exceptionSize = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(), + adaptor.getSizeAttr()); + + auto allocaExceptionCall = mlir::LLVM::CallOp::create( + rewriter, op.getLoc(), mlir::TypeRange{llvmPtrTy}, fnName, + mlir::ValueRange{exceptionSize}); + + rewriter.replaceOp(op, allocaExceptionCall); return mlir::success(); } diff --git a/clang/test/CIR/CodeGen/throws.cpp b/clang/test/CIR/CodeGen/throws.cpp index 0122f3088f0bf..ff6aa62157faa 100644 --- a/clang/test/CIR/CodeGen/throws.cpp +++ b/clang/test/CIR/CodeGen/throws.cpp @@ -5,7 +5,7 @@ // RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fcxx-exceptions -fexceptions -emit-llvm %s -o %t.ll // RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG -void foo() { +void rethrow() { throw; } @@ -18,7 +18,7 @@ void foo() { // OGCG: call void @__cxa_rethrow() // OGCG: unreachable -int foo1(int a, int b) { +int rethrow_from_block(int a, int b) { if (b == 0) throw; return a / b; @@ -83,3 +83,43 @@ int foo1(int a, int b) { // OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4 // OGCG: %[[DIV_A_B:.*]] = sdiv i32 %[[TMP_A]], %[[TMP_B]] // OGCG: ret i32 %[[DIV_A_B]] + +void throw_scalar() { + throw 1; +} + +// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr +// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<1> : !s32i +// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr +// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr, @_ZTIi +// CIR: cir.unreachable + +// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4) +// LLVM: store i32 1, ptr %[[EXCEPTION_ADDR]], align 16 +// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null) +// LLVM: unreachable + +// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4) +// OGCG: store i32 1, ptr %[[EXCEPTION_ADDR]], align 16 +// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null) +// OGCG: unreachable + +void paren_expr() { (throw 0, 1 + 2); } + +// CIR: %[[EXCEPTION_ADDR:.*]] = cir.alloc.exception 4 -> !cir.ptr +// CIR: %[[EXCEPTION_VALUE:.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store{{.*}} %[[EXCEPTION_VALUE]], %[[EXCEPTION_ADDR]] : !s32i, !cir.ptr +// CIR: cir.throw %[[EXCEPTION_ADDR]] : !cir.ptr, @_ZTIi +// CIR: cir.unreachable +// CIR: ^bb1: +// CIR: %[[CONST_1:.*]] = cir.const #cir.int<1> : !s32i +// CIR: %[[CONST_2:.*]] = cir.const #cir.int<2> : !s32i +// CIR: %[[ADD:.*]] = cir.binop(add, %[[CONST_1]], %[[CONST_2]]) nsw : !s32i + +// LLVM: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4) +// LLVM: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16 +// LLVM: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null) + +// OGCG: %[[EXCEPTION_ADDR:.*]] = call ptr @__cxa_allocate_exception(i64 4) +// OGCG: store i32 0, ptr %[[EXCEPTION_ADDR]], align 16 +// OGCG: call void @__cxa_throw(ptr %[[EXCEPTION_ADDR]], ptr @_ZTIi, ptr null)