diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
index e6f8e6873004f..5bf48bc22a549 100644
--- a/clang/lib/CodeGen/CGCleanup.cpp
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -667,7 +667,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
 
   // - whether there's a fallthrough
   llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
-  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+  bool HasFallthrough =
+      FallthroughSource != nullptr && (IsActive || HasExistingBranches);
 
   // Branch-through fall-throughs leave the insertion point set to the
   // end of the last cleanup, which points to the current scope. The
@@ -692,7 +693,11 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
 
   // If we have a prebranched fallthrough into an inactive normal
   // cleanup, rewrite it so that it leads to the appropriate place.
-  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough &&
+      !RequiresNormalCleanup) {
+    // FIXME: Come up with a program which would need forwarding prebranched
+    // fallthrough and add tests. Otherwise delete this and assert against it.
+    assert(!IsActive);
     llvm::BasicBlock *prebranchDest;
 
     // If the prebranch is semantically branching through the next
@@ -765,6 +770,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
       EmitSehCppScopeEnd();
     }
     destroyOptimisticNormalEntry(*this, Scope);
+    Scope.MarkEmitted();
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
@@ -781,6 +787,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
      }
 
      destroyOptimisticNormalEntry(*this, Scope);
+      Scope.MarkEmitted();
      EHStack.popCleanup();
 
      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
@@ -916,6 +923,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
      }
 
      // IV. Pop the cleanup and emit it.
+      Scope.MarkEmitted();
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
 
diff --git a/clang/lib/CodeGen/CGCleanup.h b/clang/lib/CodeGen/CGCleanup.h
index 03e4a29d7b3db..c73c97146abc4 100644
--- a/clang/lib/CodeGen/CGCleanup.h
+++ b/clang/lib/CodeGen/CGCleanup.h
@@ -16,8 +16,11 @@
 #include "EHScopeStack.h"
 
 #include "Address.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
 
 namespace llvm {
 class BasicBlock;
@@ -266,6 +269,51 @@ class alignas(8) EHCleanupScope : public EHScope {
   };
   mutable struct ExtInfo *ExtInfo;
 
+  /// Erases auxiliary allocas and their uses for an unused cleanup.
+  /// Cleanups should mark these allocas as 'used' if the cleanup is
+  /// emitted, otherwise these instructions will be erased.
+  struct AuxillaryAllocas {
+    SmallVector<llvm::AllocaInst *> AuxAllocas;
+    bool used = false;
+
+    // Records a potentially unused alloca to be erased later.
+    void Add(llvm::AllocaInst *Alloca) { AuxAllocas.push_back(Alloca); }
+
+    // Mark all recorded instructions as used. These will not be erased later.
+    void MarkUsed() {
+      used = true;
+      AuxAllocas.clear();
+    }
+
+    ~AuxillaryAllocas() {
+      if (used)
+        return;
+      llvm::SetVector<llvm::Instruction *> Uses;
+      for (auto *Inst : llvm::reverse(AuxAllocas))
+        CollectUses(Inst, Uses);
+      // Delete uses in the reverse order of insertion.
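+      // CollectUses records an instruction before recursing into its users,
+      // so erasing in reverse insertion order removes the users of a value
+      // before the value itself and never leaves a dangling use behind.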
+      for (auto *I : llvm::reverse(Uses))
+        I->eraseFromParent();
+    }
+
+  private:
+    void CollectUses(llvm::Instruction *I,
+                     llvm::SetVector<llvm::Instruction *> &Uses) {
+      if (!I || !Uses.insert(I))
+        return;
+      for (auto *User : I->users())
+        CollectUses(cast<llvm::Instruction>(User), Uses);
+    }
+  };
+  mutable struct AuxillaryAllocas *AuxAllocas;
+
+  AuxillaryAllocas &getAuxillaryAllocas() {
+    if (!AuxAllocas) {
+      AuxAllocas = new struct AuxillaryAllocas();
+    }
+    return *AuxAllocas;
+  }
+
   /// The number of fixups required by enclosing scopes (not including
   /// this one). If this is the top cleanup scope, all the fixups
   /// from this index onwards belong to this scope.
@@ -298,7 +346,7 @@ class alignas(8) EHCleanupScope : public EHScope {
                  EHScopeStack::stable_iterator enclosingEH)
       : EHScope(EHScope::Cleanup, enclosingEH),
         EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
-        ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+        ActiveFlag(Address::invalid()), ExtInfo(nullptr), AuxAllocas(nullptr),
         FixupDepth(fixupDepth) {
     CleanupBits.IsNormalCleanup = isNormal;
     CleanupBits.IsEHCleanup = isEH;
@@ -312,8 +360,15 @@ class alignas(8) EHCleanupScope : public EHScope {
   }
 
   void Destroy() {
+    if (AuxAllocas)
+      delete AuxAllocas;
     delete ExtInfo;
   }
+  void AddAuxAllocas(llvm::SmallVector<llvm::AllocaInst *> Allocas) {
+    for (auto *Alloca : Allocas)
+      getAuxillaryAllocas().Add(Alloca);
+  }
+  void MarkEmitted() { getAuxillaryAllocas().MarkUsed(); }
 
   // Objects of EHCleanupScope are not destructed. Use Destroy().
   ~EHCleanupScope() = delete;
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 267f2e40a7bba..4f4013292b1fc 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -19,6 +19,7 @@
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
 #include "ConstantEmitter.h"
+#include "EHScopeStack.h"
 #include "PatternInit.h"
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
@@ -2201,6 +2202,24 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                   destroyer, useEHCleanupForArray);
 }
 
+// Pushes a destroy and defers its deactivation until its
+// CleanupDeactivationScope is exited.
+void CodeGenFunction::pushDestroyAndDeferDeactivation(
+    QualType::DestructionKind dtorKind, Address addr, QualType type) {
+  assert(dtorKind && "cannot push destructor for trivial type");
+
+  CleanupKind cleanupKind = getCleanupKind(dtorKind);
+  pushDestroyAndDeferDeactivation(
+      cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroyAndDeferDeactivation(
+    CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
+    bool useEHCleanupForArray) {
+  pushCleanupAndDeferDeactivation<DestroyObject>(
+      cleanupKind, addr, type, destroyer, useEHCleanupForArray);
+}
+
 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
   EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
 }
@@ -2217,16 +2236,19 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
   // If we're not in a conditional branch, we don't need to bother generating a
   // conditional cleanup.
   if (!isInConditionalBranch()) {
-    // Push an EH-only cleanup for the object now.
     // FIXME: When popping normal cleanups, we need to keep this EH cleanup
     // around in case a temporary's destructor throws an exception.
-    if (cleanupKind & EHCleanup)
-      EHStack.pushCleanup<DestroyObject>(
-          static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
-          destroyer, useEHCleanupForArray);
+    // Add the cleanup to the EHStack. After the full-expr, this will be
+    // deactivated before being popped from the stack.
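+    // Unlike the EH-only cleanup this replaces, the deferred cleanup keeps the
+    // normal-cleanup bit of cleanupKind, so a branch out of the
+    // full-expression (e.g. from a statement expression) still destroys the
+    // temporary.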
+ pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer, + useEHCleanupForArray); + + // Since this is lifetime-extended, push it once again to the EHStack after + // the full expression. return pushCleanupAfterFullExprWithActiveFlag( - cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray); + cleanupKind, Address::invalid(), addr, type, destroyer, + useEHCleanupForArray); } // Otherwise, we should only destroy the object if it's been initialized. @@ -2241,13 +2263,12 @@ void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind, Address ActiveFlag = createCleanupActiveFlag(); SavedType SavedAddr = saveValueInCond(addr); - if (cleanupKind & EHCleanup) { - EHStack.pushCleanup( - static_cast(cleanupKind & ~NormalCleanup), SavedAddr, type, - destroyer, useEHCleanupForArray); - initFullExprCleanupWithFlag(ActiveFlag); - } + pushCleanupAndDeferDeactivation( + cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray); + initFullExprCleanupWithFlag(ActiveFlag); + // Since this is lifetime-extended, push it once again to the EHStack after + // the full expression. pushCleanupAfterFullExprWithActiveFlag( cleanupKind, ActiveFlag, SavedAddr, type, destroyer, useEHCleanupForArray); @@ -2442,9 +2463,9 @@ namespace { }; } // end anonymous namespace -/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy -/// already-constructed elements of the given array. The cleanup -/// may be popped with DeactivateCleanupBlock or PopCleanupBlock. +/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to +/// destroy already-constructed elements of the given array. The cleanup may be +/// popped with DeactivateCleanupBlock or PopCleanupBlock. /// /// \param elementType - the immediate element type of the array; /// possibly still an array type @@ -2453,10 +2474,9 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, QualType elementType, CharUnits elementAlign, Destroyer *destroyer) { - pushFullExprCleanup(EHCleanup, - arrayBegin, arrayEndPointer, - elementType, elementAlign, - destroyer); + pushFullExprCleanup( + NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType, + elementAlign, destroyer); } /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index cf696a1c9f560..c85a339f5e3f8 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -115,10 +115,16 @@ RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align, llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, const Twine &Name, llvm::Value *ArraySize) { + llvm::AllocaInst *Alloca; if (ArraySize) - return Builder.CreateAlloca(Ty, ArraySize, Name); - return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), - ArraySize, Name, AllocaInsertPt); + Alloca = Builder.CreateAlloca(Ty, ArraySize, Name); + else + Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(), + ArraySize, Name, AllocaInsertPt); + if (Allocas) { + Allocas->Add(Alloca); + } + return Alloca; } /// CreateDefaultAlignTempAlloca - This creates an alloca with the diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index 1b9287ea23934..560a9e2c5ead5 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -15,6 +15,7 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" +#include "EHScopeStack.h" #include "TargetInfo.h" 
#include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" @@ -24,6 +25,7 @@ #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instruction.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" using namespace clang; @@ -558,24 +560,27 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, // For that, we'll need an EH cleanup. QualType::DestructionKind dtorKind = elementType.isDestructedType(); Address endOfInit = Address::invalid(); - EHScopeStack::stable_iterator cleanup; - llvm::Instruction *cleanupDominator = nullptr; - if (CGF.needsEHCleanup(dtorKind)) { + CodeGenFunction::CleanupDeactivationScope deactivation(CGF); + + if (dtorKind) { + CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF); // In principle we could tell the cleanup where we are more // directly, but the control flow can get so varied here that it // would actually be quite complex. Therefore we go through an // alloca. + llvm::Instruction *dominatingIP = + Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy)); endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(), "arrayinit.endOfInit"); - cleanupDominator = Builder.CreateStore(begin, endOfInit); + Builder.CreateStore(begin, endOfInit); CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType, elementAlign, CGF.getDestroyer(dtorKind)); - cleanup = CGF.EHStack.stable_begin(); + cast(*CGF.EHStack.find(CGF.EHStack.stable_begin())) + .AddAuxAllocas(allocaTracker.Take()); - // Otherwise, remember that we didn't need a cleanup. - } else { - dtorKind = QualType::DK_none; + CGF.DeferredDeactivationCleanupStack.push_back( + {CGF.EHStack.stable_begin(), dominatingIP}); } llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1); @@ -671,9 +676,6 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, CGF.EmitBlock(endBB); } - - // Leave the partial-array cleanup if we entered one. - if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator); } //===----------------------------------------------------------------------===// @@ -1374,9 +1376,8 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType()); // We'll need to enter cleanup scopes in case any of the element - // initializers throws an exception. - SmallVector Cleanups; - llvm::Instruction *CleanupDominator = nullptr; + // initializers throws an exception or contains branch out of the expressions. 
+ CodeGenFunction::CleanupDeactivationScope scope(CGF); CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), @@ -1395,28 +1396,12 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { if (QualType::DestructionKind DtorKind = CurField->getType().isDestructedType()) { assert(LV.isSimple()); - if (CGF.needsEHCleanup(DtorKind)) { - if (!CleanupDominator) - CleanupDominator = CGF.Builder.CreateAlignedLoad( - CGF.Int8Ty, - llvm::Constant::getNullValue(CGF.Int8PtrTy), - CharUnits::One()); // placeholder - - CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), - CGF.getDestroyer(DtorKind), false); - Cleanups.push_back(CGF.EHStack.stable_begin()); - } + if (DtorKind) + CGF.pushDestroyAndDeferDeactivation( + NormalAndEHCleanup, LV.getAddress(CGF), CurField->getType(), + CGF.getDestroyer(DtorKind), false); } } - - // Deactivate all the partial cleanups in reverse order, which - // generally means popping them. - for (unsigned i = Cleanups.size(); i != 0; --i) - CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator); - - // Destroy the placeholder if we made one. - if (CleanupDominator) - CleanupDominator->eraseFromParent(); } void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { @@ -1705,14 +1690,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( // We'll need to enter cleanup scopes in case any of the element // initializers throws an exception. SmallVector cleanups; - llvm::Instruction *cleanupDominator = nullptr; - auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) { - cleanups.push_back(cleanup); - if (!cleanupDominator) // create placeholder once needed - cleanupDominator = CGF.Builder.CreateAlignedLoad( - CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy), - CharUnits::One()); - }; + CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF); unsigned curInitIndex = 0; @@ -1735,10 +1713,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot); if (QualType::DestructionKind dtorKind = - Base.getType().isDestructedType()) { - CGF.pushDestroy(dtorKind, V, Base.getType()); - addCleanup(CGF.EHStack.stable_begin()); - } + Base.getType().isDestructedType()) + CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType()); } } @@ -1813,10 +1789,10 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( if (QualType::DestructionKind dtorKind = field->getType().isDestructedType()) { assert(LV.isSimple()); - if (CGF.needsEHCleanup(dtorKind)) { - CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), - CGF.getDestroyer(dtorKind), false); - addCleanup(CGF.EHStack.stable_begin()); + if (dtorKind) { + CGF.pushDestroyAndDeferDeactivation( + NormalAndEHCleanup, LV.getAddress(CGF), field->getType(), + CGF.getDestroyer(dtorKind), false); pushedCleanup = true; } } @@ -1829,17 +1805,6 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr( if (GEP->use_empty()) GEP->eraseFromParent(); } - - // Deactivate all the partial cleanups in reverse order, which - // generally means popping them. - assert((cleanupDominator || cleanups.empty()) && - "Missing cleanupDominator before deactivating cleanup blocks"); - for (unsigned i = cleanups.size(); i != 0; --i) - CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); - - // Destroy the placeholder if we made one. 
- if (cleanupDominator) - cleanupDominator->eraseFromParent(); } void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index a4fb673284cec..a88b29b326bb9 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -1008,8 +1008,8 @@ void CodeGenFunction::EmitNewArrayInitializer( const Expr *Init = E->getInitializer(); Address EndOfInit = Address::invalid(); QualType::DestructionKind DtorKind = ElementType.isDestructedType(); - EHScopeStack::stable_iterator Cleanup; - llvm::Instruction *CleanupDominator = nullptr; + CleanupDeactivationScope deactivation(*this); + bool pushedCleanup = false; CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType); CharUnits ElementAlign = @@ -1105,19 +1105,24 @@ void CodeGenFunction::EmitNewArrayInitializer( } // Enter a partial-destruction Cleanup if necessary. - if (needsEHCleanup(DtorKind)) { + if (DtorKind) { + AllocaTrackerRAII AllocaTracker(*this); // In principle we could tell the Cleanup where we are more // directly, but the control flow can get so varied here that it // would actually be quite complex. Therefore we go through an // alloca. + llvm::Instruction *DominatingIP = + Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy)); EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(), "array.init.end"); - CleanupDominator = - Builder.CreateStore(BeginPtr.emitRawPointer(*this), EndOfInit); pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this), EndOfInit, ElementType, ElementAlign, getDestroyer(DtorKind)); - Cleanup = EHStack.stable_begin(); + cast(*EHStack.find(EHStack.stable_begin())) + .AddAuxAllocas(AllocaTracker.Take()); + DeferredDeactivationCleanupStack.push_back( + {EHStack.stable_begin(), DominatingIP}); + pushedCleanup = true; } CharUnits StartAlign = CurPtr.getAlignment(); @@ -1164,9 +1169,6 @@ void CodeGenFunction::EmitNewArrayInitializer( // initialization. llvm::ConstantInt *ConstNum = dyn_cast(NumElements); if (ConstNum && ConstNum->getZExtValue() <= InitListElements) { - // If there was a Cleanup, deactivate it. - if (CleanupDominator) - DeactivateCleanupBlock(Cleanup, CleanupDominator); return; } @@ -1281,13 +1283,14 @@ void CodeGenFunction::EmitNewArrayInitializer( Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit); // Enter a partial-destruction Cleanup if necessary. - if (!CleanupDominator && needsEHCleanup(DtorKind)) { - llvm::Value *BeginPtrRaw = BeginPtr.emitRawPointer(*this); - llvm::Value *CurPtrRaw = CurPtr.emitRawPointer(*this); - pushRegularPartialArrayCleanup(BeginPtrRaw, CurPtrRaw, ElementType, + if (!pushedCleanup && needsEHCleanup(DtorKind)) { + llvm::Instruction *DominatingIP = + Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy)); + pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this), + CurPtr.emitRawPointer(*this), ElementType, ElementAlign, getDestroyer(DtorKind)); - Cleanup = EHStack.stable_begin(); - CleanupDominator = Builder.CreateUnreachable(); + DeferredDeactivationCleanupStack.push_back( + {EHStack.stable_begin(), DominatingIP}); } // Emit the initializer into this element. @@ -1295,10 +1298,7 @@ void CodeGenFunction::EmitNewArrayInitializer( AggValueSlot::DoesNotOverlap); // Leave the Cleanup if we entered one. 
-    if (CleanupDominator) {
-      DeactivateCleanupBlock(Cleanup, CleanupDominator);
-      CleanupDominator->eraseFromParent();
-    }
+    deactivation.ForceDeactivate();
 
     // Advance to the next element by adjusting the pointer type as necessary.
     llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 6474d6c8c1d1e..a2d746bb8f4f9 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -91,6 +91,8 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
 
 CodeGenFunction::~CodeGenFunction() {
   assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
+  assert(DeferredDeactivationCleanupStack.empty() &&
+         "failed to deactivate a cleanup");
 
   if (getLangOpts().OpenMP && CurFn)
     CGM.getOpenMPRuntime().functionFinished(*this);
@@ -346,6 +348,10 @@ static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
   assert(BreakContinueStack.empty() &&
          "mismatched push/pop in break/continue stack!");
+  assert(LifetimeExtendedCleanupStack.empty() &&
+         "mismatched push/pop of cleanups in EHStack!");
+  assert(DeferredDeactivationCleanupStack.empty() &&
+         "mismatched activate/deactivate of cleanups!");
 
   bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
     && NumSimpleReturnExprs == NumReturnExprs
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index ff1873325d409..c49e9fd00c8d3 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -39,6 +39,7 @@
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -670,6 +671,51 @@ class CodeGenFunction : public CodeGenTypeCache {
   EHScopeStack EHStack;
 
   llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
+
+  // A stack of cleanups which were added to EHStack but have to be deactivated
+  // later, before being popped or emitted. These are usually deactivated on
+  // exiting a `CleanupDeactivationScope`, for instance at the end of a
+  // full-expression.
+  //
+  // These are especially useful for correctly emitting cleanups when
+  // encountering branches out of an expression (through stmt-exprs or
+  // coroutine suspensions).
+  struct DeferredDeactivateCleanup {
+    EHScopeStack::stable_iterator Cleanup;
+    llvm::Instruction *DominatingIP;
+  };
+  llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;
+
+  // Enters a new scope for capturing cleanups whose deactivation has been
+  // deferred; all of them are deactivated once the scope is exited.
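+  //
+  // Rough usage sketch (`SomeCleanupT` is an illustrative cleanup class, not
+  // one defined here):
+  //
+  //   CleanupDeactivationScope Scope(CGF);
+  //   CGF.pushCleanupAndDeferDeactivation<SomeCleanupT>(NormalAndEHCleanup,
+  //                                                     /*args*/);
+  //   // ... emit the rest of the full-expression ...
+  //   // Leaving `Scope` deactivates the deferred cleanups and erases their
+  //   // placeholder dominating IPs; ForceDeactivate() does the same eagerly.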
+  struct CleanupDeactivationScope {
+    CodeGenFunction &CGF;
+    size_t OldDeactivateCleanupStackSize;
+    bool Deactivated;
+    CleanupDeactivationScope(CodeGenFunction &CGF)
+        : CGF(CGF), OldDeactivateCleanupStackSize(
+                        CGF.DeferredDeactivationCleanupStack.size()),
+          Deactivated(false) {}
+
+    void ForceDeactivate() {
+      assert(!Deactivated && "Deactivating already deactivated scope");
+      auto &Stack = CGF.DeferredDeactivationCleanupStack;
+      for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
+        CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
+                                   Stack[I - 1].DominatingIP);
+        Stack[I - 1].DominatingIP->eraseFromParent();
+      }
+      Stack.resize(OldDeactivateCleanupStackSize);
+      Deactivated = true;
+    }
+
+    ~CleanupDeactivationScope() {
+      if (Deactivated)
+        return;
+      ForceDeactivate();
+    }
+  };
+
   llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
 
   llvm::Instruction *CurrentFuncletPad = nullptr;
@@ -875,6 +921,19 @@ class CodeGenFunction : public CodeGenTypeCache {
     new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
   }
 
+  // Push a cleanup onto EHStack and deactivate it later. It is usually
+  // deactivated when exiting a `CleanupDeactivationScope` (for example, after
+  // a full expression).
+  template <class T, class... As>
+  void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
+    // Placeholder dominating IP for this cleanup.
+    llvm::Instruction *DominatingIP =
+        Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
+    EHStack.pushCleanup<T>(Kind, A...);
+    DeferredDeactivationCleanupStack.push_back(
+        {EHStack.stable_begin(), DominatingIP});
+  }
+
   /// Set up the last cleanup that was pushed as a conditional
   /// full-expression cleanup.
   void initFullExprCleanup() {
@@ -926,6 +985,7 @@ class CodeGenFunction : public CodeGenTypeCache {
   class RunCleanupsScope {
     EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
     size_t LifetimeExtendedCleanupStackSize;
+    CleanupDeactivationScope DeactivateCleanups;
     bool OldDidCallStackSave;
   protected:
     bool PerformCleanup;
@@ -940,8 +1000,7 @@ class CodeGenFunction : public CodeGenTypeCache {
   public:
     /// Enter a new cleanup scope.
     explicit RunCleanupsScope(CodeGenFunction &CGF)
-      : PerformCleanup(true), CGF(CGF)
-    {
+        : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
       CleanupStackDepth = CGF.EHStack.stable_begin();
       LifetimeExtendedCleanupStackSize =
         CGF.LifetimeExtendedCleanupStack.size();
@@ -971,6 +1030,7 @@ class CodeGenFunction : public CodeGenTypeCache {
     void ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
       assert(PerformCleanup && "Already forced cleanup");
       CGF.DidCallStackSave = OldDidCallStackSave;
+      DeactivateCleanups.ForceDeactivate();
       CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
                            ValuesToReload);
       PerformCleanup = false;
@@ -2160,6 +2220,11 @@ class CodeGenFunction : public CodeGenTypeCache {
                                   Address addr, QualType type);
   void pushDestroy(CleanupKind kind, Address addr, QualType type,
                    Destroyer *destroyer, bool useEHCleanupForArray);
+  void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
+                                       Address addr, QualType type);
+  void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
+                                       QualType type, Destroyer *destroyer,
+                                       bool useEHCleanupForArray);
   void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
                                    QualType type, Destroyer *destroyer,
                                    bool useEHCleanupForArray);
@@ -2698,6 +2763,33 @@ class CodeGenFunction : public CodeGenTypeCache {
                                      TBAAAccessInfo *TBAAInfo = nullptr);
   LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
 
+private:
+  struct AllocaTracker {
+    void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
+    llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
+
+  private:
+    llvm::SmallVector<llvm::AllocaInst *> Allocas;
+  };
+  AllocaTracker *Allocas = nullptr;
+
+public:
+  // Captures all the allocas created during the lifetime of this RAII object.
+  struct AllocaTrackerRAII {
+    AllocaTrackerRAII(CodeGenFunction &CGF)
+        : CGF(CGF), OldTracker(CGF.Allocas) {
+      CGF.Allocas = &Tracker;
+    }
+    ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
+
+    llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
+
+  private:
+    CodeGenFunction &CGF;
+    AllocaTracker *OldTracker;
+    AllocaTracker Tracker;
+  };
+
   /// CreateTempAlloca - This creates an alloca and inserts it into the entry
   /// block if \p ArraySize is nullptr, otherwise inserts it at the current
   /// insertion point of the builder.
The caller is responsible for setting an diff --git a/clang/test/CodeGenCXX/control-flow-in-stmt-expr.cpp b/clang/test/CodeGenCXX/control-flow-in-stmt-expr.cpp new file mode 100644 index 0000000000000..ffde1bd6a724d --- /dev/null +++ b/clang/test/CodeGenCXX/control-flow-in-stmt-expr.cpp @@ -0,0 +1,364 @@ +// RUN: %clang_cc1 --std=c++20 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s + +struct Printy { + Printy(const char *name) : name(name) {} + ~Printy() {} + const char *name; +}; + +int foo() { return 2; } + +struct Printies { + Printy a; + Printy b; + Printy c; +}; + +void ParenInit() { + // CHECK-LABEL: define dso_local void @_Z9ParenInitv() + // CHECK: [[CLEANUP_DEST:%.+]] = alloca i32, align 4 + Printies ps(Printy("a"), + // CHECK: call void @_ZN6PrintyC1EPKc + ({ + if (foo()) return; + // CHECK: if.then: + // CHECK-NEXT: store i32 1, ptr [[CLEANUP_DEST]], align 4 + // CHECK-NEXT: br label %cleanup + Printy("b"); + // CHECK: if.end: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + }), + ({ + if (foo()) return; + // CHECK: if.then{{.*}}: + // CHECK-NEXT: store i32 1, ptr [[CLEANUP_DEST]], align 4 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: br label %cleanup + Printy("c"); + // CHECK: if.end{{.*}}: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZN8PrintiesD1Ev + // CHECK-NEXT: br label %return + })); + // CHECK: cleanup: + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: br label %return +} + +void break_in_stmt_expr() { + // Verify that the "break" in "if.then".calls dtor before jumping to "for.end". + + // CHECK-LABEL: define dso_local void @_Z18break_in_stmt_exprv() + Printies p{Printy("a"), + // CHECK: call void @_ZN6PrintyC1EPKc + ({ + for (;;) { + Printies ps{ + Printy("b"), + // CHECK: for.cond: + // CHECK: call void @_ZN6PrintyC1EPKc + ({ + if (foo()) { + break; + // CHECK: if.then: + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: br label %for.end + } + Printy("c"); + // CHECK: if.end: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + }), + Printy("d")}; + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZN8PrintiesD1Ev + // CHECK-NEXT: br label %for.cond + } + Printy("e"); + // CHECK: for.end: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + }), + Printy("f")}; + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZN8PrintiesD1Ev +} + +void goto_in_stmt_expr() { + // Verify that: + // - correct branch fixups for deactivated normal cleanups are generated correctly. 
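+  // - jumping to a label outside the statement expressions reaches the outer
+  //   destructor through a chain of cleanup blocks (see the switches on the
+  //   cleanup destination slot below).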
+ + // CHECK-LABEL: define dso_local void @_Z17goto_in_stmt_exprv() + // CHECK: [[CLEANUP_DEST_SLOT:%cleanup.dest.slot.*]] = alloca i32, align 4 + { + Printies p1{Printy("a"), // CHECK: call void @_ZN6PrintyC1EPKc + ({ + { + Printies p2{Printy("b"), + // CHECK: call void @_ZN6PrintyC1EPKc + ({ + if (foo() == 1) { + goto in; + // CHECK: if.then: + // CHECK-NEXT: store i32 2, ptr [[CLEANUP_DEST_SLOT]], align 4 + // CHECK-NEXT: br label %[[CLEANUP1:.+]] + } + if (foo() == 2) { + goto out; + // CHECK: if.then{{.*}}: + // CHECK-NEXT: store i32 3, ptr [[CLEANUP_DEST_SLOT]], align 4 + // CHECK-NEXT: br label %[[CLEANUP1]] + } + Printy("c"); + // CHECK: if.end{{.*}}: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + }), + Printy("d")}; + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZN8PrintiesD1Ev + // CHECK-NEXT: br label %in + + } + in: + Printy("e"); + // CHECK: in: ; preds = %if.end{{.*}}, %[[CLEANUP1]] + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + }), + Printy("f")}; + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZN8PrintiesD1Ev + // CHECK-NEXT: br label %out + } +out: + return; + // CHECK: out: + // CHECK-NEXT: ret void + + // CHECK: [[CLEANUP1]]: ; preds = %if.then{{.*}}, %if.then + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: %cleanup.dest = load i32, ptr [[CLEANUP_DEST_SLOT]], align 4 + // CHECK-NEXT: switch i32 %cleanup.dest, label %[[CLEANUP2:.+]] [ + // CHECK-NEXT: i32 2, label %in + // CHECK-NEXT: ] + + // CHECK: [[CLEANUP2]]: ; preds = %[[CLEANUP1]] + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: %cleanup.dest{{.*}} = load i32, ptr [[CLEANUP_DEST_SLOT]], align 4 + // CHECK-NEXT: switch i32 %cleanup.dest{{.*}}, label %unreachable [ + // CHECK-NEXT: i32 3, label %out + // CHECK-NEXT: ] +} + +void ArrayInit() { + // Printy arr[4] = {ctorA, ctorB, stmt-exprC, stmt-exprD}; + // Verify that: + // - We do the necessary stores for array cleanups (endOfInit and last constructed element). + // - We update the array init element correctly for ctorA, ctorB and stmt-exprC. + // - stmt-exprC and stmt-exprD share the array body dtor code (see %cleanup). 
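+  // - a return inside either statement expression branches to %cleanup, which
+  //   destroys only the elements recorded so far via arrayinit.endOfInit.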
+ + // CHECK-LABEL: define dso_local void @_Z9ArrayInitv() + // CHECK: %arrayinit.endOfInit = alloca ptr, align 8 + // CHECK: %cleanup.dest.slot = alloca i32, align 4 + // CHECK: %arrayinit.begin = getelementptr inbounds [4 x %struct.Printy], ptr %arr, i64 0, i64 0 + // CHECK: store ptr %arrayinit.begin, ptr %arrayinit.endOfInit, align 8 + Printy arr[4] = { + Printy("a"), + // CHECK: call void @_ZN6PrintyC1EPKc(ptr noundef nonnull align 8 dereferenceable(8) %arrayinit.begin, ptr noundef @.str) + // CHECK: [[ARRAYINIT_ELEMENT1:%.+]] = getelementptr inbounds %struct.Printy, ptr %arrayinit.begin, i64 1 + // CHECK: store ptr [[ARRAYINIT_ELEMENT1]], ptr %arrayinit.endOfInit, align 8 + Printy("b"), + // CHECK: call void @_ZN6PrintyC1EPKc(ptr noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT1]], ptr noundef @.str.1) + // CHECK: [[ARRAYINIT_ELEMENT2:%.+]] = getelementptr inbounds %struct.Printy, ptr [[ARRAYINIT_ELEMENT1]], i64 1 + // CHECK: store ptr [[ARRAYINIT_ELEMENT2]], ptr %arrayinit.endOfInit, align 8 + ({ + // CHECK: br i1 {{.*}}, label %if.then, label %if.end + if (foo()) { + return; + // CHECK: if.then: + // CHECK-NEXT: store i32 1, ptr %cleanup.dest.slot, align 4 + // CHECK-NEXT: br label %cleanup + } + // CHECK: if.end: + Printy("c"); + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: %arrayinit.element2 = getelementptr inbounds %struct.Printy, ptr %arrayinit.element1, i64 1 + // CHECK-NEXT: store ptr %arrayinit.element2, ptr %arrayinit.endOfInit, align 8 + }), + ({ + // CHECK: br i1 {{%.+}} label %[[IF_THEN2:.+]], label %[[IF_END2:.+]] + if (foo()) { + return; + // CHECK: [[IF_THEN2]]: + // CHECK-NEXT: store i32 1, ptr %cleanup.dest.slot, align 4 + // CHECK-NEXT: br label %cleanup + } + // CHECK: [[IF_END2]]: + Printy("d"); + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: %array.begin = getelementptr inbounds [4 x %struct.Printy], ptr %arr, i32 0, i32 0 + // CHECK-NEXT: %0 = getelementptr inbounds %struct.Printy, ptr %array.begin, i64 4 + // CHECK-NEXT: br label %[[ARRAY_DESTROY_BODY1:.+]] + }), + }; + + // CHECK: [[ARRAY_DESTROY_BODY1]]: + // CHECK-NEXT: %arraydestroy.elementPast{{.*}} = phi ptr [ %0, %[[IF_END2]] ], [ %arraydestroy.element{{.*}}, %[[ARRAY_DESTROY_BODY1]] ] + // CHECK-NEXT: %arraydestroy.element{{.*}} = getelementptr inbounds %struct.Printy, ptr %arraydestroy.elementPast{{.*}}, i64 -1 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: %arraydestroy.done{{.*}} = icmp eq ptr %arraydestroy.element{{.*}}, %array.begin + // CHECK-NEXT: br i1 %arraydestroy.done{{.*}}, label %[[ARRAY_DESTROY_DONE1:.+]], label %[[ARRAY_DESTROY_BODY1]] + + // CHECK: [[ARRAY_DESTROY_DONE1]]: + // CHECK-NEXT: ret void + + // CHECK: cleanup: + // CHECK-NEXT: %1 = load ptr, ptr %arrayinit.endOfInit, align 8 + // CHECK-NEXT: %arraydestroy.isempty = icmp eq ptr %arrayinit.begin, %1 + // CHECK-NEXT: br i1 %arraydestroy.isempty, label %[[ARRAY_DESTROY_DONE2:.+]], label %[[ARRAY_DESTROY_BODY2:.+]] + + // CHECK: [[ARRAY_DESTROY_BODY2]]: + // CHECK-NEXT: %arraydestroy.elementPast = phi ptr [ %1, %cleanup ], [ %arraydestroy.element, %[[ARRAY_DESTROY_BODY2]] ] + // CHECK-NEXT: %arraydestroy.element = getelementptr inbounds %struct.Printy, ptr %arraydestroy.elementPast, i64 -1 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %arraydestroy.element) + // CHECK-NEXT: %arraydestroy.done = icmp eq ptr %arraydestroy.element, %arrayinit.begin + // CHECK-NEXT: br i1 %arraydestroy.done, label %[[ARRAY_DESTROY_DONE2]], label 
%[[ARRAY_DESTROY_BODY2]] + + // CHECK: [[ARRAY_DESTROY_DONE2]]: + // CHECK-NEXT: br label %[[ARRAY_DESTROY_DONE1]] +} + +void ArraySubobjects() { + struct S { + Printy arr1[2]; + Printy arr2[2]; + Printy p; + }; + // CHECK-LABEL: define dso_local void @_Z15ArraySubobjectsv() + // CHECK: %arrayinit.endOfInit = alloca ptr, align 8 + S s{{Printy("a"), Printy("b")}, + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK: call void @_ZN6PrintyC1EPKc + {Printy("a"), + // CHECK: [[ARRAYINIT_BEGIN:%.+]] = getelementptr inbounds [2 x %struct.Printy] + // CHECK: store ptr [[ARRAYINIT_BEGIN]], ptr %arrayinit.endOfInit, align 8 + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK: [[ARRAYINIT_ELEMENT:%.+]] = getelementptr inbounds %struct.Printy + // CHECK: store ptr [[ARRAYINIT_ELEMENT]], ptr %arrayinit.endOfInit, align 8 + ({ + if (foo()) { + return; + // CHECK: if.then: + // CHECK-NEXT: [[V0:%.+]] = load ptr, ptr %arrayinit.endOfInit, align 8 + // CHECK-NEXT: %arraydestroy.isempty = icmp eq ptr [[ARRAYINIT_BEGIN]], [[V0]] + // CHECK-NEXT: br i1 %arraydestroy.isempty, label %[[ARRAY_DESTROY_DONE:.+]], label %[[ARRAY_DESTROY_BODY:.+]] + } + Printy("b"); + }) + }, + Printy("c") + // CHECK: if.end: + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK-NEXT: call void @_ZZ15ArraySubobjectsvEN1SD1Ev + // CHECK-NEXT: br label %return + }; + // CHECK: return: + // CHECK-NEXT: ret void + + // CHECK: [[ARRAY_DESTROY_BODY]]: + // CHECK-NEXT: %arraydestroy.elementPast = phi ptr [ %0, %if.then ], [ %arraydestroy.element, %[[ARRAY_DESTROY_BODY]] ] + // CHECK-NEXT: %arraydestroy.element = getelementptr inbounds %struct.Printy, ptr %arraydestroy.elementPast, i64 -1 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %arraydestroy.element) + // CHECK-NEXT: %arraydestroy.done = icmp eq ptr %arraydestroy.element, [[ARRAYINIT_BEGIN]] + // CHECK-NEXT: br i1 %arraydestroy.done, label %[[ARRAY_DESTROY_DONE]], label %[[ARRAY_DESTROY_BODY]] + + // CHECK: [[ARRAY_DESTROY_DONE]] + // CHECK-NEXT: [[ARRAY_BEGIN:%.+]] = getelementptr inbounds [2 x %struct.Printy], ptr %arr1, i32 0, i32 0 + // CHECK-NEXT: [[V1:%.+]] = getelementptr inbounds %struct.Printy, ptr [[ARRAY_BEGIN]], i64 2 + // CHECK-NEXT: br label %[[ARRAY_DESTROY_BODY2:.+]] + + // CHECK: [[ARRAY_DESTROY_BODY2]]: + // CHECK-NEXT: %arraydestroy.elementPast5 = phi ptr [ %1, %[[ARRAY_DESTROY_DONE]] ], [ %arraydestroy.element6, %[[ARRAY_DESTROY_BODY2]] ] + // CHECK-NEXT: %arraydestroy.element6 = getelementptr inbounds %struct.Printy, ptr %arraydestroy.elementPast5, i64 -1 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %arraydestroy.element6) + // CHECK-NEXT: %arraydestroy.done7 = icmp eq ptr %arraydestroy.element6, [[ARRAY_BEGIN]] + // CHECK-NEXT: br i1 %arraydestroy.done7, label %[[ARRAY_DESTROY_DONE2:.+]], label %[[ARRAY_DESTROY_BODY2]] + + + // CHECK: [[ARRAY_DESTROY_DONE2]]: + // CHECK-NEXT: br label %return +} + +void LambdaInit() { + // CHECK-LABEL: define dso_local void @_Z10LambdaInitv() + auto S = [a = Printy("a"), b = ({ + if (foo()) { + return; + // CHECK: if.then: + // CHECK-NEXT: call void @_ZN6PrintyD1Ev + // CHECK-NEXT: br label %return + } + Printy("b"); + })]() { return a; }; +} + +void LifetimeExtended() { + // CHECK-LABEL: define dso_local void @_Z16LifetimeExtendedv + struct PrintyRefBind { + const Printy &a; + const Printy &b; + }; + PrintyRefBind ps = {Printy("a"), ({ + if (foo()) { + return; + // CHECK: if.then: + // CHECK-NEXT: call 
void @_ZN6PrintyD1Ev + // CHECK-NEXT: br label %return + } + Printy("b"); + })}; +} + +void NewArrayInit() { + // CHECK-LABEL: define dso_local void @_Z12NewArrayInitv() + // CHECK: %array.init.end = alloca ptr, align 8 + // CHECK: store ptr %0, ptr %array.init.end, align 8 + Printy *array = new Printy[3]{ + "a", + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK: store ptr %array.exp.next, ptr %array.init.end, align 8 + "b", + // CHECK: call void @_ZN6PrintyC1EPKc + // CHECK: store ptr %array.exp.next1, ptr %array.init.end, align 8 + ({ + if (foo()) { + return; + // CHECK: if.then: + // CHECK: br i1 %arraydestroy.isempty, label %arraydestroy.done{{.*}}, label %arraydestroy.body + } + "b"; + // CHECK: if.end: + // CHECK: call void @_ZN6PrintyC1EPKc + })}; + // CHECK: arraydestroy.body: + // CHECK-NEXT: %arraydestroy.elementPast = phi ptr [ %{{.*}}, %if.then ], [ %arraydestroy.element, %arraydestroy.body ] + // CHECK-NEXT: %arraydestroy.element = getelementptr inbounds %struct.Printy, ptr %arraydestroy.elementPast, i64 -1 + // CHECK-NEXT: call void @_ZN6PrintyD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %arraydestroy.element) + // CHECK-NEXT: %arraydestroy.done = icmp eq ptr %arraydestroy.element, %0 + // CHECK-NEXT: br i1 %arraydestroy.done, label %arraydestroy.done{{.*}}, label %arraydestroy.body + + // CHECK: arraydestroy.done{{.*}}: ; preds = %arraydestroy.body, %if.then + // CHECK-NEXT: br label %return +} + +void ArrayInitWithContinue() { + // CHECK-LABEL: @_Z21ArrayInitWithContinuev + // Verify that we start to emit the array destructor. + // CHECK: %arrayinit.endOfInit = alloca ptr, align 8 + for (int i = 0; i < 1; ++i) { + Printy arr[2] = {"a", ({ + if (foo()) { + continue; + } + "b"; + })}; + } +} diff --git a/clang/test/CodeGenCoroutines/coro-suspend-cleanups.cpp b/clang/test/CodeGenCoroutines/coro-suspend-cleanups.cpp new file mode 100644 index 0000000000000..06cc2069dbe9a --- /dev/null +++ b/clang/test/CodeGenCoroutines/coro-suspend-cleanups.cpp @@ -0,0 +1,93 @@ +// RUN: %clang_cc1 --std=c++20 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s + +#include "Inputs/coroutine.h" + +struct Printy { + Printy(const char *name) : name(name) {} + ~Printy() {} + const char *name; +}; + +struct coroutine { + struct promise_type; + std::coroutine_handle handle; + ~coroutine() { + if (handle) handle.destroy(); + } +}; + +struct coroutine::promise_type { + coroutine get_return_object() { + return {std::coroutine_handle::from_promise(*this)}; + } + std::suspend_never initial_suspend() noexcept { return {}; } + std::suspend_always final_suspend() noexcept { return {}; } + void return_void() {} + void unhandled_exception() {} +}; + +struct Awaiter : std::suspend_always { + Printy await_resume() { return {"awaited"}; } +}; + +int foo() { return 2; } + +coroutine ArrayInitCoro() { + // Verify that: + // - We do the necessary stores for array cleanups. + // - Array cleanups are called by await.cleanup. + // - We activate the cleanup after the first element and deactivate it in await.ready (see cleanup.isactive). 
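+  // - The await.cleanup path (taken when the coroutine is destroyed at the
+  //   suspend point) reaches the same shared cleanup block and runs the
+  //   partial array destructor.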
+ + // CHECK-LABEL: define dso_local void @_Z13ArrayInitCorov + // CHECK: %arrayinit.endOfInit = alloca ptr, align 8 + // CHECK: %cleanup.isactive = alloca i1, align 1 + Printy arr[2] = { + Printy("a"), + // CHECK: %arrayinit.begin = getelementptr inbounds [2 x %struct.Printy], ptr %arr.reload.addr, i64 0, i64 0 + // CHECK-NEXT: %arrayinit.begin.spill.addr = getelementptr inbounds %_Z13ArrayInitCorov.Frame, ptr %0, i32 0, i32 10 + // CHECK-NEXT: store ptr %arrayinit.begin, ptr %arrayinit.begin.spill.addr, align 8 + // CHECK-NEXT: store i1 true, ptr %cleanup.isactive.reload.addr, align 1 + // CHECK-NEXT: store ptr %arrayinit.begin, ptr %arrayinit.endOfInit.reload.addr, align 8 + // CHECK-NEXT: call void @_ZN6PrintyC1EPKc(ptr noundef nonnull align 8 dereferenceable(8) %arrayinit.begin, ptr noundef @.str) + // CHECK-NEXT: %arrayinit.element = getelementptr inbounds %struct.Printy, ptr %arrayinit.begin, i64 1 + // CHECK-NEXT: %arrayinit.element.spill.addr = getelementptr inbounds %_Z13ArrayInitCorov.Frame, ptr %0, i32 0, i32 11 + // CHECK-NEXT: store ptr %arrayinit.element, ptr %arrayinit.element.spill.addr, align 8 + // CHECK-NEXT: store ptr %arrayinit.element, ptr %arrayinit.endOfInit.reload.addr, align 8 + co_await Awaiter{} + // CHECK-NEXT: @_ZNSt14suspend_always11await_readyEv + // CHECK-NEXT: br i1 %{{.+}}, label %await.ready, label %CoroSave30 + }; + // CHECK: await.cleanup: ; preds = %AfterCoroSuspend{{.*}} + // CHECK-NEXT: br label %cleanup{{.*}}.from.await.cleanup + + // CHECK: cleanup{{.*}}.from.await.cleanup: ; preds = %await.cleanup + // CHECK: br label %cleanup{{.*}} + + // CHECK: await.ready: + // CHECK-NEXT: %arrayinit.element.reload.addr = getelementptr inbounds %_Z13ArrayInitCorov.Frame, ptr %0, i32 0, i32 11 + // CHECK-NEXT: %arrayinit.element.reload = load ptr, ptr %arrayinit.element.reload.addr, align 8 + // CHECK-NEXT: call void @_ZN7Awaiter12await_resumeEv + // CHECK-NEXT: store i1 false, ptr %cleanup.isactive.reload.addr, align 1 + // CHECK-NEXT: br label %cleanup{{.*}}.from.await.ready + + // CHECK: cleanup{{.*}}: ; preds = %cleanup{{.*}}.from.await.ready, %cleanup{{.*}}.from.await.cleanup + // CHECK: %cleanup.is_active = load i1, ptr %cleanup.isactive.reload.addr, align 1 + // CHECK-NEXT: br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done + + // CHECK: cleanup.action: + // CHECK: %arraydestroy.isempty = icmp eq ptr %arrayinit.begin.reload{{.*}}, %{{.*}} + // CHECK-NEXT: br i1 %arraydestroy.isempty, label %arraydestroy.done{{.*}}, label %arraydestroy.body.from.cleanup.action + // Ignore rest of the array cleanup. +} + +coroutine ArrayInitWithCoReturn() { + // CHECK-LABEL: define dso_local void @_Z21ArrayInitWithCoReturnv + // Verify that we start to emit the array destructor. + // CHECK: %arrayinit.endOfInit = alloca ptr, align 8 + Printy arr[2] = {"a", ({ + if (foo()) { + co_return; + } + "b"; + })}; +}