[CIR] Handle return with cleanups #163849
Conversation
@llvm/pr-subscribers-clangir @llvm/pr-subscribers-clang

Author: Andy Kaylor (andykaylor)

Changes: This adds support for branching through a cleanup block when a return statement is encountered while we're in a scope with cleanups.

Patch is 41.29 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/163849.diff

10 Files Affected:
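For orientation before the diff, here is a minimal sketch of the kind of source this change affects. It is a hypothetical example, not one of the patch's tests: the local has a non-trivial destructor, so the enclosing scope carries a cleanup that every return must run through.

// Hypothetical example (not from the patch's test suite).
struct Guard {
  ~Guard() {} // user-provided destructor: CIRGen pushes a cleanup for it
};

int f(bool early) {
  Guard g;    // this scope now owns a destructor cleanup
  if (early)
    return 0; // cannot exit directly; it must branch through the cleanup
              // block so ~Guard() runs before the function returns
  return 1;   // the fallthrough return threads through the same cleanup
}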
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 4fbae150b587e..4a5027f22d054 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -210,6 +210,9 @@ struct MissingFeatures {
static bool checkBitfieldClipping() { return false; }
static bool cirgenABIInfo() { return false; }
static bool cleanupAfterErrorDiags() { return false; }
+ static bool cleanupAppendInsts() { return false; }
+ static bool cleanupBranchThrough() { return false; }
+ static bool cleanupIndexAndBIAdjustment() { return false; }
static bool cleanupsToDeactivate() { return false; }
static bool constEmitterAggILE() { return false; }
static bool constEmitterArrayILE() { return false; }
@@ -230,6 +233,7 @@ struct MissingFeatures {
static bool deleteArray() { return false; }
static bool devirtualizeMemberFunction() { return false; }
static bool ehCleanupFlags() { return false; }
+ static bool ehCleanupHasPrebranchedFallthrough() { return false; }
static bool ehCleanupScope() { return false; }
static bool ehCleanupScopeRequiresEHCleanup() { return false; }
static bool ehCleanupBranchFixups() { return false; }
@@ -285,6 +289,7 @@ struct MissingFeatures {
static bool setNonGC() { return false; }
static bool setObjCGCLValueClass() { return false; }
static bool setTargetAttributes() { return false; }
+ static bool simplifyCleanupEntry() { return false; }
static bool sourceLanguageCases() { return false; }
static bool stackBase() { return false; }
static bool stackSaveOp() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 870069715df22..b19ff426805db 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -28,6 +28,46 @@ using namespace clang::CIRGen;
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//
+/// Build a unconditional branch to the lexical scope cleanup block
+/// or with the labeled blocked if already solved.
+///
+/// Track on scope basis, goto's we need to fix later.
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location loc,
+ JumpDest dest) {
+ // Insert a branch: to the cleanup block (unsolved) or to the already
+ // materialized label. Keep track of unsolved goto's.
+ assert(dest.getBlock() && "assumes incoming valid dest");
+ auto brOp = cir::BrOp::create(builder, loc, dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator topCleanup =
+ ehStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (topCleanup == ehStack.stable_end() ||
+ topCleanup.encloses(dest.getScopeDepth())) { // works for invalid
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!dest.getScopeDepth().isValid()) {
+ BranchFixup &fixup = ehStack.addBranchFixup();
+ fixup.destination = dest.getBlock();
+ fixup.destinationIndex = dest.getDestIndex();
+ fixup.initialBranch = brOp;
+ fixup.optimisticBranchBlock = nullptr;
+ // FIXME(cir): should we clear insertion point here?
+ return brOp;
+ }
+
+ cgm.errorNYI(loc, "emitBranchThroughCleanup: valid destination scope depth");
+ return brOp;
+}
+
/// Emits all the code to cause the given temporary to be cleaned up.
void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
QualType tempType, Address ptr) {
@@ -40,6 +80,19 @@ void CIRGenFunction::emitCXXTemporary(const CXXTemporary *temporary,
void EHScopeStack::Cleanup::anchor() {}
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ stable_iterator si = getInnermostNormalCleanup();
+ stable_iterator se = stable_end();
+ while (si != se) {
+ EHCleanupScope &cleanup = llvm::cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive())
+ return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t size) {
size = llvm::alignTo(size, ScopeStackAlignment);
@@ -75,14 +128,30 @@ void EHScopeStack::deallocate(size_t size) {
startOfData += llvm::alignTo(size, ScopeStackAlignment);
}
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ cgf->cgm.errorNYI("popNullFixups");
+}
+
void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
char *buffer = allocate(EHCleanupScope::getSizeForCleanupSize(size));
+ bool isNormalCleanup = kind & NormalCleanup;
bool isEHCleanup = kind & EHCleanup;
bool isLifetimeMarker = kind & LifetimeMarker;
assert(!cir::MissingFeatures::innermostEHScope());
- EHCleanupScope *scope = new (buffer) EHCleanupScope(size);
+ EHCleanupScope *scope = new (buffer)
+ EHCleanupScope(size, branchFixups.size(), innermostNormalCleanup);
+
+ if (isNormalCleanup)
+ innermostNormalCleanup = stable_begin();
if (isLifetimeMarker)
cgf->cgm.errorNYI("push lifetime marker cleanup");
@@ -100,12 +169,23 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &cleanup = cast<EHCleanupScope>(*begin());
+ innermostNormalCleanup = cleanup.getEnclosingNormalCleanup();
deallocate(cleanup.getAllocatedSize());
// Destroy the cleanup.
cleanup.destroy();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // Check whether we can shrink the branch-fixups stack.
+ if (!branchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups()) {
+ branchFixups.clear();
+ } else {
+ // Otherwise we can still trim out unnecessary nulls.
+ popNullFixups();
+ }
+ }
}
static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
@@ -116,6 +196,18 @@ static void emitCleanup(CIRGenFunction &cgf, EHScopeStack::Cleanup *cleanup) {
assert(cgf.haveInsertPoint() && "cleanup ended with no insertion point?");
}
+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+ EHCleanupScope &scope) {
+ assert(scope.isNormalCleanup());
+ mlir::Block *entry = scope.getNormalBlock();
+ if (!entry) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ entry = cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ scope.setNormalBlock(entry);
+ }
+ return entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -123,17 +215,21 @@ void CIRGenFunction::popCleanupBlock() {
assert(!ehStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*ehStack.begin()) && "top not a cleanup!");
EHCleanupScope &scope = cast<EHCleanupScope>(*ehStack.begin());
+ assert(scope.getFixupDepth() <= ehStack.getNumBranchFixups());
// Remember activation information.
bool isActive = scope.isActive();
- assert(!cir::MissingFeatures::ehCleanupBranchFixups());
+ // - whether there are branch fix-ups through this cleanup
+ unsigned fixupDepth = scope.getFixupDepth();
+ bool hasFixups = ehStack.getNumBranchFixups() != fixupDepth;
// - whether there's a fallthrough
mlir::Block *fallthroughSource = builder.getInsertionBlock();
bool hasFallthrough = fallthroughSource != nullptr && isActive;
- bool requiresNormalCleanup = scope.isNormalCleanup() && hasFallthrough;
+ bool requiresNormalCleanup =
+ scope.isNormalCleanup() && (hasFixups || hasFallthrough);
// If we don't need the cleanup at all, we're done.
assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
@@ -168,9 +264,119 @@ void CIRGenFunction::popCleanupBlock() {
assert(!cir::MissingFeatures::ehCleanupFlags());
- ehStack.popCleanup();
- scope.markEmitted();
- emitCleanup(*this, cleanup);
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (hasFallthrough && !hasFixups) {
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+ ehStack.popCleanup();
+ scope.markEmitted();
+ emitCleanup(*this, cleanup);
+ } else {
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+
+ // Force the entry block to exist.
+ mlir::Block *normalEntry = createNormalEntry(*this, scope);
+
+ // I. Set up the fallthrough edge in.
+ mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (hasFallthrough) {
+ assert(!cir::MissingFeatures::ehCleanupHasPrebranchedFallthrough());
+
+ } else if (fallthroughSource) {
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ assert(!isActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = builder.saveInsertionPoint();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ builder.setInsertionPointToEnd(normalEntry);
+
+ // intercept normal cleanup to mark SEH scope end
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+ bool hasEnclosingCleanups =
+ (scope.getEnclosingNormalCleanup() != ehStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ if (hasFixups && hasEnclosingCleanups)
+ cgm.errorNYI("cleanup branch-through dest");
+
+ mlir::Block *fallthroughDest = nullptr;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
+ assert(!cir::MissingFeatures::cleanupBranchThrough());
+ assert(!cir::MissingFeatures::ehCleanupScopeRequiresEHCleanup());
+
+ // IV. Pop the cleanup and emit it.
+ scope.markEmitted();
+ ehStack.popCleanup();
+ assert(ehStack.hasNormalCleanups() == hasEnclosingCleanups);
+
+ emitCleanup(*this, cleanup);
+
+ // Append the prepared cleanup prologue from above.
+ assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+ // Optimistically hope that any fixups will continue falling through.
+ if (fixupDepth != ehStack.getNumBranchFixups())
+ cgm.errorNYI("cleanup fixup depth mismatch");
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!hasFallthrough && fallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!isActive);
+ cgm.errorNYI("cleanup inactive fallthrough");
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (hasFallthrough && fallthroughDest) {
+ cgm.errorNYI("cleanup fallthrough destination");
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (hasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ // FIXME(cir): should we clear insertion point here?
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ //
+ // If it did invalidate those pointers, and normalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ assert(!cir::MissingFeatures::simplifyCleanupEntry());
+ }
}
/// Pops cleanup blocks until the given savepoint is reached.
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
index 30f5607d655da..63ccc65e53dac 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -72,6 +72,18 @@ class EHScope {
/// A cleanup scope which generates the cleanup blocks lazily.
class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
: public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator enclosingNormal;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ mlir::Block *normalBlock = nullptr;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned fixupDepth = 0;
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
@@ -83,7 +95,10 @@ class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
return sizeof(EHCleanupScope) + cleanupBits.cleanupSize;
}
- EHCleanupScope(unsigned cleanupSize) : EHScope(EHScope::Cleanup) {
+ EHCleanupScope(unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal)
+ : EHScope(EHScope::Cleanup), enclosingNormal(enclosingNormal),
+ fixupDepth(fixupDepth) {
// TODO(cir): When exception handling is upstreamed, isNormalCleanup and
// isEHCleanup will be arguments to the constructor.
cleanupBits.isNormalCleanup = true;
@@ -101,11 +116,19 @@ class alignas(EHScopeStack::ScopeStackAlignment) EHCleanupScope
// Objects of EHCleanupScope are not destructed. Use destroy().
~EHCleanupScope() = delete;
+ mlir::Block *getNormalBlock() const { return normalBlock; }
+ void setNormalBlock(mlir::Block *bb) { normalBlock = bb; }
+
bool isNormalCleanup() const { return cleanupBits.isNormalCleanup; }
bool isActive() const { return cleanupBits.isActive; }
void setActive(bool isActive) { cleanupBits.isActive = isActive; }
+ unsigned getFixupDepth() const { return fixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return enclosingNormal;
+ }
+
size_t getCleanupSize() const { return cleanupBits.cleanupSize; }
void *getCleanupBuffer() { return this + 1; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 0d64c31f01668..c6e05c45ea3aa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -60,11 +60,44 @@ class CIRGenFunction : public CIRGenTypeCache {
/// is where the next operations will be introduced.
CIRGenBuilderTy &builder;
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() = default;
+ JumpDest(mlir::Block *block, EHScopeStack::stable_iterator depth = {},
+ unsigned index = 0)
+ : block(block), scopeDepth(depth), index(index) {}
+
+ bool isValid() const { return block != nullptr; }
+ mlir::Block *getBlock() const { return block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return scopeDepth; }
+ unsigned getDestIndex() const { return index; }
+
+ // This should be used cautiously.
+ void setScopeDepth(EHScopeStack::stable_iterator depth) {
+ scopeDepth = depth;
+ }
+
+ private:
+ mlir::Block *block = nullptr;
+ EHScopeStack::stable_iterator scopeDepth;
+ unsigned index;
+ };
+
public:
/// The GlobalDecl for the current function being compiled or the global
/// variable currently being initialized.
clang::GlobalDecl curGD;
+ /// Unified return block.
+ /// In CIR this is a function because each scope might have
+ /// its associated return block.
+ JumpDest returnBlock(mlir::Block *retBlock) {
+ return getJumpDestInCurrentScope(retBlock);
+ }
+
+ unsigned nextCleanupDestIndex = 1;
+
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
@@ -574,6 +607,16 @@ class CIRGenFunction : public CIRGenTypeCache {
}
};
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ /// CIRGen: this mostly tracks state for figuring out the proper scope
+ /// information, no actual branches are emitted.
+ JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+ return JumpDest(target, ehStack.getInnermostNormalCleanup(),
+ nextCleanupDestIndex++);
+ }
+
/// Perform the usual unary conversions on the specified expression and
/// compare the result against zero, returning an Int1Ty value.
mlir::Value evaluateExprAsBool(const clang::Expr *e);
@@ -1192,6 +1235,8 @@ class CIRGenFunction : public CIRGenTypeCache {
LValue emitBinaryOperatorLValue(const BinaryOperator *e);
+ cir::BrOp emitBranchThroughCleanup(mlir::Location loc, JumpDest dest);
+
mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &s);
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID,
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 5ba64ddb85272..a09b6f672b0c3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -446,54 +446,88 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
mlir::Location loc = getLoc(s.getSourceRange());
const Expr *rv = s.getRetValue();
- if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
- s.getNRVOCandidate()->isNRVOVariable()) {
- assert(!cir::MissingFeatures::openMP());
- assert(!cir::MissingFeatures::nrvo());
- } else if (!rv) {
- // No return expression. Do nothing.
- } else if (rv->getType()->isVoidType()) {
- // Make sure not to return anything, but evaluate the expression
- // for side effects.
- if (rv) {
- emitAnyExpr(rv);
+ RunCleanupsScope cleanupScope(*this);
+ bool createNewScope = false;
+ if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
+ rv = ewc->getSubExpr();
+ createNewScope = true;
+ }
+
+ auto handleReturnVal = [&]() {
+ if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
+ s.getNRVOCandidate()->isNRVOVariable()) {
+ assert(!cir::MissingFeatures::openMP());
+ assert(!cir::MissingFeatures::nrvo());
+ } else if (!rv) {
+ // No return expression. Do nothing.
+ } el...
[truncated]
I'm not entirely convinced this is the direction we want to go long-term, so I'd like to solicit opinions on that question specifically. This matches the handling for this case in the incubator, which is closely modeled after the same handling in classic codegen. Classic codegen does more with the BranchFixups, which may simply be implementation we haven't gotten to yet. The alternative for CIR would be to explicitly represent EHStackScope entries in CIR, as @bcardosolopes has previously suggested, and apply the branching during FlattenCFG. So for example, this patch leads to the following CIR (before CIRCanonicalize) for the added VLA test:
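(The CIR listing that followed was lost from this comment. What follows is a rough, hypothetical reconstruction of its shape only, with operation names abbreviated and types elided; it is not the patch's actual output. The branch-through approach materializes the cleanup as an ordinary block that the return branches to:)

// Hypothetical sketch, not actual CIRGen output: the VLA's stack-restore
// cleanup becomes a plain block, and the return branches into it.
cir.func @test_vla(%n : !u32i) {
  %saved = cir.stack_save ...      // VLA setup: save the stack pointer
  %vla = cir.alloca ..., %n ...    // the variable-length array
  ...
  cir.br ^cleanup                  // the return statement lands here
^cleanup:                          // block created by popCleanupBlock()
  cir.stack_restore %saved ...     // the cleanup itself
  cir.return
}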
The alternative would look something like this:
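(This listing was lost as well; again only a hypothetical sketch, with an assumed cleanup-region syntax. The idea is to keep the cleanup structured, attached to its scope as a region, and defer the branching to FlattenCFG:)

// Hypothetical sketch: an explicit cleanup region stays attached to the
// scope, and FlattenCFG rewrites the enclosed return to run it first.
cir.func @test_vla(%n : !u32i) {
  cir.scope {
    %saved = cir.stack_save ...
    %vla = cir.alloca ..., %n ...
    ...
    cir.return                     // rerouted during FlattenCFG
  } cleanup {
    cir.stack_restore %saved ...   // runs on every exit from the scope
  }
}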
The FlattenCFG transform would need to recognize the new cleanup representation and apply the branching there. Thoughts?
I'm up for the tradeoff here; the lower-level alternative will make the code way harder to analyze.
LGTM
I'm not certain which tradeoff you're talking about. Are you saying we should proceed with representing the EHStackScope entries explicitly in CIR?
LGTM
// CIRGenFunction cleanup related
//===----------------------------------------------------------------------===//

/// Build a unconditional branch to the lexical scope cleanup block
- /// Build a unconditional branch to the lexical scope cleanup block
+ /// Build an unconditional branch to the lexical scope cleanup block
Force-pushed from a7dc9ef to c67e759: This adds support for branching through a cleanup block when a return statement is encountered while we're in a scope with cleanups.