diff --git a/include/swift/SIL/DebugUtils.h b/include/swift/SIL/DebugUtils.h index 39b309ba75d05..3f1095767ace4 100644 --- a/include/swift/SIL/DebugUtils.h +++ b/include/swift/SIL/DebugUtils.h @@ -171,8 +171,18 @@ inline SILInstruction *getSingleNonDebugUser(SILValue V) { /// Precondition: The instruction may only have debug instructions as uses. /// If the iterator \p InstIter references any deleted instruction, it is /// incremented. -inline void eraseFromParentWithDebugInsts(SILInstruction *I, - SILBasicBlock::iterator &InstIter) { +/// +/// \p callBack will be invoked before each instruction is deleted. \p callBack +/// is not responsible for deleting the instruction because this utility +/// unconditionally deletes the \p I and its debug users. +/// +/// Returns an iterator to the next non-deleted instruction after \p I. +inline SILBasicBlock::iterator eraseFromParentWithDebugInsts( + SILInstruction *I, llvm::function_ref callBack = + [](SILInstruction *) {}) { + + auto nextII = std::next(I->getIterator()); + auto results = I->getResults(); bool foundAny; @@ -183,26 +193,16 @@ inline void eraseFromParentWithDebugInsts(SILInstruction *I, foundAny = true; auto *User = result->use_begin()->getUser(); assert(User->isDebugInstruction()); - if (InstIter == User->getIterator()) - InstIter++; - + if (nextII == User->getIterator()) + nextII++; + callBack(User); User->eraseFromParent(); } } } while (foundAny); - if (InstIter == I->getIterator()) - ++InstIter; - I->eraseFromParent(); -} - -/// Erases the instruction \p I from it's parent block and deletes it, including -/// all debug instructions which use \p I. -/// Precondition: The instruction may only have debug instructions as uses. 
-inline void eraseFromParentWithDebugInsts(SILInstruction *I) { - SILBasicBlock::iterator nullIter; - eraseFromParentWithDebugInsts(I, nullIter); + return nextII; } /// Return true if the def-use graph rooted at \p V contains any non-debug, diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index f417d491f6520..ac9086de63af8 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -3252,9 +3252,27 @@ class BeginBorrowInst SingleValueInstruction> { friend class SILBuilder; + /// Predicate used to filter EndBorrowRange. + struct UseToEndBorrow; + BeginBorrowInst(SILDebugLocation DebugLoc, SILValue LValue) : UnaryInstructionBase(DebugLoc, LValue, LValue->getType().getObjectType()) {} + +public: + using EndBorrowRange = + OptionalTransformRange; + + /// Return a range over all EndBorrow instructions for this BeginBorrow. + EndBorrowRange getEndBorrows() const; + + /// Return the single use of this BeginBorrowInst, not including any + /// EndBorrowInst uses, or return nullptr if the borrow is dead or has + /// multiple uses. + /// + /// Useful for matching common SILGen patterns that emit one borrow per use, + /// and simplifying pass logic. + Operand *getSingleNonEndingUse() const; }; /// Represents a store of a borrowed value into an address. Returns the borrowed @@ -3346,6 +3364,20 @@ class EndBorrowInst } }; +struct BeginBorrowInst::UseToEndBorrow { + Optional operator()(Operand *use) const { + if (auto borrow = dyn_cast(use->getUser())) { + return borrow; + } else { + return None; + } + } +}; + +inline auto BeginBorrowInst::getEndBorrows() const -> EndBorrowRange { + return EndBorrowRange(getUses(), UseToEndBorrow()); +} + /// Different kinds of access. enum class SILAccessKind : uint8_t { /// An access which takes uninitialized memory and initializes it. 
diff --git a/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h b/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h index 19ccdd94ae3ff..3997a118aae15 100644 --- a/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h +++ b/include/swift/SILOptimizer/Analysis/SimplifyInstruction.h @@ -38,7 +38,7 @@ SILValue simplifyInstruction(SILInstruction *I); /// /// If it is nonnull, eraseNotify will be called before each instruction is /// deleted. -void replaceAllSimplifiedUsesAndErase( +SILBasicBlock::iterator replaceAllSimplifiedUsesAndErase( SILInstruction *I, SILValue result, std::function eraseNotify = nullptr); diff --git a/include/swift/SILOptimizer/PassManager/Passes.def b/include/swift/SILOptimizer/PassManager/Passes.def index dc4f3f422dfc4..eb6a5cdac035b 100644 --- a/include/swift/SILOptimizer/PassManager/Passes.def +++ b/include/swift/SILOptimizer/PassManager/Passes.def @@ -267,6 +267,8 @@ PASS(SideEffectsDumper, "side-effects-dump", "Print Side-Effect Information for all Functions") PASS(IRGenPrepare, "irgen-prepare", "Cleanup SIL in preparation for IRGen") +PASS(SILGenCleanup, "silgen-cleanup", + "Cleanup SIL in preparation for diagnostics") PASS(SILCombine, "sil-combine", "Combine SIL Instructions via Peephole Optimization") PASS(SILDebugInfoGenerator, "sil-debuginfo-gen", diff --git a/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h b/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h new file mode 100644 index 0000000000000..a82b117970a23 --- /dev/null +++ b/include/swift/SILOptimizer/Utils/CanonicalizeInstruction.h @@ -0,0 +1,87 @@ +//===-- CanonicalizeInstruction.h - canonical SIL peepholes -----*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2019 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// SSA-peephole transformations that yield a more canonical SIL representation. +/// +/// Unlike simplifyInstruction, these transformations may effect any +/// instruction, not only single-values, and may arbitrarily generate new SIL +/// instructions. +/// +/// Unlike SILCombine, these peepholes must work on 'raw' SIL form and should be +/// limited to those necessary to aid in diagnostics and other mandatory +/// pipelin/e passes. Optimization may only be done to the extent that it +/// neither interferes with diagnostics nor increases compile time. +/// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_SILOPTIMIZER_UTILS_CANONICALIZEINSTRUCTION_H +#define SWIFT_SILOPTIMIZER_UTILS_CANONICALIZEINSTRUCTION_H + +#include "swift/SIL/SILBasicBlock.h" +#include "swift/SIL/SILInstruction.h" +#include "llvm/Support/Debug.h" + +namespace swift { + +/// Abstract base class. Implements all canonicalization transforms. Extended by +/// passes to be notified of each SIL modification. +struct CanonicalizeInstruction { + // May be overriden by passes. + static constexpr const char *defaultDebugType = "sil-canonicalize"; + const char *debugType = defaultDebugType; + + CanonicalizeInstruction(const char *passDebugType) { +#ifndef NDEBUG + if (llvm::DebugFlag && !llvm::isCurrentDebugType(debugType)) + debugType = passDebugType; +#endif + } + + virtual ~CanonicalizeInstruction(); + + /// Rewrite this instruction, based on its operands and uses, into a more + /// canonical representation. + /// + /// Return an iterator to the next instruction or to the end of the block. 
+ /// The returned iterator will follow any newly added or to-be-deleted + /// instructions, regardless of whether the pass immediately deletes the + /// instructions or simply records them for later deletion. + /// + /// To (re)visit new instructions, override notifyNewInstruction(). + /// + /// To determine if any transformation at all occurred, override + /// notifyNewInstruction(), killInstruction(), and notifyNewUsers(). + /// + /// Warning: If the \p inst argument is killed and the client immediately + /// erases \p inst, then it may be an invalid pointer upon return. + SILBasicBlock::iterator canonicalize(SILInstruction *inst); + + /// Record a newly generated instruction. + virtual void notifyNewInstruction(SILInstruction *inst) = 0; + + /// Kill an instruction that no longer has uses, or whose side effect is now + /// represented by a different instruction. The client can defer erasing the + /// instruction but must eventually erase all killed instructions to restore + /// valid SIL. + /// + /// This callback should not mutate any other instructions. It may only delete + /// the given argument. It will be called separately for each end-of-scope and + /// debug use before being called on the instruction they use. + virtual void killInstruction(SILInstruction *inst) = 0; + + /// Record a SIL value that has acquired new users. + virtual void notifyHasNewUsers(SILValue value) = 0; +}; + +} // end namespace swift + +#endif // SWIFT_SILOPTIMIZER_UTILS_CANONICALIZEINSTRUCTION_H diff --git a/include/swift/SILOptimizer/Utils/Local.h b/include/swift/SILOptimizer/Utils/Local.h index ba10c2717314d..a20e86ff36776 100644 --- a/include/swift/SILOptimizer/Utils/Local.h +++ b/include/swift/SILOptimizer/Utils/Local.h @@ -64,20 +64,6 @@ recursivelyDeleteTriviallyDeadInstructions( ArrayRef I, bool Force = false, llvm::function_ref C = [](SILInstruction *){}); -/// For each of the given instructions, if they are dead delete them -/// along with their dead operands. 
-/// -/// \param I The ArrayRef of instructions to be deleted. -/// \param InstIter is updated to the next valid instruction if it points to any -/// deleted instruction, including debug values. -/// \param Force If Force is set, don't check if the top level instructions -/// are considered dead - delete them regardless. -/// \param C a callback called whenever an instruction is deleted. -void recursivelyDeleteTriviallyDeadInstructions( - ArrayRef I, SILBasicBlock::iterator &InstIter, - bool Force = false, - llvm::function_ref C = [](SILInstruction *) {}); - /// If the given instruction is dead, delete it along with its dead /// operands. /// @@ -85,10 +71,7 @@ void recursivelyDeleteTriviallyDeadInstructions( /// \param Force If Force is set, don't check if the top level instruction is /// considered dead - delete it regardless. /// \param C a callback called whenever an instruction is deleted. -/// -/// Returns a valid instruction iterator to the next nondeleted instruction -/// after `I`. 
-SILBasicBlock::iterator recursivelyDeleteTriviallyDeadInstructions( +void recursivelyDeleteTriviallyDeadInstructions( SILInstruction *I, bool Force = false, llvm::function_ref C = [](SILInstruction *) {}); diff --git a/lib/SIL/SILInstruction.cpp b/lib/SIL/SILInstruction.cpp index fc305944db916..35756127f8ae9 100644 --- a/lib/SIL/SILInstruction.cpp +++ b/lib/SIL/SILInstruction.cpp @@ -279,6 +279,20 @@ void SILInstruction::replaceAllUsesPairwiseWith( } } +Operand *BeginBorrowInst::getSingleNonEndingUse() const { + Operand *singleUse = nullptr; + for (auto *use : getUses()) { + if (isa(use->getUser())) + continue; + + if (singleUse) + return nullptr; + + singleUse = use; + } + return singleUse; +} + namespace { class InstructionDestroyer : public SILInstructionVisitor { diff --git a/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp b/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp index 138ade8e5eef6..17fe07bcd2193 100644 --- a/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp +++ b/lib/SILOptimizer/Analysis/SimplifyInstruction.cpp @@ -2,13 +2,22 @@ // // This source file is part of the Swift.org open source project // -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// +/// +/// An SSA-peephole analysis. Given a single-value instruction, find an existing +/// equivalent but less costly or more canonical SIL value. +/// +/// This analysis must handle 'raw' SIL form. It should be possible to perform +/// the substitution discovered by the analysis without interfering with +/// subsequent diagnostic passes. 
+/// +//===----------------------------------------------------------------------===// #define DEBUG_TYPE "sil-simplify" #include "swift/SILOptimizer/Analysis/SimplifyInstruction.h" @@ -665,12 +674,17 @@ SILValue swift::simplifyInstruction(SILInstruction *I) { /// Replace an instruction with a simplified result, including any debug uses, /// and erase the instruction. If the instruction initiates a scope, do not /// replace the end of its scope; it will be deleted along with its parent. -void swift::replaceAllSimplifiedUsesAndErase( +/// +/// This is a simple transform based on the above analysis. +/// +/// Return an iterator to the next (nondeleted) instruction. +SILBasicBlock::iterator swift::replaceAllSimplifiedUsesAndErase( SILInstruction *I, SILValue result, - std::function eraseNotify) { + std::function eraseHandler) { auto *SVI = cast(I); assert(SVI != result && "Cannot RAUW a value with itself"); + SILBasicBlock::iterator nextii = std::next(I->getIterator()); // Only SingleValueInstructions are currently simplified. while (!SVI->use_empty()) { @@ -678,16 +692,22 @@ void swift::replaceAllSimplifiedUsesAndErase( SILInstruction *user = use->getUser(); // Erase the end of scope marker. if (isEndOfScopeMarker(user)) { - if (eraseNotify) - eraseNotify(user); - user->eraseFromParent(); + if (&*nextii == user) + ++nextii; + if (eraseHandler) + eraseHandler(user); + else + user->eraseFromParent(); continue; } use->set(result); } - I->eraseFromParent(); - if (eraseNotify) - eraseNotify(I); + if (eraseHandler) + eraseHandler(I); + else + I->eraseFromParent(); + + return nextii; } /// Simplify invocations of builtin operations that may overflow. 
diff --git a/lib/SILOptimizer/Mandatory/CMakeLists.txt b/lib/SILOptimizer/Mandatory/CMakeLists.txt index 58b55374d13df..da410b33958cb 100644 --- a/lib/SILOptimizer/Mandatory/CMakeLists.txt +++ b/lib/SILOptimizer/Mandatory/CMakeLists.txt @@ -2,6 +2,7 @@ silopt_register_sources( AccessEnforcementSelection.cpp AccessMarkerElimination.cpp AddressLowering.cpp + ClosureLifetimeFixup.cpp ConstantPropagation.cpp DefiniteInitialization.cpp DIMemoryUseCollector.cpp @@ -15,8 +16,8 @@ silopt_register_sources( MandatoryInlining.cpp PredictableMemOpt.cpp PMOMemoryUseCollector.cpp - SemanticARCOpts.cpp - ClosureLifetimeFixup.cpp RawSILInstLowering.cpp + SemanticARCOpts.cpp + SILGenCleanup.cpp YieldOnceCheck.cpp ) diff --git a/lib/SILOptimizer/Mandatory/DefiniteInitialization.cpp b/lib/SILOptimizer/Mandatory/DefiniteInitialization.cpp index 85203f9de0ec9..f550a2916c75f 100644 --- a/lib/SILOptimizer/Mandatory/DefiniteInitialization.cpp +++ b/lib/SILOptimizer/Mandatory/DefiniteInitialization.cpp @@ -861,8 +861,7 @@ void LifetimeChecker::handleLoadForTypeOfSelfUse(const DIMemoryUse &Use) { valueMetatype->getLoc(), metatypeArgument, valueMetatype->getType()); } - replaceAllSimplifiedUsesAndErase(valueMetatype, metatypeArgument, - [](SILInstruction*) { }); + replaceAllSimplifiedUsesAndErase(valueMetatype, metatypeArgument); } } diff --git a/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp b/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp new file mode 100644 index 0000000000000..5b51c722ec2a4 --- /dev/null +++ b/lib/SILOptimizer/Mandatory/SILGenCleanup.cpp @@ -0,0 +1,111 @@ +//===--- SILGenCleanup.cpp ------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2019 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// Perform peephole-style "cleanup" to aid SIL diagnostic passes. +/// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "silgen-cleanup" + +#include "swift/SIL/SILInstruction.h" +#include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/CanonicalizeInstruction.h" +#include "swift/SILOptimizer/Utils/Local.h" + +using namespace swift; + +// Define a CanonicalizeInstruction subclass for use in SILGenCleanup. +struct SILGenCanonicalize final : CanonicalizeInstruction { + bool changed = false; + llvm::SmallPtrSet deadOperands; + + SILGenCanonicalize() : CanonicalizeInstruction(DEBUG_TYPE) {} + + void notifyNewInstruction(SILInstruction *) override { changed = true; } + + // Just delete the given 'inst' and record its operands. The callback isn't + // allowed to mutate any other instructions. + void killInstruction(SILInstruction *inst) override { + deadOperands.erase(inst); + for (auto &operand : inst->getAllOperands()) { + if (auto *operInst = operand.get()->getDefiningInstruction()) + deadOperands.insert(operInst); + } + inst->eraseFromParent(); + changed = true; + } + + void notifyHasNewUsers(SILValue) override { changed = true; } + + SILBasicBlock::iterator deleteDeadOperands(SILBasicBlock::iterator nextII) { + // Delete trivially dead instructions in non-determistic order. + while (!deadOperands.empty()) { + SILInstruction *deadOperInst = *deadOperands.begin(); + // Make sure at least the first instruction is removed from the set. 
+ deadOperands.erase(deadOperInst); + recursivelyDeleteTriviallyDeadInstructions( + deadOperInst, false, + [&](SILInstruction *deadInst) { + LLVM_DEBUG(llvm::dbgs() << "Trivially dead: " << *deadInst); + if (nextII == deadInst->getIterator()) + ++nextII; + deadOperands.erase(deadInst); + }); + } + return nextII; + } +}; + +//===----------------------------------------------------------------------===// +// SILGenCleanup: Top-Level Module Transform +//===----------------------------------------------------------------------===// + +namespace { + +// SILGenCleanup must run on all functions that will be seen by any analysis +// used by diagnostics before transforming the function that requires the +// analysis. e.g. Closures need to be cleaned up before the closure's parent can +// be diagnosed. +// +// TODO: This pass can be converted to a function transform if the mandatory +// pipeline runs in bottom-up closure order. +struct SILGenCleanup : SILModuleTransform { + void run() override; +}; + +void SILGenCleanup::run() { + auto &module = *getModule(); + for (auto &function : module) { + LLVM_DEBUG(llvm::dbgs() + << "\nRunning SILGenCleanup on " << function.getName() << "\n"); + + SILGenCanonicalize sgCanonicalize; + + // Iterate over all blocks even if they aren't reachable. No phi-less + // dataflow cycles should have been created yet, and these transformations + // are simple enough they shouldn't be affected by cycles. 
+ for (auto &bb : function) { + for (auto ii = bb.begin(), ie = bb.end(); ii != ie;) { + ii = sgCanonicalize.canonicalize(&*ii); + ii = sgCanonicalize.deleteDeadOperands(ii); + } + } + if (sgCanonicalize.changed) { + auto invalidKind = SILAnalysis::InvalidationKind::Instructions; + invalidateAnalysis(&function, invalidKind); + } + } +} + +} // end anonymous namespace + +SILTransform *swift::createSILGenCleanup() { return new SILGenCleanup(); } diff --git a/lib/SILOptimizer/PassManager/PassPipeline.cpp b/lib/SILOptimizer/PassManager/PassPipeline.cpp index 57b1e3534ee81..908ea8cf9c0b5 100644 --- a/lib/SILOptimizer/PassManager/PassPipeline.cpp +++ b/lib/SILOptimizer/PassManager/PassPipeline.cpp @@ -81,6 +81,7 @@ static void addDefiniteInitialization(SILPassPipelinePlan &P) { static void addMandatoryOptPipeline(SILPassPipelinePlan &P) { P.startPipeline("Guaranteed Passes"); + P.addSILGenCleanup(); P.addDiagnoseInvalidEscapingCaptures(); P.addDiagnoseStaticExclusivity(); P.addCapturePromotion(); diff --git a/lib/SILOptimizer/SILCombiner/SILCombine.cpp b/lib/SILOptimizer/SILCombiner/SILCombine.cpp index 1f9b23d756887..39257bbf3344b 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombine.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombine.cpp @@ -19,16 +19,17 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "sil-combine" -#include "swift/SILOptimizer/PassManager/Passes.h" #include "SILCombiner.h" +#include "swift/SIL/DebugUtils.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILVisitor.h" -#include "swift/SIL/DebugUtils.h" #include "swift/SILOptimizer/Analysis/AliasAnalysis.h" #include "swift/SILOptimizer/Analysis/SimplifyInstruction.h" +#include "swift/SILOptimizer/PassManager/Passes.h" #include "swift/SILOptimizer/PassManager/Transforms.h" -#include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" +#include "swift/SILOptimizer/Utils/CanonicalizeInstruction.h" #include "swift/SILOptimizer/Utils/Local.h" 
+#include "swift/SILOptimizer/Utils/SILOptFunctionBuilder.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -36,7 +37,6 @@ using namespace swift; -STATISTIC(NumSimplified, "Number of instructions simplified"); STATISTIC(NumCombined, "Number of instructions combined"); STATISTIC(NumDeadInst, "Number of dead insts eliminated"); @@ -101,6 +101,28 @@ void SILCombiner::addReachableCodeToWorklist(SILBasicBlock *BB) { addInitialGroup(InstrsForSILCombineWorklist); } +static void eraseSingleInstFromFunction(SILInstruction &I, + SILCombineWorklist &Worklist, + bool AddOperandsToWorklist) { + LLVM_DEBUG(llvm::dbgs() << "SC: ERASE " << I << '\n'); + + assert(!I.hasUsesOfAnyResult() && "Cannot erase instruction that is used!"); + + // Make sure that we reprocess all operands now that we reduced their + // use counts. + if (I.getNumOperands() < 8 && AddOperandsToWorklist) { + for (auto &OpI : I.getAllOperands()) { + if (auto *Op = OpI.get()->getDefiningInstruction()) { + LLVM_DEBUG(llvm::dbgs() << "SC: add op " << *Op + << " from erased inst to worklist\n"); + Worklist.add(Op); + } + } + } + Worklist.remove(&I); + I.eraseFromParent(); +} + //===----------------------------------------------------------------------===// // Implementation //===----------------------------------------------------------------------===// @@ -113,6 +135,41 @@ void SILCombineWorklist::add(SILInstruction *I) { Worklist.push_back(I); } +// Define a CanonicalizeInstruction subclass for use in SILCombine. 
+class SILCombineCanonicalize final : CanonicalizeInstruction { + SILCombineWorklist &Worklist; + bool changed = false; + +public: + SILCombineCanonicalize(SILCombineWorklist &Worklist) + : CanonicalizeInstruction(DEBUG_TYPE), Worklist(Worklist) {} + + void notifyNewInstruction(SILInstruction *inst) override { + Worklist.add(inst); + Worklist.addUsersOfAllResultsToWorklist(inst); + changed = true; + } + + // Just delete the given 'inst' and record its operands. The callback isn't + // allowed to mutate any other instructions. + void killInstruction(SILInstruction *inst) override { + eraseSingleInstFromFunction(*inst, Worklist, + /*AddOperandsToWorklist*/ true); + changed = true; + } + + void notifyHasNewUsers(SILValue value) override { + Worklist.addUsersToWorklist(value); + changed = true; + } + + bool tryCanonicalize(SILInstruction *inst) { + changed = false; + canonicalize(inst); + return changed; + } +}; + bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) { MadeChange = false; @@ -122,6 +179,8 @@ bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) { // Add reachable instructions to our worklist. addReachableCodeToWorklist(&*F.begin()); + SILCombineCanonicalize scCanonicalize(Worklist); + // Process until we run out of items in our worklist. while (!Worklist.isEmpty()) { SILInstruction *I = Worklist.removeOne(); @@ -143,23 +202,8 @@ bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) { continue; } - // Check to see if we can instsimplify the instruction. - if (SILValue Result = simplifyInstruction(I)) { - ++NumSimplified; - - LLVM_DEBUG(llvm::dbgs() << "SC: Simplify Old = " << *I << '\n' - << " New = " << *Result << '\n'); - - // Erase the simplified instruction and any instructions that end its - // scope. Nothing needs to be added to the worklist except for Result, - // because the instruction and all non-replaced users will be deleted. 
- replaceAllSimplifiedUsesAndErase( - I, Result, - [this](SILInstruction *Deleted) { Worklist.remove(Deleted); }); - - // Push the new instruction and any users onto the worklist. - Worklist.addUsersToWorklist(Result); - + // Canonicalize the instruction. + if (scCanonicalize.tryCanonicalize(I)) { MadeChange = true; continue; } @@ -312,34 +356,28 @@ void SILCombiner::replaceInstUsesPairwiseWith(SILInstruction *oldI, // instruction, visit methods should use this method to delete the given // instruction and upon completion of their peephole return the value returned // by this method. -SILInstruction *SILCombiner::eraseInstFromFunction(SILInstruction &I, - SILBasicBlock::iterator &InstIter, - bool AddOperandsToWorklist) { - LLVM_DEBUG(llvm::dbgs() << "SC: ERASE " << I << '\n'); - - assert(onlyHaveDebugUsesOfAllResults(&I) && - "Cannot erase instruction that is used!"); - - // Make sure that we reprocess all operands now that we reduced their - // use counts. - if (I.getNumOperands() < 8 && AddOperandsToWorklist) { - for (auto &OpI : I.getAllOperands()) { - if (auto *Op = OpI.get()->getDefiningInstruction()) { - LLVM_DEBUG(llvm::dbgs() << "SC: add op " << *Op - << " from erased inst to worklist\n"); - Worklist.add(Op); - } +SILInstruction * +SILCombiner::eraseInstFromFunction(SILInstruction &I, + SILBasicBlock::iterator &InstIter, + bool AddOperandsToWorklist) { + // Delete any debug users first. 
+ for (auto result : I.getResults()) { + while (!result->use_empty()) { + auto *user = result->use_begin()->getUser(); + assert(user->isDebugInstruction()); + if (InstIter == user->getIterator()) + ++InstIter; + Worklist.remove(user); + user->eraseFromParent(); } } + if (InstIter == I.getIterator()) + ++InstIter; - for (auto result : I.getResults()) - for (Operand *DU : getDebugUses(result)) - Worklist.remove(DU->getUser()); - - Worklist.remove(&I); - eraseFromParentWithDebugInsts(&I, InstIter); + eraseSingleInstFromFunction(I, Worklist, AddOperandsToWorklist); MadeChange = true; - return nullptr; // Don't do anything with I + // Dummy return, so the caller doesn't need to explicitly return nullptr. + return nullptr; } //===----------------------------------------------------------------------===// diff --git a/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp b/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp index b64efba4ab647..39471765bd533 100644 --- a/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp +++ b/lib/SILOptimizer/SILCombiner/SILCombinerMiscVisitors.cpp @@ -671,65 +671,7 @@ SILInstruction *SILCombiner::visitLoadInst(LoadInst *LI) { if (SILInstruction *I = optimizeLoadFromStringLiteral(LI)) return I; - // Given a load with multiple struct_extracts/tuple_extracts and no other - // uses, canonicalize the load into several (struct_element_addr (load)) - // pairs. - - struct ProjInstPair { - Projection P; - SingleValueInstruction *I; - - // When sorting, just look at the projection and ignore the instruction. - bool operator<(const ProjInstPair &RHS) const { return P < RHS.P; } - }; - - // Go through the loads uses and add any users that are projections to the - // projection list. - llvm::SmallVector Projections; - for (auto *UI : getNonDebugUses(LI)) { - auto *User = UI->getUser(); - - // If we have any non SEI, TEI instruction, don't do anything here. 
- if (!isa(User) && !isa(User)) - return nullptr; - - auto extract = cast(User); - Projections.push_back({Projection(extract), extract}); - } - - // The reason why we sort the list is so that we will process projections with - // the same value decl and tuples with the same indices together. This makes - // it easy to reuse the load from the first such projection for all subsequent - // projections on the same value decl or index. - std::sort(Projections.begin(), Projections.end()); - - // Go through our sorted list creating new GEPs only when we need to. - Projection *LastProj = nullptr; - LoadInst *LastNewLoad = nullptr; - for (auto &Pair : Projections) { - auto &Proj = Pair.P; - auto *Inst = Pair.I; - - // If this projection is the same as the last projection we processed, just - // replace all uses of the projection with the load we created previously. - if (LastProj && Proj == *LastProj) { - replaceInstUsesWith(*Inst, LastNewLoad); - eraseInstFromFunction(*Inst); - continue; - } - - // Ok, we have started to visit the range of instructions associated with - // a new projection. Create the new address projection. - auto I = Proj.createAddressProjection(Builder, LI->getLoc(), LI->getOperand()); - LastProj = &Proj; - LastNewLoad = Builder.createLoad(LI->getLoc(), I.get(), - LoadOwnershipQualifier::Unqualified); - replaceInstUsesWith(*Inst, LastNewLoad); - eraseInstFromFunction(*Inst); - } - - // Erase the old load. 
- return eraseInstFromFunction(*LI); + return nullptr; } /// Optimize nested index_addr instructions: diff --git a/lib/SILOptimizer/Transforms/CSE.cpp b/lib/SILOptimizer/Transforms/CSE.cpp index 76f48bbc72003..59b2e1f11cc48 100644 --- a/lib/SILOptimizer/Transforms/CSE.cpp +++ b/lib/SILOptimizer/Transforms/CSE.cpp @@ -524,8 +524,7 @@ class CSE { }; bool processNode(DominanceInfoNode *Node); - bool processOpenExistentialRef(OpenExistentialRefInst *Inst, ValueBase *V, - SILBasicBlock::iterator &I); + bool processOpenExistentialRef(OpenExistentialRefInst *Inst, ValueBase *V); }; } // namespace swift @@ -660,9 +659,8 @@ static void updateBasicBlockArgTypes(SILBasicBlock *BB, /// be replaced by a dominating instruction. /// \Inst is the open_existential_ref instruction /// \V is the dominating open_existential_ref instruction -/// \I is the iterator referring to the current instruction. -bool CSE::processOpenExistentialRef(OpenExistentialRefInst *Inst, ValueBase *V, - SILBasicBlock::iterator &I) { +bool CSE::processOpenExistentialRef(OpenExistentialRefInst *Inst, + ValueBase *V) { // All the open instructions are single-value instructions. auto VI = dyn_cast(V); if (!VI) return false; @@ -707,7 +705,7 @@ bool CSE::processOpenExistentialRef(OpenExistentialRefInst *Inst, ValueBase *V, OpenedArchetypesTracker.registerOpenedArchetypes(VI); // Use a cloner. It makes copying the instruction and remapping of // opened archetypes trivial. - InstructionCloner Cloner(I->getFunction()); + InstructionCloner Cloner(Inst->getFunction()); Cloner.registerOpenedExistentialRemapping( OldOpenedArchetype->castTo(), NewOpenedArchetype); auto &Builder = Cloner.getBuilder(); @@ -763,9 +761,7 @@ bool CSE::processOpenExistentialRef(OpenExistentialRefInst *Inst, ValueBase *V, // Result types of candidate's uses instructions may be using this archetype. // Thus, we need to try to replace it there. 
Candidate->replaceAllUsesPairwiseWith(NewI); - if (I == Candidate->getIterator()) - I = NewI->getIterator(); - eraseFromParentWithDebugInsts(Candidate, I); + eraseFromParentWithDebugInsts(Candidate); } return true; } @@ -787,7 +783,7 @@ bool CSE::processNode(DominanceInfoNode *Node) { // Dead instructions should just be removed. if (isInstructionTriviallyDead(Inst)) { LLVM_DEBUG(llvm::dbgs() << "SILCSE DCE: " << *Inst << '\n'); - eraseFromParentWithDebugInsts(Inst, nextI); + nextI = eraseFromParentWithDebugInsts(Inst); Changed = true; ++NumSimplify; continue; @@ -798,11 +794,7 @@ bool CSE::processNode(DominanceInfoNode *Node) { if (SILValue V = simplifyInstruction(Inst)) { LLVM_DEBUG(llvm::dbgs() << "SILCSE SIMPLIFY: " << *Inst << " to: " << *V << '\n'); - replaceAllSimplifiedUsesAndErase(Inst, V, - [&nextI](SILInstruction *deleteI) { - if (nextI == deleteI->getIterator()) - ++nextI; - }); + nextI = replaceAllSimplifiedUsesAndErase(Inst, V); Changed = true; ++NumSimplify; continue; @@ -827,9 +819,12 @@ bool CSE::processNode(DominanceInfoNode *Node) { // because replacing these instructions may require a replacement // of the opened archetype type operands in some of the uses. if (!isa(Inst) - || processOpenExistentialRef(cast(Inst), - cast(AvailInst), - nextI)) { + || processOpenExistentialRef( + cast(Inst), + cast(AvailInst))) { + // processOpenExistentialRef may delete instructions other than Inst, so + // nextI must be reassigned. 
+ nextI = std::next(Inst->getIterator()); Inst->replaceAllUsesPairwiseWith(AvailInst); Inst->eraseFromParent(); Changed = true; diff --git a/lib/SILOptimizer/Utils/CMakeLists.txt b/lib/SILOptimizer/Utils/CMakeLists.txt index 1115a8acd17e0..b45f12afffded 100644 --- a/lib/SILOptimizer/Utils/CMakeLists.txt +++ b/lib/SILOptimizer/Utils/CMakeLists.txt @@ -1,5 +1,6 @@ silopt_register_sources( CFG.cpp + CanonicalizeInstruction.cpp CastOptimizer.cpp CheckedCastBrJumpThreading.cpp ConstantFolding.cpp diff --git a/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp b/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp new file mode 100644 index 0000000000000..bdc5a4cdefd40 --- /dev/null +++ b/lib/SILOptimizer/Utils/CanonicalizeInstruction.cpp @@ -0,0 +1,326 @@ +//===--- CanonicalizeInstruction.cpp - canonical SIL peepholes ------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// SSA-peephole transformations that yield a more canonical SIL representation. +/// +/// A superset of simplifyInstruction. 
+/// +//===----------------------------------------------------------------------===// + +// CanonicalizeInstruction defines a default DEBUG_TYPE: "sil-canonicalize" + +#include "swift/SILOptimizer/Utils/CanonicalizeInstruction.h" +#include "swift/SIL/DebugUtils.h" +#include "swift/SIL/InstructionUtils.h" +#include "swift/SIL/Projection.h" +#include "swift/SIL/SILBuilder.h" +#include "swift/SIL/SILFunction.h" +#include "swift/SILOptimizer/Analysis/SimplifyInstruction.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Support/Debug.h" + +using namespace swift; + +// STATISTIC uses the default DEBUG_TYPE. +#define DEBUG_TYPE CanonicalizeInstruction::defaultDebugType +STATISTIC(NumSimplified, "Number of instructions simplified"); + +// Tracing within the implementation can also be activated by the pass. +#undef DEBUG_TYPE +#define DEBUG_TYPE pass.debugType + +// Vtable anchor. +CanonicalizeInstruction::~CanonicalizeInstruction() {} + +// Helper to delete an instruction, or mark it for deletion. +// +// Return an iterator to the next non-deleted instruction. The incoming iterator +// may already have advanced beyond 'inst'. +static SILBasicBlock::iterator killInstruction(SILInstruction *inst, +                                               SILBasicBlock::iterator nextII, +                                               CanonicalizeInstruction &pass) { +  if (nextII == inst->getIterator()) +    ++nextII; +  pass.killInstruction(inst); +  return nextII; +} + +// Helper to delete, or mark for deletion, an instruction with potential debug +// or end of scope uses. All "real" uses must already be removed. +// +// fix_lifetime uses are not currently handled here. They are generally +// (incorrectly) treated as "incidental" uses, but no canonicalizations need +// them yet. 
+static SILBasicBlock::iterator +killInstAndIncidentalUses(SingleValueInstruction *inst, + SILBasicBlock::iterator nextII, + CanonicalizeInstruction &pass) { + while (!inst->use_empty()) { + auto *user = inst->use_begin()->getUser(); + assert(user->isDebugInstruction() || isEndOfScopeMarker(user)); + nextII = killInstruction(user, nextII, pass); + } + return killInstruction(inst, nextII, pass); +} + +//===----------------------------------------------------------------------===// +// Instruction Simplification +//===----------------------------------------------------------------------===// + +// If simplification is successful, return a valid iterator to the next +// instruction that wasn't erased. +static Optional +simplifyAndReplace(SILInstruction *inst, CanonicalizeInstruction &pass) { + // FIXME: temporarily bypass simplification until all simplifications + // preserve ownership SIL. + if (inst->getFunction()->hasOwnership()) + return None; + + SILValue result = simplifyInstruction(inst); + if (!result) + return None; + + ++NumSimplified; + + LLVM_DEBUG(llvm::dbgs() << "Simplify Old = " << *inst + << " New = " << *result << '\n'); + + // Erase the simplified instruction and any instructions that end its + // scope. Nothing needs to be added to the worklist except for Result, + // because the instruction and all non-replaced users will be deleted. + auto nextII = replaceAllSimplifiedUsesAndErase( + inst, result, + [&pass](SILInstruction *deleted) { pass.killInstruction(deleted); }); + + // Push the new instruction and any users onto the worklist. + pass.notifyHasNewUsers(result); + return nextII; +} + +//===----------------------------------------------------------------------===// +// Canonicalize Memory Operations +//===----------------------------------------------------------------------===// + +// Replace all uses of an original struct or tuple extract instruction with the +// given load instruction. 
The caller ensures that the load only loads the +// extracted field. +// +// \p extract has the form: +// (struct_extract (load %base), #field) +// +// \p loadInst has the form: +// (load (struct_element_addr %base, #field) +static void replaceUsesOfExtract(SingleValueInstruction *extract, + LoadInst *loadInst, + CanonicalizeInstruction &pass) { + assert(extract->getType() == loadInst->getType()); + + SingleValueInstruction *loadedVal = loadInst; + if (loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) { + // Borrow the load-copied subelement, with precisely the same scope as + // the aggregate borrow. + assert(extract->getNumOperands() == 1); + auto *origBorrow = cast(extract->getOperand(0)); + auto *newBorrow = SILBuilderWithScope(origBorrow) + .createBeginBorrow(loadInst->getLoc(), loadInst); + pass.notifyNewInstruction(newBorrow); + + assert(extract == origBorrow->getSingleNonEndingUse()->getUser()); + for (auto *origEnd : origBorrow->getEndBorrows()) { + auto *endBorrow = SILBuilderWithScope(origEnd).createEndBorrow( + origEnd->getLoc(), newBorrow); + pass.notifyNewInstruction(endBorrow); + } + loadedVal = newBorrow; + } + LLVM_DEBUG(llvm::dbgs() << "Replacing " << *extract << " with " + << *loadedVal << "\n"); + extract->replaceAllUsesWith(loadedVal); +} + +// Given a load with multiple struct_extracts/tuple_extracts and no other uses, +// canonicalize the load into several (struct_element_addr (load)) pairs. +// +// (struct_extract (load %base)) +// -> +// (load (struct_element_addr %base, #field) +// +// TODO: Consider handling LoadBorrowInst. +static SILBasicBlock::iterator +splitAggregateLoad(LoadInst *loadInst, CanonicalizeInstruction &pass) { + // Keep track of the next iterator after any newly added or to-be-deleted + // instructions. This must be valid regardless of whether the pass immediately + // deletes the instructions or simply records them for later deletion. 
+ auto nextII = std::next(loadInst->getIterator()); + + bool needsBorrow; + switch (loadInst->getOwnershipQualifier()) { + case LoadOwnershipQualifier::Unqualified: + case LoadOwnershipQualifier::Trivial: + needsBorrow = false; + break; + case LoadOwnershipQualifier::Copy: + needsBorrow = true; + break; + case LoadOwnershipQualifier::Take: + // TODO: To handle a "take", we would need to generate additional destroys + // for any fields that aren't already extracted. This would be out-of-place + // for this transform, and I'm not sure if this is a case that needs to be + // handled in SILGenCleanup. + return nextII; + } + struct ProjInstPair { + Projection proj; + SingleValueInstruction *extract; + + // When sorting, just look at the projection and ignore the instruction. + // Including the instruction address in the sort key would be + // nondeterministic. + bool operator<(const ProjInstPair &rhs) const { return proj < rhs.proj; } + }; + + // Add load projections to a projection list. + llvm::SmallVector projections; + llvm::SmallVector borrows; + llvm::SmallVector destroys; + for (auto *use : getNonDebugUses(loadInst)) { + auto *user = use->getUser(); + if (needsBorrow) { + if (auto *destroy = dyn_cast(user)) { + destroys.push_back(destroy); + continue; + } + auto *borrow = dyn_cast(user); + if (!borrow) + return nextII; + + // The transformation below also assumes a single borrow use. + auto *borrowedOper = borrow->getSingleNonEndingUse(); + if (!borrowedOper) + return nextII; + + borrows.push_back(borrow); + user = borrowedOper->getUser(); + } + // If we have any non SEI, TEI instruction, don't do anything here. + if (!isa(user) && !isa(user)) + return nextII; + + auto extract = cast(user); + projections.push_back({Projection(extract), extract}); + } + // Sort the list so projections with the same value decl and tuples with the + // same indices will be processed together. 
This makes it easy to reuse the + // load from the first such projection for all subsequent projections on the + // same value decl or index. + std::sort(projections.begin(), projections.end()); + + // If the original load is dead, then do not delete it before + // diagnostics. Doing so would suppress DefiniteInitialization in cases like: + // + // struct S { + // let a: Int + // init() { + // _ = a // must be diagnosed as use before initialization + // a = 0 + // } + // } + // + // However, if the load has any projections, it must be deleted, otherwise + // exclusivity checking is too strict: + // + // extension S { + // mutating func foo() { + // _ = a // Must be diagnosed as a read of self.a only not the whole self. + // } + // } + // + // TODO: This logic subtly anticipates SILGen behavior. In the future, change + // SILGen to avoid emitting the full load and never delete loads in Raw SIL. + if (projections.empty() && loadInst->getModule().getStage() == SILStage::Raw) + return nextII; + + // Create a new address projection instruction and load instruction for each + // unique projection. + Projection *lastProj = nullptr; + LoadInst *lastNewLoad = nullptr; + for (auto &pair : projections) { + auto &proj = pair.proj; + auto *extract = pair.extract; + + // If this projection is the same as the last projection we processed, just + // replace all uses of the projection with the load we created previously. + if (lastProj && proj == *lastProj) { + replaceUsesOfExtract(extract, lastNewLoad, pass); + nextII = killInstruction(extract, nextII, pass); + continue; + } + + // This is a unique projection. Create the new address projection and load. + lastProj = &proj; + // Insert new instructions before the original load. 
+ SILBuilderWithScope LoadBuilder(loadInst); + auto *projInst = + proj.createAddressProjection(LoadBuilder, loadInst->getLoc(), + loadInst->getOperand()) + .get(); + pass.notifyNewInstruction(projInst); + + // When loading a trivial subelement, convert ownership. + LoadOwnershipQualifier loadOwnership = loadInst->getOwnershipQualifier(); + if (loadOwnership != LoadOwnershipQualifier::Unqualified + && projInst->getType().isTrivial(*projInst->getFunction())) { + loadOwnership = LoadOwnershipQualifier::Trivial; + } + + lastNewLoad = + LoadBuilder.createLoad(loadInst->getLoc(), projInst, loadOwnership); + pass.notifyNewInstruction(lastNewLoad); + + if (loadOwnership == LoadOwnershipQualifier::Copy) { + // Destroy the loaded value wherever the aggregate load was destroyed. + assert(loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Copy); + for (DestroyValueInst *destroy : destroys) { + SILBuilderWithScope(destroy).createDestroyValue(destroy->getLoc(), + lastNewLoad); + pass.notifyNewInstruction(destroy); + } + } + replaceUsesOfExtract(extract, lastNewLoad, pass); + nextII = killInstruction(extract, nextII, pass); + } + // Remove the now unused borrows. + for (auto *borrow : borrows) + nextII = killInstAndIncidentalUses(borrow, nextII, pass); + + // Erase the old load. + for (auto *destroy : destroys) + nextII = killInstruction(destroy, nextII, pass); + + return killInstAndIncidentalUses(loadInst, nextII, pass); +} + +//===----------------------------------------------------------------------===// +// Top-Level Entry Point +//===----------------------------------------------------------------------===// + +SILBasicBlock::iterator +CanonicalizeInstruction::canonicalize(SILInstruction *inst) { + if (auto nextII = simplifyAndReplace(inst, *this)) + return nextII.getValue(); + + if (auto *loadInst = dyn_cast(inst)) + return splitAggregateLoad(loadInst, *this); + + // Skip ahead. 
+ return std::next(inst->getIterator()); +} diff --git a/lib/SILOptimizer/Utils/Local.cpp b/lib/SILOptimizer/Utils/Local.cpp index 141549b34bd21..72001eef9552d 100644 --- a/lib/SILOptimizer/Utils/Local.cpp +++ b/lib/SILOptimizer/Utils/Local.cpp @@ -170,16 +170,8 @@ namespace { using CallbackTy = llvm::function_ref; } // end anonymous namespace -void swift:: -recursivelyDeleteTriviallyDeadInstructions(ArrayRef IA, - bool Force, CallbackTy Callback) { - SILBasicBlock::iterator instIter; - recursivelyDeleteTriviallyDeadInstructions(IA, instIter, Force, Callback); -} - void swift::recursivelyDeleteTriviallyDeadInstructions( - ArrayRef IA, SILBasicBlock::iterator &InstIter, - bool Force, CallbackTy Callback) { + ArrayRef IA, bool Force, CallbackTy Callback) { // Delete these instruction and others that become dead after it's deleted. llvm::SmallPtrSet DeadInsts; for (auto I : IA) { @@ -230,7 +222,7 @@ void swift::recursivelyDeleteTriviallyDeadInstructions( for (auto I : DeadInsts) { // This will remove this instruction and all its uses. - eraseFromParentWithDebugInsts(I, InstIter); + eraseFromParentWithDebugInsts(I, Callback); } NextInsts.swap(DeadInsts); @@ -244,13 +236,11 @@ void swift::recursivelyDeleteTriviallyDeadInstructions( /// \param I The instruction to be deleted. /// \param Force If Force is set, don't check if the top level instruction is /// considered dead - delete it regardless. 
-SILBasicBlock::iterator -swift::recursivelyDeleteTriviallyDeadInstructions(SILInstruction *I, bool Force, - CallbackTy Callback) { - SILBasicBlock::iterator nextI = std::next(I->getIterator()); +void swift::recursivelyDeleteTriviallyDeadInstructions(SILInstruction *I, + bool Force, + CallbackTy Callback) { ArrayRef AI = ArrayRef(I); - recursivelyDeleteTriviallyDeadInstructions(AI, nextI, Force, Callback); - return nextI; + recursivelyDeleteTriviallyDeadInstructions(AI, Force, Callback); } void swift::eraseUsesOfInstruction(SILInstruction *Inst, diff --git a/lib/SILOptimizer/Utils/SILInliner.cpp b/lib/SILOptimizer/Utils/SILInliner.cpp index bc618895bd945..b614013adea97 100644 --- a/lib/SILOptimizer/Utils/SILInliner.cpp +++ b/lib/SILOptimizer/Utils/SILInliner.cpp @@ -518,11 +518,13 @@ void SILInlineCloner::fixUp(SILFunction *calleeFunction) { assert(!Apply.getInstruction()->hasUsesOfAnyResult()); auto deleteCallback = [this](SILInstruction *deletedI) { + if (NextIter == deletedI->getIterator()) + ++NextIter; if (DeletionCallback) DeletionCallback(deletedI); }; - NextIter = recursivelyDeleteTriviallyDeadInstructions(Apply.getInstruction(), - true, deleteCallback); + recursivelyDeleteTriviallyDeadInstructions(Apply.getInstruction(), true, + deleteCallback); } SILValue SILInlineCloner::borrowFunctionArgument(SILValue callArg, diff --git a/test/IRGen/access_markers.sil b/test/IRGen/access_markers.sil index 88f6ee13038f0..872a14377820a 100644 --- a/test/IRGen/access_markers.sil +++ b/test/IRGen/access_markers.sil @@ -4,14 +4,14 @@ import Builtin import Swift class A { - @_hasStorage var property: Int { get set } + @_hasStorage var property: Int64 { get set } @_hasStorage var exProperty: Any { get set } deinit init() } // CHECK-DAG: [[C:%T14access_markers1AC]] = type -// CHECK-DAG: [[INT:%TSi]] = type <{ [[SIZE:i(32|64)]] }> +// CHECK-DAG: [[INT:%Ts5Int64V]] = type <{ i64 }> sil_vtable A {} @@ -27,24 +27,32 @@ bb0(%0 : $A): // CHECK-NEXT: [[T0:%.*]] = bitcast [[BUFFER]]* 
[[SCRATCH1]] to i8* // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 {{.*}}, i8* [[T0]]) // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* - // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH1]], [[SIZE]] 33, i8* null) - %3 = begin_access [modify] [dynamic] %2 : $*Int + // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH1]], [[SIZE:i(32|64)]] 33, i8* null) + %3 = begin_access [modify] [dynamic] %2 : $*Int64 + + // CHECK-NEXT: getelementptr inbounds %Ts5Int64V, %Ts5Int64V* [[PROPERTY]], i32 0, i32 0 + // CHECK-NEXT: load i64, i64* + %4 = load %3 : $*Int64 // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH1]]) // CHECK-NEXT: [[T0:%.*]] = bitcast [[BUFFER]]* [[SCRATCH1]] to i8* // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 {{.*}}, i8* [[T0]]) - end_access %3 : $*Int + end_access %3 : $*Int64 // CHECK-NEXT: [[T0:%.*]] = bitcast [[BUFFER]]* [[SCRATCH2]] to i8* // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 {{.*}}, i8* [[T0]]) // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH2]], [[SIZE]] 32, i8* null) - %5 = begin_access [read] [dynamic] %2 : $*Int + %6 = begin_access [read] [dynamic] %2 : $*Int64 + + // CHECK-NEXT: getelementptr inbounds %Ts5Int64V, %Ts5Int64V* [[PROPERTY]], i32 0, i32 0 + // CHECK-NEXT: load i64, i64* + %7 = load %6 : $*Int64 // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH2]]) // CHECK-NEXT: [[T0:%.*]] = bitcast [[BUFFER]]* [[SCRATCH2]] to i8* // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 {{.*}}, i8* [[T0]]) - end_access %5 : $*Int + end_access %6 : $*Int64 %20 = tuple () return %20 : $() @@ -61,14 +69,14 @@ bb0(%0 : $A): // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH]], [[SIZE]] 33, i8* null) - begin_unpaired_access [modify] [dynamic] %2 : 
$*Int, %1 : $*Builtin.UnsafeValueBuffer + begin_unpaired_access [modify] [dynamic] %2 : $*Int64, %1 : $*Builtin.UnsafeValueBuffer // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH]]) end_unpaired_access [dynamic] %1 : $*Builtin.UnsafeValueBuffer // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH]], [[SIZE]] 32, i8* null) - begin_unpaired_access [read] [dynamic] %2 : $*Int, %1 : $*Builtin.UnsafeValueBuffer + begin_unpaired_access [read] [dynamic] %2 : $*Int64, %1 : $*Builtin.UnsafeValueBuffer // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH]]) end_unpaired_access [dynamic] %1 : $*Builtin.UnsafeValueBuffer @@ -88,7 +96,7 @@ bb0(%0 : $A, %1 : $*Builtin.UnsafeValueBuffer): // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* // CHECK-NEXT: [[PC:%.*]] = call i8* @llvm.returnaddress(i32 0) // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH:%1]], [[SIZE]] 33, i8* [[PC]]) - begin_unpaired_access [modify] [dynamic] %2 : $*Int, %1 : $*Builtin.UnsafeValueBuffer + begin_unpaired_access [modify] [dynamic] %2 : $*Int64, %1 : $*Builtin.UnsafeValueBuffer // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH]]) end_unpaired_access [dynamic] %1 : $*Builtin.UnsafeValueBuffer @@ -96,7 +104,7 @@ bb0(%0 : $A, %1 : $*Builtin.UnsafeValueBuffer): // CHECK-NEXT: [[T1:%.*]] = bitcast [[INT]]* [[PROPERTY]] to i8* // CHECK-NEXT: [[PC:%.*]] = call i8* @llvm.returnaddress(i32 0) // CHECK-NEXT: call void @swift_beginAccess(i8* [[T1]], [[BUFFER]]* [[SCRATCH]], [[SIZE]] 32, i8* [[PC]]) - begin_unpaired_access [read] [dynamic] %2 : $*Int, %1 : $*Builtin.UnsafeValueBuffer + begin_unpaired_access [read] [dynamic] %2 : $*Int64, %1 : $*Builtin.UnsafeValueBuffer // CHECK-NEXT: call void @swift_endAccess([[BUFFER]]* [[SCRATCH]]) end_unpaired_access [dynamic] %1 : $*Builtin.UnsafeValueBuffer @@ -149,20 +157,20 @@ bb0(%0 : $A): // 
CHECK-LABEL: define {{.*}}void @testNontracking( sil @testNontracking : $(@guaranteed A) -> () { bb0(%0 : $A): - %1 = alloc_stack $Int + %1 = alloc_stack $Int64 // CHECK: [[PROPERTY:%.*]] = getelementptr inbounds [[C]], [[C]]* %0, i32 0, i32 1 %2 = ref_element_addr %0 : $A, #A.property // CHECK: call void @swift_beginAccess(i8* %{{.*}}, [[BUFFER]]* %{{.*}}, [[SIZE]] 0, i8* null) - %3 = begin_access [read] [dynamic] [no_nested_conflict] %2 : $*Int - copy_addr %3 to [initialization] %1 : $*Int + %3 = begin_access [read] [dynamic] [no_nested_conflict] %2 : $*Int64 + copy_addr %3 to [initialization] %1 : $*Int64 // CHECK-NOT: end_access - end_access %3 : $*Int + end_access %3 : $*Int64 %9 = alloc_stack $Builtin.UnsafeValueBuffer // CHECK: call void @swift_beginAccess(i8* %{{.*}}, [[BUFFER]]* %{{.*}}, [[SIZE]] 0, i8* null) - begin_unpaired_access [read] [dynamic] [no_nested_conflict] %2 : $*Int, %9 : $*Builtin.UnsafeValueBuffer - copy_addr %2 to %1 : $*Int + begin_unpaired_access [read] [dynamic] [no_nested_conflict] %2 : $*Int64, %9 : $*Builtin.UnsafeValueBuffer + copy_addr %2 to %1 : $*Int64 dealloc_stack %9 : $*Builtin.UnsafeValueBuffer - dealloc_stack %1 : $*Int + dealloc_stack %1 : $*Int64 %20 = tuple () return %20 : $() } diff --git a/test/IRGen/enum.sil b/test/IRGen/enum.sil index 362e75cbdc490..9c15d2d96e5e5 100644 --- a/test/IRGen/enum.sil +++ b/test/IRGen/enum.sil @@ -1,7 +1,7 @@ // #if directives don't work with SIL keywords, therefore please put ObjC tests // in `enum_objc.sil`. 
-// RUN: %target-swift-frontend %s -gnone -emit-ir -enable-objc-interop | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize --check-prefix=CHECK-objc --check-prefix=CHECK-objc-%target-ptrsize --check-prefix=CHECK-objc-%target-ptrsize-simulator-%target-is-simulator -DWORD=i%target-ptrsize -// RUN: %target-swift-frontend %s -gnone -emit-ir -disable-objc-interop | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize --check-prefix=CHECK-native --check-prefix=CHECK-native-%target-ptrsize -DWORD=i%target-ptrsize +// RUN: %target-swift-frontend %s -gnone -emit-ir -disable-diagnostic-passes -enable-objc-interop | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize --check-prefix=CHECK-objc --check-prefix=CHECK-objc-%target-ptrsize --check-prefix=CHECK-objc-%target-ptrsize-simulator-%target-is-simulator -DWORD=i%target-ptrsize +// RUN: %target-swift-frontend %s -gnone -emit-ir -disable-diagnostic-passes -disable-objc-interop | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize --check-prefix=CHECK-native --check-prefix=CHECK-native-%target-ptrsize -DWORD=i%target-ptrsize // REQUIRES: CPU=i386 || CPU=x86_64 diff --git a/test/SILOptimizer/exclusivity_static_diagnostics.swift b/test/SILOptimizer/exclusivity_static_diagnostics.swift index 9e4dca5072ae4..f43769006137f 100644 --- a/test/SILOptimizer/exclusivity_static_diagnostics.swift +++ b/test/SILOptimizer/exclusivity_static_diagnostics.swift @@ -614,3 +614,37 @@ func nestedConflict(x: inout Int) { // expected-error@-1 2{{overlapping accesses to 'x', but modification requires exclusive access; consider copying to a local variable}} // expected-note@-2 2{{conflicting access is here}} } + +// Avoid diagnosing a conflict on disjoint struct properties when one is a `let`. +// This requires an address projection before loading the `let` property. 
+// +// [SR-10145][Exclusivity] SILGen loads entire struct when reading captured 'let' stored property +struct DisjointLetMember { + var dummy: AnyObject // Make this a nontrivial struct because the SIL is more involved. + mutating func get(makeValue: ()->Int) -> Int { + return makeValue() + } +} + +class IntWrapper { + var x = 0 +} + +struct DisjointLet { + let a = 2 // Using a `let` forces a full load. + let b: IntWrapper + var cache: DisjointLetMember + + init(b: IntWrapper) { + self.b = b + self.cache = DisjointLetMember(dummy: b) + } + + mutating func testDisjointLet() -> Int { + // Access to inout `self` for member `.cache`. + return cache.get { + // Access to captured `self` for member `.cache`. + a + b.x + } + } +} diff --git a/test/SILOptimizer/opaque_values_mandatory.sil b/test/SILOptimizer/opaque_values_mandatory.sil index 85bcee38dfd46..0e0a40971ac20 100644 --- a/test/SILOptimizer/opaque_values_mandatory.sil +++ b/test/SILOptimizer/opaque_values_mandatory.sil @@ -45,11 +45,8 @@ bb0(%0 : $*T, %1 : $T): // CHECK: bb0(%0 : $Builtin.Int64): // CHECK: %1 = function_ref @f040_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) // CHECK: %2 = apply %1(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %3 = tuple_extract %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 0 -// CHECK: %4 = tuple_extract %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 1 -// CHECK: %5 = tuple_extract %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 2 -// CHECK: %6 = tuple (%3 : $Builtin.Int64, %4 : $Builtin.Int64, %5 : $Builtin.Int64) -// CHECK: return %6 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) +// Note: the tuple construction is simplified away. 
+// CHECK: return %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) +// CHECK-LABEL: } // end sil function 'f030_callMultiResult' sil @f030_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { bb0(%0 : $Int): diff --git a/test/SILOptimizer/silgen_cleanup.sil b/test/SILOptimizer/silgen_cleanup.sil new file mode 100644 index 0000000000000..1557900478c28 --- /dev/null +++ b/test/SILOptimizer/silgen_cleanup.sil @@ -0,0 +1,117 @@ +// RUN: %target-sil-opt -silgen-cleanup %s | %FileCheck %s + +import Builtin + +sil_stage raw + +import Swift +import SwiftShims + +// CHECK-LABEL: sil [ossa] @struct_extract_load_to_load_struct_element_addr +// CHECK: bb0([[IN:%[0-9]+]] : $*UInt8): +// CHECK-NEXT: [[IN_GEP:%[0-9]+]] = struct_element_addr [[IN]] : $*UInt8, #UInt8._value +// CHECK-NEXT: [[IN_LOADED:%[0-9]+]] = load [trivial] [[IN_GEP]] : $*Builtin.Int8 +// CHECK-NEXT: return [[IN_LOADED]] : $Builtin.Int8 +sil [ossa] @struct_extract_load_to_load_struct_element_addr : $@convention(thin) (@inout UInt8) -> (Builtin.Int8) { +bb0(%0 : $*UInt8): + %1 = load [trivial] %0 : $*UInt8 + %5 = struct_extract %1 : $UInt8, #UInt8._value + return %5 : $Builtin.Int8 +} + +// CHECK-LABEL: sil [ossa] @tuple_extract_load_to_load_tuple_element_addr +// CHECK: bb0([[IN:%[0-9]+]] : $*(Builtin.Int8, Builtin.Int8)): +// CHECK-NEXT: [[IN_GEP:%[0-9]+]] = tuple_element_addr [[IN]] : $*(Builtin.Int8, Builtin.Int8), 0 +// CHECK-NEXT: [[IN_LOADED:%[0-9]+]] = load [trivial] [[IN_GEP]] : $*Builtin.Int8 +// CHECK-NEXT: return [[IN_LOADED]] : $Builtin.Int8 +sil [ossa] @tuple_extract_load_to_load_tuple_element_addr : $@convention(thin) (@inout (Builtin.Int8, Builtin.Int8)) -> (Builtin.Int8) { +bb0(%0 : $*(Builtin.Int8, Builtin.Int8)): + %1 = load [trivial] %0 : $*(Builtin.Int8, Builtin.Int8) + %5 = tuple_extract %1 : $(Builtin.Int8, Builtin.Int8), 0 + return %5 : $Builtin.Int8 +} + +// Do not perform the optimization if the input load has multiple uses. 
+// +// CHECK-LABEL: sil [ossa] @multiple_use_struct_extract_load_to_load_struct_element_addr +// CHECK: bb0([[IN:%[0-9]+]] : $*UInt8): +// CHECK-NEXT: load +// CHECK-NEXT: struct_extract +// CHECK-NEXT: tuple +// CHECK-NEXT: return +sil [ossa] @multiple_use_struct_extract_load_to_load_struct_element_addr : $@convention(thin) (@inout UInt8) -> (UInt8, Builtin.Int8) { +bb0(%0 : $*UInt8): + %1 = load [trivial] %0 : $*UInt8 + %5 = struct_extract %1 : $UInt8, #UInt8._value + %6 = tuple (%1 : $UInt8, %5 : $Builtin.Int8) + return %6 : $(UInt8, Builtin.Int8) +} + +// Do not perform the optimization if the input load has multiple uses. +// +// CHECK-LABEL: sil [ossa] @multiple_use_tuple_extract_load_to_load_tuple_element_addr +// CHECK: bb0 +// CHECK-NEXT: load +// CHECK-NEXT: tuple_extract +// CHECK-NEXT: tuple +// CHECK-NEXT: return +sil [ossa] @multiple_use_tuple_extract_load_to_load_tuple_element_addr : $@convention(thin) (@inout (Builtin.Int8, Builtin.Int8)) -> ((Builtin.Int8, Builtin.Int8), Builtin.Int8) { +bb0(%0 : $*(Builtin.Int8, Builtin.Int8)): + %1 = load [trivial] %0 : $*(Builtin.Int8, Builtin.Int8) + %5 = tuple_extract %1 : $(Builtin.Int8, Builtin.Int8), 0 + %6 = tuple (%1 : $(Builtin.Int8, Builtin.Int8), %5 : $Builtin.Int8) + return %6 : $((Builtin.Int8, Builtin.Int8), Builtin.Int8) +} + +// Handle a combination of trivial and nontrivial elements. 
+ +struct X1 { + @_hasStorage @_hasInitialValue let a: Int { get } + @_hasStorage @_hasInitialValue var obj1: AnyObject { get set } + @_hasStorage @_hasInitialValue var obj2: AnyObject { get set } + init(a: Int, obj1: AnyObject, obj2: AnyObject) +} + +// CHECK-LABEL: sil private [ossa] @testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned AnyObject, @owned AnyObject) { +// CHECK-LABEL: bb0(%0 : $*X1): +// CHECK: [[ACCESS:%.*]] = begin_access [read] [unknown] %0 : $*X1 +// CHECK: [[AA:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.a +// CHECK: load [trivial] [[AA]] : $*Int +// CHECK: [[OA1:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.obj1 +// CHECK: [[OV1:%.*]] = load [copy] [[OA1]] : $*AnyObject +// CHECK: [[OA2:%.*]] = struct_element_addr [[ACCESS]] : $*X1, #X1.obj2 +// CHECK: [[OV2:%.*]] = load [copy] [[OA2]] : $*AnyObject +// CHECK: end_access [[ACCESS]] : $*X1 +// CHECK: [[B1:%.*]] = begin_borrow [[OV1]] : $AnyObject +// CHECK: copy_value [[B1]] : $AnyObject +// CHECK: end_borrow [[B1]] : $AnyObject +// CHECK: [[B2:%.*]] = begin_borrow [[OV2]] : $AnyObject +// CHECK: copy_value [[B2]] : $AnyObject +// CHECK: end_borrow [[B2]] : $AnyObject +// CHECK: return +// CHECK-LABEL: } // end sil function 'testLoadNontrivial' +sil private [ossa] @testLoadNontrivial : $@convention(thin) (@inout_aliasable X1) -> (Int, @owned AnyObject, @owned AnyObject) { +bb0(%0 : $*X1): + %access = begin_access [read] [unknown] %0 : $*X1 + %load = load [copy] %access : $*X1 + end_access %access : $*X1 + + %borrowa = begin_borrow %load : $X1 + %a = struct_extract %borrowa : $X1, #X1.a + end_borrow %borrowa : $X1 + + %borrow1 = begin_borrow %load : $X1 + %o1 = struct_extract %borrow1 : $X1, #X1.obj1 + %copy1 = copy_value %o1 : $AnyObject + end_borrow %borrow1 : $X1 + + %borrow2 = begin_borrow %load : $X1 + %o2 = struct_extract %borrow2 : $X1, #X1.obj2 + %copy2 = copy_value %o2 : $AnyObject + end_borrow %borrow2 : $X1 + + destroy_value %load : $X1 + + 
%result = tuple (%a : $Int, %copy1 : $AnyObject, %copy2 : $AnyObject) + return %result : $(Int, AnyObject, AnyObject) +}